drm/amdgpu: fix possible UAF in amdgpu_cs_pass1()
[platform/kernel/linux-starfive.git] / drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include <drm/ttm/ttm_tt.h>
36
37 #include "amdgpu_cs.h"
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_gmc.h"
41 #include "amdgpu_gem.h"
42 #include "amdgpu_ras.h"
43
44 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
45                                  struct amdgpu_device *adev,
46                                  struct drm_file *filp,
47                                  union drm_amdgpu_cs *cs)
48 {
49         struct amdgpu_fpriv *fpriv = filp->driver_priv;
50
51         if (cs->in.num_chunks == 0)
52                 return -EINVAL;
53
54         memset(p, 0, sizeof(*p));
55         p->adev = adev;
56         p->filp = filp;
57
58         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
59         if (!p->ctx)
60                 return -EINVAL;
61
62         if (atomic_read(&p->ctx->guilty)) {
63                 amdgpu_ctx_put(p->ctx);
64                 return -ECANCELED;
65         }
66
67         amdgpu_sync_create(&p->sync);
68         drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
69         return 0;
70 }
71
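/*
 * Map an IB chunk to a gang slot: an IB whose scheduler entity was already
 * seen reuses that job slot, otherwise a new slot is taken until
 * AMDGPU_CS_GANG_SIZE distinct entities are in use, after which the
 * submission is rejected.
 */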
72 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
73                              struct drm_amdgpu_cs_chunk_ib *chunk_ib)
74 {
75         struct drm_sched_entity *entity;
76         unsigned int i;
77         int r;
78
79         r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
80                                   chunk_ib->ip_instance,
81                                   chunk_ib->ring, &entity);
82         if (r)
83                 return r;
84
85         /*
86          * Abort if there is no run queue associated with this entity.
87          * Possibly because of disabled HW IP.
88          */
89         if (entity->rq == NULL)
90                 return -EINVAL;
91
92         /* Check if we can add this IB to some existing job */
93         for (i = 0; i < p->gang_size; ++i)
94                 if (p->entities[i] == entity)
95                         return i;
96
97         /* If not, increase the gang size if possible */
98         if (i == AMDGPU_CS_GANG_SIZE)
99                 return -EINVAL;
100
101         p->entities[i] = entity;
102         p->gang_size = i + 1;
103         return i;
104 }
105
106 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
107                            struct drm_amdgpu_cs_chunk_ib *chunk_ib,
108                            unsigned int *num_ibs)
109 {
110         int r;
111
112         r = amdgpu_cs_job_idx(p, chunk_ib);
113         if (r < 0)
114                 return r;
115
116         if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
117                 return -EINVAL;
118
119         ++(num_ibs[r]);
120         p->gang_leader_idx = r;
121         return 0;
122 }
123
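/*
 * The user fence BO must be exactly one page and large enough to hold the
 * 8 byte fence value at the requested offset, and it must not be backed by
 * a userptr. An extra BO reference is taken before the GEM lookup reference
 * is dropped so that p->uf_bo stays valid for the rest of the submission;
 * the matching amdgpu_bo_unref() sits in the error path below and in
 * amdgpu_cs_parser_fini().
 */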
124 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
125                                    struct drm_amdgpu_cs_chunk_fence *data,
126                                    uint32_t *offset)
127 {
128         struct drm_gem_object *gobj;
129         unsigned long size;
130         int r;
131
132         gobj = drm_gem_object_lookup(p->filp, data->handle);
133         if (gobj == NULL)
134                 return -EINVAL;
135
136         p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
137         drm_gem_object_put(gobj);
138
139         size = amdgpu_bo_size(p->uf_bo);
140         if (size != PAGE_SIZE || (data->offset + 8) > size) {
141                 r = -EINVAL;
142                 goto error_unref;
143         }
144
145         if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
146                 r = -EINVAL;
147                 goto error_unref;
148         }
149
150         *offset = data->offset;
151
152         return 0;
153
154 error_unref:
155         amdgpu_bo_unref(&p->uf_bo);
156         return r;
157 }
158
159 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
160                                    struct drm_amdgpu_bo_list_in *data)
161 {
162         struct drm_amdgpu_bo_list_entry *info;
163         int r;
164
165         r = amdgpu_bo_create_list_entry_array(data, &info);
166         if (r)
167                 return r;
168
169         r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
170                                   &p->bo_list);
171         if (r)
172                 goto error_free;
173
174         kvfree(info);
175         return 0;
176
177 error_free:
178         kvfree(info);
179
180         return r;
181 }
182
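/*
 * Error unwinding in pass 1: a failure while handling chunk i jumps to
 * free_partial_kdata and releases the kdata buffers allocated so far (i is
 * decremented first when kdata for chunk i was never allocated), while
 * failures after the loop go through free_all_kdata, which rewinds i to
 * the last chunk so every buffer is freed exactly once.
 */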
183 /* Copy the data from userspace and go over it the first time */
184 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
185                            union drm_amdgpu_cs *cs)
186 {
187         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
188         unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
189         struct amdgpu_vm *vm = &fpriv->vm;
190         uint64_t *chunk_array_user;
191         uint64_t *chunk_array;
192         uint32_t uf_offset = 0;
193         size_t size;
194         int ret;
195         int i;
196
197         chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
198                                      GFP_KERNEL);
199         if (!chunk_array)
200                 return -ENOMEM;
201
202         /* get chunks */
203         chunk_array_user = u64_to_user_ptr(cs->in.chunks);
204         if (copy_from_user(chunk_array, chunk_array_user,
205                            sizeof(uint64_t)*cs->in.num_chunks)) {
206                 ret = -EFAULT;
207                 goto free_chunk;
208         }
209
210         p->nchunks = cs->in.num_chunks;
211         p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
212                             GFP_KERNEL);
213         if (!p->chunks) {
214                 ret = -ENOMEM;
215                 goto free_chunk;
216         }
217
218         for (i = 0; i < p->nchunks; i++) {
219                 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
220                 struct drm_amdgpu_cs_chunk user_chunk;
221                 uint32_t __user *cdata;
222
223                 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
224                 if (copy_from_user(&user_chunk, chunk_ptr,
225                                        sizeof(struct drm_amdgpu_cs_chunk))) {
226                         ret = -EFAULT;
227                         i--;
228                         goto free_partial_kdata;
229                 }
230                 p->chunks[i].chunk_id = user_chunk.chunk_id;
231                 p->chunks[i].length_dw = user_chunk.length_dw;
232
233                 size = p->chunks[i].length_dw;
234                 cdata = u64_to_user_ptr(user_chunk.chunk_data);
235
236                 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
237                                                     GFP_KERNEL);
238                 if (p->chunks[i].kdata == NULL) {
239                         ret = -ENOMEM;
240                         i--;
241                         goto free_partial_kdata;
242                 }
243                 size *= sizeof(uint32_t);
244                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
245                         ret = -EFAULT;
246                         goto free_partial_kdata;
247                 }
248
249                 /* Assume the worst on the following checks */
250                 ret = -EINVAL;
251                 switch (p->chunks[i].chunk_id) {
252                 case AMDGPU_CHUNK_ID_IB:
253                         if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
254                                 goto free_partial_kdata;
255
256                         ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
257                         if (ret)
258                                 goto free_partial_kdata;
259                         break;
260
261                 case AMDGPU_CHUNK_ID_FENCE:
262                         if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
263                                 goto free_partial_kdata;
264
265                         ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
266                                                       &uf_offset);
267                         if (ret)
268                                 goto free_partial_kdata;
269                         break;
270
271                 case AMDGPU_CHUNK_ID_BO_HANDLES:
272                         if (size < sizeof(struct drm_amdgpu_bo_list_in))
273                                 goto free_partial_kdata;
274
275                         ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
276                         if (ret)
277                                 goto free_partial_kdata;
278                         break;
279
280                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
281                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
282                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
283                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
284                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
285                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
286                 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
287                         break;
288
289                 default:
290                         goto free_partial_kdata;
291                 }
292         }
293
294         if (!p->gang_size) {
295                 ret = -EINVAL;
296                 goto free_all_kdata;
297         }
298
299         for (i = 0; i < p->gang_size; ++i) {
300                 ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
301                                        num_ibs[i], &p->jobs[i]);
302                 if (ret)
303                         goto free_all_kdata;
304         }
305         p->gang_leader = p->jobs[p->gang_leader_idx];
306
307         if (p->ctx->generation != p->gang_leader->generation) {
308                 ret = -ECANCELED;
309                 goto free_all_kdata;
310         }
311
312         if (p->uf_bo)
313                 p->gang_leader->uf_addr = uf_offset;
314         kvfree(chunk_array);
315
316         /* Use this opportunity to fill in task info for the vm */
317         amdgpu_vm_set_task_info(vm);
318
319         return 0;
320
321 free_all_kdata:
322         i = p->nchunks - 1;
323 free_partial_kdata:
324         for (; i >= 0; i--)
325                 kvfree(p->chunks[i].kdata);
326         kvfree(p->chunks);
327         p->chunks = NULL;
328         p->nchunks = 0;
329 free_chunk:
330         kvfree(chunk_array);
331
332         return ret;
333 }
334
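/*
 * Second pass for IB chunks: the jobs were allocated at the end of pass 1,
 * so each chunk is now copied into the next free amdgpu_ib slot of the job
 * that owns its entity. The CE/DE preemption limit is only enforced for
 * GFX, with at most one preemptible CE and one preemptible DE IB per
 * submission.
 */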
335 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
336                            struct amdgpu_cs_chunk *chunk,
337                            unsigned int *ce_preempt,
338                            unsigned int *de_preempt)
339 {
340         struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
341         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
342         struct amdgpu_vm *vm = &fpriv->vm;
343         struct amdgpu_ring *ring;
344         struct amdgpu_job *job;
345         struct amdgpu_ib *ib;
346         int r;
347
348         r = amdgpu_cs_job_idx(p, chunk_ib);
349         if (r < 0)
350                 return r;
351
352         job = p->jobs[r];
353         ring = amdgpu_job_ring(job);
354         ib = &job->ibs[job->num_ibs++];
355
356         /* MM engine doesn't support user fences */
357         if (p->uf_bo && ring->funcs->no_user_fence)
358                 return -EINVAL;
359
360         if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
361             chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
362                 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
363                         (*ce_preempt)++;
364                 else
365                         (*de_preempt)++;
366
367                 /* Each GFX command submission allows at most one
368                  * preemptible IB each for CE and DE */
369                 if (*ce_preempt > 1 || *de_preempt > 1)
370                         return -EINVAL;
371         }
372
373         if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
374                 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
375
376         r =  amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
377                            chunk_ib->ib_bytes : 0,
378                            AMDGPU_IB_POOL_DELAYED, ib);
379         if (r) {
380                 DRM_ERROR("Failed to get ib !\n");
381                 return r;
382         }
383
384         ib->gpu_addr = chunk_ib->va_start;
385         ib->length_dw = chunk_ib->ib_bytes / 4;
386         ib->flags = chunk_ib->flags;
387         return 0;
388 }
389
390 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
391                                      struct amdgpu_cs_chunk *chunk)
392 {
393         struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
394         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
395         unsigned int num_deps;
396         int i, r;
397
398         num_deps = chunk->length_dw * 4 /
399                 sizeof(struct drm_amdgpu_cs_chunk_dep);
400
401         for (i = 0; i < num_deps; ++i) {
402                 struct amdgpu_ctx *ctx;
403                 struct drm_sched_entity *entity;
404                 struct dma_fence *fence;
405
406                 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
407                 if (ctx == NULL)
408                         return -EINVAL;
409
410                 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
411                                           deps[i].ip_instance,
412                                           deps[i].ring, &entity);
413                 if (r) {
414                         amdgpu_ctx_put(ctx);
415                         return r;
416                 }
417
418                 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
419                 amdgpu_ctx_put(ctx);
420
421                 if (IS_ERR(fence))
422                         return PTR_ERR(fence);
423                 else if (!fence)
424                         continue;
425
426                 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
427                         struct drm_sched_fence *s_fence;
428                         struct dma_fence *old = fence;
429
430                         s_fence = to_drm_sched_fence(fence);
431                         fence = dma_fence_get(&s_fence->scheduled);
432                         dma_fence_put(old);
433                 }
434
435                 r = amdgpu_sync_fence(&p->sync, fence);
436                 dma_fence_put(fence);
437                 if (r)
438                         return r;
439         }
440         return 0;
441 }
442
443 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
444                                          uint32_t handle, u64 point,
445                                          u64 flags)
446 {
447         struct dma_fence *fence;
448         int r;
449
450         r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
451         if (r) {
452                 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
453                           handle, point, r);
454                 return r;
455         }
456
457         r = amdgpu_sync_fence(&p->sync, fence);
458         dma_fence_put(fence);
459         return r;
460 }
461
462 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
463                                    struct amdgpu_cs_chunk *chunk)
464 {
465         struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
466         unsigned int num_deps;
467         int i, r;
468
469         num_deps = chunk->length_dw * 4 /
470                 sizeof(struct drm_amdgpu_cs_chunk_sem);
471         for (i = 0; i < num_deps; ++i) {
472                 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
473                 if (r)
474                         return r;
475         }
476
477         return 0;
478 }
479
480 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
481                                               struct amdgpu_cs_chunk *chunk)
482 {
483         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
484         unsigned int num_deps;
485         int i, r;
486
487         num_deps = chunk->length_dw * 4 /
488                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
489         for (i = 0; i < num_deps; ++i) {
490                 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
491                                                   syncobj_deps[i].point,
492                                                   syncobj_deps[i].flags);
493                 if (r)
494                         return r;
495         }
496
497         return 0;
498 }
499
500 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
501                                     struct amdgpu_cs_chunk *chunk)
502 {
503         struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
504         unsigned int num_deps;
505         int i;
506
507         num_deps = chunk->length_dw * 4 /
508                 sizeof(struct drm_amdgpu_cs_chunk_sem);
509
510         if (p->post_deps)
511                 return -EINVAL;
512
513         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
514                                      GFP_KERNEL);
515         p->num_post_deps = 0;
516
517         if (!p->post_deps)
518                 return -ENOMEM;
519
520
521         for (i = 0; i < num_deps; ++i) {
522                 p->post_deps[i].syncobj =
523                         drm_syncobj_find(p->filp, deps[i].handle);
524                 if (!p->post_deps[i].syncobj)
525                         return -EINVAL;
526                 p->post_deps[i].chain = NULL;
527                 p->post_deps[i].point = 0;
528                 p->num_post_deps++;
529         }
530
531         return 0;
532 }
533
534 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
535                                                 struct amdgpu_cs_chunk *chunk)
536 {
537         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
538         unsigned int num_deps;
539         int i;
540
541         num_deps = chunk->length_dw * 4 /
542                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
543
544         if (p->post_deps)
545                 return -EINVAL;
546
547         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
548                                      GFP_KERNEL);
549         p->num_post_deps = 0;
550
551         if (!p->post_deps)
552                 return -ENOMEM;
553
554         for (i = 0; i < num_deps; ++i) {
555                 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
556
557                 dep->chain = NULL;
558                 if (syncobj_deps[i].point) {
559                         dep->chain = dma_fence_chain_alloc();
560                         if (!dep->chain)
561                                 return -ENOMEM;
562                 }
563
564                 dep->syncobj = drm_syncobj_find(p->filp,
565                                                 syncobj_deps[i].handle);
566                 if (!dep->syncobj) {
567                         dma_fence_chain_free(dep->chain);
568                         return -EINVAL;
569                 }
570                 dep->point = syncobj_deps[i].point;
571                 p->num_post_deps++;
572         }
573
574         return 0;
575 }
576
577 static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
578                                struct amdgpu_cs_chunk *chunk)
579 {
580         struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
581         int i;
582
583         if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
584                 return -EINVAL;
585
586         for (i = 0; i < p->gang_size; ++i) {
587                 p->jobs[i]->shadow_va = shadow->shadow_va;
588                 p->jobs[i]->csa_va = shadow->csa_va;
589                 p->jobs[i]->gds_va = shadow->gds_va;
590                 p->jobs[i]->init_shadow =
591                         shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
592         }
593
594         return 0;
595 }
596
597 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
598 {
599         unsigned int ce_preempt = 0, de_preempt = 0;
600         int i, r;
601
602         for (i = 0; i < p->nchunks; ++i) {
603                 struct amdgpu_cs_chunk *chunk;
604
605                 chunk = &p->chunks[i];
606
607                 switch (chunk->chunk_id) {
608                 case AMDGPU_CHUNK_ID_IB:
609                         r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
610                         if (r)
611                                 return r;
612                         break;
613                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
614                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
615                         r = amdgpu_cs_p2_dependencies(p, chunk);
616                         if (r)
617                                 return r;
618                         break;
619                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
620                         r = amdgpu_cs_p2_syncobj_in(p, chunk);
621                         if (r)
622                                 return r;
623                         break;
624                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
625                         r = amdgpu_cs_p2_syncobj_out(p, chunk);
626                         if (r)
627                                 return r;
628                         break;
629                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
630                         r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
631                         if (r)
632                                 return r;
633                         break;
634                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
635                         r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
636                         if (r)
637                                 return r;
638                         break;
639                 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
640                         r = amdgpu_cs_p2_shadow(p, chunk);
641                         if (r)
642                                 return r;
643                         break;
644                 }
645         }
646
647         return 0;
648 }
649
650 /* Convert microseconds to bytes. */
651 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
652 {
653         if (us <= 0 || !adev->mm_stats.log2_max_MBps)
654                 return 0;
655
656         /* Since accum_us is incremented by a million per second, just
657          * multiply it by the number of MB/s to get the number of bytes.
658          */
659         return us << adev->mm_stats.log2_max_MBps;
660 }
661
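/*
 * Worked example for us_to_bytes()/bytes_to_us(), with an assumed
 * log2_max_MBps of 13 (~8 GB/s): one second of accumulated time
 * (1,000,000 us) converts to 1,000,000 << 13 = ~8.2e9 bytes of allowed
 * moves, and bytes_to_us() is simply the inverse shift.
 */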
662 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
663 {
664         if (!adev->mm_stats.log2_max_MBps)
665                 return 0;
666
667         return bytes >> adev->mm_stats.log2_max_MBps;
668 }
669
670 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
671  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
672  * which means it can go over the threshold once. If that happens, the driver
673  * will be in debt and no other buffer migrations can be done until that debt
674  * is repaid.
675  *
676  * This approach allows moving a buffer of any size (it's important to allow
677  * that).
678  *
679  * The currency is simply time in microseconds and it increases as the clock
680  * ticks. The accumulated microseconds (us) are converted to bytes and
681  * returned.
682  */
683 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
684                                               u64 *max_bytes,
685                                               u64 *max_vis_bytes)
686 {
687         s64 time_us, increment_us;
688         u64 free_vram, total_vram, used_vram;
689         /* Allow a maximum of 200 accumulated ms. This is basically per-IB
690          * throttling.
691          *
692          * It means that in order to get full max MBps, at least 5 IBs per
693          * second must be submitted and not more than 200ms apart from each
694          * other.
695          */
696         const s64 us_upper_bound = 200000;
697
698         if (!adev->mm_stats.log2_max_MBps) {
699                 *max_bytes = 0;
700                 *max_vis_bytes = 0;
701                 return;
702         }
703
704         total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
705         used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
706         free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
707
708         spin_lock(&adev->mm_stats.lock);
709
710         /* Increase the amount of accumulated us. */
711         time_us = ktime_to_us(ktime_get());
712         increment_us = time_us - adev->mm_stats.last_update_us;
713         adev->mm_stats.last_update_us = time_us;
714         adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
715                                       us_upper_bound);
716
717         /* This prevents the short period of low performance when the VRAM
718          * usage is low and the driver is in debt or doesn't have enough
719          * accumulated us to fill VRAM quickly.
720          *
721          * The situation can occur in these cases:
722          * - a lot of VRAM is freed by userspace
723          * - the presence of a big buffer causes a lot of evictions
724          *   (solution: split buffers into smaller ones)
725          *
726          * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
727          * accum_us to a positive number.
728          */
729         if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
730                 s64 min_us;
731
732                 /* Be more aggressive on dGPUs. Try to fill a portion of free
733                  * VRAM now.
734                  */
735                 if (!(adev->flags & AMD_IS_APU))
736                         min_us = bytes_to_us(adev, free_vram / 4);
737                 else
738                         min_us = 0; /* Reset accum_us on APUs. */
739
740                 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
741         }
742
743         /* This is set to 0 if the driver is in debt to disallow (optional)
744          * buffer moves.
745          */
746         *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
747
748         /* Do the same for visible VRAM if half of it is free */
749         if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
750                 u64 total_vis_vram = adev->gmc.visible_vram_size;
751                 u64 used_vis_vram =
752                   amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
753
754                 if (used_vis_vram < total_vis_vram) {
755                         u64 free_vis_vram = total_vis_vram - used_vis_vram;
756
757                         adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
758                                                           increment_us, us_upper_bound);
759
760                         if (free_vis_vram >= total_vis_vram / 2)
761                                 adev->mm_stats.accum_us_vis =
762                                         max(bytes_to_us(adev, free_vis_vram / 2),
763                                             adev->mm_stats.accum_us_vis);
764                 }
765
766                 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
767         } else {
768                 *max_vis_bytes = 0;
769         }
770
771         spin_unlock(&adev->mm_stats.lock);
772 }
773
774 /* Report how many bytes have really been moved for the last command
775  * submission. This can result in a debt that can stop buffer migrations
776  * temporarily.
777  */
778 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
779                                   u64 num_vis_bytes)
780 {
781         spin_lock(&adev->mm_stats.lock);
782         adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
783         adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
784         spin_unlock(&adev->mm_stats.lock);
785 }
786
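/*
 * Per-BO validation callback: the preferred domains are only tried while
 * the per-submission move budget from amdgpu_cs_get_threshold_for_moves()
 * still has headroom and the BO is not attached to a dma-buf; otherwise,
 * or when placement fails with -ENOMEM, validation retries with the
 * allowed domains. The bytes actually moved by TTM are charged against
 * the budget.
 */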
787 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
788 {
789         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
790         struct amdgpu_cs_parser *p = param;
791         struct ttm_operation_ctx ctx = {
792                 .interruptible = true,
793                 .no_wait_gpu = false,
794                 .resv = bo->tbo.base.resv
795         };
796         uint32_t domain;
797         int r;
798
799         if (bo->tbo.pin_count)
800                 return 0;
801
802         /* Don't move this buffer if we have depleted our allowance
803          * to move it. Don't move anything if the threshold is zero.
804          */
805         if (p->bytes_moved < p->bytes_moved_threshold &&
806             (!bo->tbo.base.dma_buf ||
807             list_empty(&bo->tbo.base.dma_buf->attachments))) {
808                 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
809                     (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
810                         /* And don't move a CPU_ACCESS_REQUIRED BO to limited
811                          * visible VRAM if we've depleted our allowance to do
812                          * that.
813                          */
814                         if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
815                                 domain = bo->preferred_domains;
816                         else
817                                 domain = bo->allowed_domains;
818                 } else {
819                         domain = bo->preferred_domains;
820                 }
821         } else {
822                 domain = bo->allowed_domains;
823         }
824
825 retry:
826         amdgpu_bo_placement_from_domain(bo, domain);
827         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
828
829         p->bytes_moved += ctx.bytes_moved;
830         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
831             amdgpu_bo_in_cpu_visible_vram(bo))
832                 p->bytes_moved_vis += ctx.bytes_moved;
833
834         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
835                 domain = bo->allowed_domains;
836                 goto retry;
837         }
838
839         return r;
840 }
841
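/*
 * Lock and validate every BO of the submission: userptr backing pages are
 * (re)acquired first, then the VM page directory and all list entries are
 * locked through drm_exec with room for one TTM fence plus one fence per
 * gang member, and finally everything is validated against the move budget
 * from amdgpu_cs_get_threshold_for_moves().
 */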
842 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
843                                 union drm_amdgpu_cs *cs)
844 {
845         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
846         struct ttm_operation_ctx ctx = { true, false };
847         struct amdgpu_vm *vm = &fpriv->vm;
848         struct amdgpu_bo_list_entry *e;
849         struct drm_gem_object *obj;
850         unsigned long index;
851         unsigned int i;
852         int r;
853
854         /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
855         if (cs->in.bo_list_handle) {
856                 if (p->bo_list)
857                         return -EINVAL;
858
859                 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
860                                        &p->bo_list);
861                 if (r)
862                         return r;
863         } else if (!p->bo_list) {
864                 /* Create an empty bo_list when no handle is provided */
865                 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
866                                           &p->bo_list);
867                 if (r)
868                         return r;
869         }
870
871         mutex_lock(&p->bo_list->bo_list_mutex);
872
873         /* Get userptr backing pages. If the pages are updated after being
874          * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
875          * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages
876          */
877         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
878                 bool userpage_invalidated = false;
879                 struct amdgpu_bo *bo = e->bo;
880                 int i;
881
882                 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
883                                         sizeof(struct page *),
884                                         GFP_KERNEL | __GFP_ZERO);
885                 if (!e->user_pages) {
886                         DRM_ERROR("kvmalloc_array failure\n");
887                         r = -ENOMEM;
888                         goto out_free_user_pages;
889                 }
890
891                 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
892                 if (r) {
893                         kvfree(e->user_pages);
894                         e->user_pages = NULL;
895                         goto out_free_user_pages;
896                 }
897
898                 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
899                         if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
900                                 userpage_invalidated = true;
901                                 break;
902                         }
903                 }
904                 e->user_invalidated = userpage_invalidated;
905         }
906
907         drm_exec_until_all_locked(&p->exec) {
908                 r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
909                 drm_exec_retry_on_contention(&p->exec);
910                 if (unlikely(r))
911                         goto out_free_user_pages;
912
913                 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
914                         /* One fence for TTM and one for each CS job */
915                         r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
916                                                  1 + p->gang_size);
917                         drm_exec_retry_on_contention(&p->exec);
918                         if (unlikely(r))
919                                 goto out_free_user_pages;
920
921                         e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
922                 }
923
924                 if (p->uf_bo) {
925                         r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
926                                                  1 + p->gang_size);
927                         drm_exec_retry_on_contention(&p->exec);
928                         if (unlikely(r))
929                                 goto out_free_user_pages;
930                 }
931         }
932
933         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
934                 struct mm_struct *usermm;
935
936                 usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
937                 if (usermm && usermm != current->mm) {
938                         r = -EPERM;
939                         goto out_free_user_pages;
940                 }
941
942                 if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
943                     e->user_invalidated && e->user_pages) {
944                         amdgpu_bo_placement_from_domain(e->bo,
945                                                         AMDGPU_GEM_DOMAIN_CPU);
946                         r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
947                                             &ctx);
948                         if (r)
949                                 goto out_free_user_pages;
950
951                         amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
952                                                      e->user_pages);
953                 }
954
955                 kvfree(e->user_pages);
956                 e->user_pages = NULL;
957         }
958
959         amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
960                                           &p->bytes_moved_vis_threshold);
961         p->bytes_moved = 0;
962         p->bytes_moved_vis = 0;
963
964         r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
965                                       amdgpu_cs_bo_validate, p);
966         if (r) {
967                 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
968                 goto out_free_user_pages;
969         }
970
971         drm_exec_for_each_locked_object(&p->exec, index, obj) {
972                 r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
973                 if (unlikely(r))
974                         goto out_free_user_pages;
975         }
976
977         if (p->uf_bo) {
978                 r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
979                 if (unlikely(r))
980                         goto out_free_user_pages;
981
982                 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
983         }
984
985         amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
986                                      p->bytes_moved_vis);
987
988         for (i = 0; i < p->gang_size; ++i)
989                 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
990                                          p->bo_list->gws_obj,
991                                          p->bo_list->oa_obj);
992         return 0;
993
994 out_free_user_pages:
995         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
996                 struct amdgpu_bo *bo = e->bo;
997
998                 if (!e->user_pages)
999                         continue;
1000                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
1001                 kvfree(e->user_pages);
1002                 e->user_pages = NULL;
1003                 e->range = NULL;
1004         }
1005         mutex_unlock(&p->bo_list->bo_list_mutex);
1006         return r;
1007 }
1008
1009 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1010 {
1011         int i, j;
1012
1013         if (!trace_amdgpu_cs_enabled())
1014                 return;
1015
1016         for (i = 0; i < p->gang_size; ++i) {
1017                 struct amdgpu_job *job = p->jobs[i];
1018
1019                 for (j = 0; j < job->num_ibs; ++j)
1020                         trace_amdgpu_cs(p, job, &job->ibs[j]);
1021         }
1022 }
1023
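/*
 * Rings that emulate a VM (UVD/VCE) need the IB contents on the CPU: the
 * backing BO is kmapped and the IB is either copied and parsed (parse_cs)
 * or patched in place (patch_cs_in_place).
 */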
1024 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1025                                struct amdgpu_job *job)
1026 {
1027         struct amdgpu_ring *ring = amdgpu_job_ring(job);
1028         unsigned int i;
1029         int r;
1030
1031         /* Only for UVD/VCE VM emulation */
1032         if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1033                 return 0;
1034
1035         for (i = 0; i < job->num_ibs; ++i) {
1036                 struct amdgpu_ib *ib = &job->ibs[i];
1037                 struct amdgpu_bo_va_mapping *m;
1038                 struct amdgpu_bo *aobj;
1039                 uint64_t va_start;
1040                 uint8_t *kptr;
1041
1042                 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1043                 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1044                 if (r) {
1045                         DRM_ERROR("IB va_start is invalid\n");
1046                         return r;
1047                 }
1048
1049                 if ((va_start + ib->length_dw * 4) >
1050                     (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1051                         DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1052                         return -EINVAL;
1053                 }
1054
1055                 /* the IB should be reserved at this point */
1056                 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1057                 if (r)
1058                         return r;
1059
1060                 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1061
1062                 if (ring->funcs->parse_cs) {
1063                         memcpy(ib->ptr, kptr, ib->length_dw * 4);
1064                         amdgpu_bo_kunmap(aobj);
1065
1066                         r = amdgpu_ring_parse_cs(ring, p, job, ib);
1067                         if (r)
1068                                 return r;
1069                 } else {
1070                         ib->ptr = (uint32_t *)kptr;
1071                         r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1072                         amdgpu_bo_kunmap(aobj);
1073                         if (r)
1074                                 return r;
1075                 }
1076         }
1077
1078         return 0;
1079 }
1080
1081 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1082 {
1083         unsigned int i;
1084         int r;
1085
1086         for (i = 0; i < p->gang_size; ++i) {
1087                 r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1088                 if (r)
1089                         return r;
1090         }
1091         return 0;
1092 }
1093
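/*
 * Bring the VM up to date before submission: clear freed mappings, update
 * the PRT, CSA and per-BO virtual addresses, handle moved BOs and the page
 * directories, and turn every resulting page table update fence into a
 * dependency of the submission via p->sync.
 */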
1094 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1095 {
1096         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1097         struct amdgpu_job *job = p->gang_leader;
1098         struct amdgpu_device *adev = p->adev;
1099         struct amdgpu_vm *vm = &fpriv->vm;
1100         struct amdgpu_bo_list_entry *e;
1101         struct amdgpu_bo_va *bo_va;
1102         unsigned int i;
1103         int r;
1104
1105         r = amdgpu_vm_clear_freed(adev, vm, NULL);
1106         if (r)
1107                 return r;
1108
1109         r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1110         if (r)
1111                 return r;
1112
1113         r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1114         if (r)
1115                 return r;
1116
1117         if (fpriv->csa_va) {
1118                 bo_va = fpriv->csa_va;
1119                 BUG_ON(!bo_va);
1120                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1121                 if (r)
1122                         return r;
1123
1124                 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1125                 if (r)
1126                         return r;
1127         }
1128
1129         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1130                 bo_va = e->bo_va;
1131                 if (bo_va == NULL)
1132                         continue;
1133
1134                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1135                 if (r)
1136                         return r;
1137
1138                 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1139                 if (r)
1140                         return r;
1141         }
1142
1143         r = amdgpu_vm_handle_moved(adev, vm);
1144         if (r)
1145                 return r;
1146
1147         r = amdgpu_vm_update_pdes(adev, vm, false);
1148         if (r)
1149                 return r;
1150
1151         r = amdgpu_sync_fence(&p->sync, vm->last_update);
1152         if (r)
1153                 return r;
1154
1155         for (i = 0; i < p->gang_size; ++i) {
1156                 job = p->jobs[i];
1157
1158                 if (!job->vm)
1159                         continue;
1160
1161                 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1162         }
1163
1164         if (amdgpu_vm_debug) {
1165                 /* Invalidate all BOs to test for userspace bugs */
1166                 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1167                         struct amdgpu_bo *bo = e->bo;
1168
1169                         /* ignore duplicates */
1170                         if (!bo)
1171                                 continue;
1172
1173                         amdgpu_vm_bo_invalidate(adev, bo, false);
1174                 }
1175         }
1176
1177         return 0;
1178 }
1179
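/*
 * Collect the remaining dependencies: implicit fences from every locked
 * reservation are added according to the BO's sync mode and pushed into
 * each gang member's job; fences coming from the gang leader's own ring
 * are kept on its explicit_sync so that a pipeline sync can be emitted
 * (see the comment in the loop below).
 */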
1180 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1181 {
1182         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1183         struct drm_gpu_scheduler *sched;
1184         struct drm_gem_object *obj;
1185         struct dma_fence *fence;
1186         unsigned long index;
1187         unsigned int i;
1188         int r;
1189
1190         r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1191         if (r) {
1192                 if (r != -ERESTARTSYS)
1193                         DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1194                 return r;
1195         }
1196
1197         drm_exec_for_each_locked_object(&p->exec, index, obj) {
1198                 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1199
1200                 struct dma_resv *resv = bo->tbo.base.resv;
1201                 enum amdgpu_sync_mode sync_mode;
1202
1203                 sync_mode = amdgpu_bo_explicit_sync(bo) ?
1204                         AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1205                 r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1206                                      &fpriv->vm);
1207                 if (r)
1208                         return r;
1209         }
1210
1211         for (i = 0; i < p->gang_size; ++i) {
1212                 r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1213                 if (r)
1214                         return r;
1215         }
1216
1217         sched = p->gang_leader->base.entity->rq->sched;
1218         while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1219                 struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1220
1221                 /*
1222                  * When we have a dependency it might be necessary to insert a
1223                  * pipeline sync to make sure that all caches etc. are flushed and
1224                  * the next job actually sees the results from the previous one
1225                  * before we start executing on the same scheduler ring.
1226                  */
1227                 if (!s_fence || s_fence->sched != sched) {
1228                         dma_fence_put(fence);
1229                         continue;
1230                 }
1231
1232                 r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1233                 dma_fence_put(fence);
1234                 if (r)
1235                         return r;
1236         }
1237         return 0;
1238 }
1239
1240 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1241 {
1242         int i;
1243
1244         for (i = 0; i < p->num_post_deps; ++i) {
1245                 if (p->post_deps[i].chain && p->post_deps[i].point) {
1246                         drm_syncobj_add_point(p->post_deps[i].syncobj,
1247                                               p->post_deps[i].chain,
1248                                               p->fence, p->post_deps[i].point);
1249                         p->post_deps[i].chain = NULL;
1250                 } else {
1251                         drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1252                                                   p->fence);
1253                 }
1254         }
1255 }
1256
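/*
 * Final submission step: the gang leader is made to depend on every other
 * member's scheduled fence, userptr pages are re-checked under the
 * notifier lock (any invalidation becomes -EAGAIN so userspace retries the
 * ioctl), the finished fences are attached to all reservations, and only
 * then are the jobs pushed to their entities.
 */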
1257 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1258                             union drm_amdgpu_cs *cs)
1259 {
1260         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1261         struct amdgpu_job *leader = p->gang_leader;
1262         struct amdgpu_bo_list_entry *e;
1263         struct drm_gem_object *gobj;
1264         unsigned long index;
1265         unsigned int i;
1266         uint64_t seq;
1267         int r;
1268
1269         for (i = 0; i < p->gang_size; ++i)
1270                 drm_sched_job_arm(&p->jobs[i]->base);
1271
1272         for (i = 0; i < p->gang_size; ++i) {
1273                 struct dma_fence *fence;
1274
1275                 if (p->jobs[i] == leader)
1276                         continue;
1277
1278                 fence = &p->jobs[i]->base.s_fence->scheduled;
1279                 dma_fence_get(fence);
1280                 r = drm_sched_job_add_dependency(&leader->base, fence);
1281                 if (r) {
1282                         dma_fence_put(fence);
1283                         return r;
1284                 }
1285         }
1286
1287         if (p->gang_size > 1) {
1288                 for (i = 0; i < p->gang_size; ++i)
1289                         amdgpu_job_set_gang_leader(p->jobs[i], leader);
1290         }
1291
1292         /* No memory allocation is allowed while holding the notifier lock.
1293          * The lock is held until amdgpu_cs_submit is finished and fence is
1294          * added to BOs.
1295          */
1296         mutex_lock(&p->adev->notifier_lock);
1297
1298         /* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
1299          * -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
1300          */
1301         r = 0;
1302         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1303                 r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
1304                                                         e->range);
1305                 e->range = NULL;
1306         }
1307         if (r) {
1308                 r = -EAGAIN;
1309                 mutex_unlock(&p->adev->notifier_lock);
1310                 return r;
1311         }
1312
1313         p->fence = dma_fence_get(&leader->base.s_fence->finished);
1314         drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1315
1316                 ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1317
1318                 /* Everybody except for the gang leader uses READ */
1319                 for (i = 0; i < p->gang_size; ++i) {
1320                         if (p->jobs[i] == leader)
1321                                 continue;
1322
1323                         dma_resv_add_fence(gobj->resv,
1324                                            &p->jobs[i]->base.s_fence->finished,
1325                                            DMA_RESV_USAGE_READ);
1326                 }
1327
1328                 /* The gang leader is remembered as writer */
1329                 dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1330         }
1331
1332         seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1333                                    p->fence);
1334         amdgpu_cs_post_dependencies(p);
1335
1336         if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1337             !p->ctx->preamble_presented) {
1338                 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1339                 p->ctx->preamble_presented = true;
1340         }
1341
1342         cs->out.handle = seq;
1343         leader->uf_sequence = seq;
1344
1345         amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1346         for (i = 0; i < p->gang_size; ++i) {
1347                 amdgpu_job_free_resources(p->jobs[i]);
1348                 trace_amdgpu_cs_ioctl(p->jobs[i]);
1349                 drm_sched_entity_push_job(&p->jobs[i]->base);
1350                 p->jobs[i] = NULL;
1351         }
1352
1353         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1354
1355         mutex_unlock(&p->adev->notifier_lock);
1356         mutex_unlock(&p->bo_list->bo_list_mutex);
1357         return 0;
1358 }
1359
1360 /* Cleanup the parser structure */
1361 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1362 {
1363         unsigned int i;
1364
1365         amdgpu_sync_free(&parser->sync);
1366         drm_exec_fini(&parser->exec);
1367
1368         for (i = 0; i < parser->num_post_deps; i++) {
1369                 drm_syncobj_put(parser->post_deps[i].syncobj);
1370                 kfree(parser->post_deps[i].chain);
1371         }
1372         kfree(parser->post_deps);
1373
1374         dma_fence_put(parser->fence);
1375
1376         if (parser->ctx)
1377                 amdgpu_ctx_put(parser->ctx);
1378         if (parser->bo_list)
1379                 amdgpu_bo_list_put(parser->bo_list);
1380
1381         for (i = 0; i < parser->nchunks; i++)
1382                 kvfree(parser->chunks[i].kdata);
1383         kvfree(parser->chunks);
1384         for (i = 0; i < parser->gang_size; ++i) {
1385                 if (parser->jobs[i])
1386                         amdgpu_job_free(parser->jobs[i]);
1387         }
1388         amdgpu_bo_unref(&parser->uf_bo);
1389 }
1390
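/*
 * Top level CS ioctl flow: pass 1 copies and validates the chunks and
 * allocates the jobs, pass 2 fills in IBs and dependencies, the BO list is
 * locked and validated, UVD/VCE IBs are patched, the VM is updated, the
 * remaining fences are synced and the gang is submitted. Errors after the
 * BO list was locked go through error_backoff to drop bo_list_mutex.
 */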
1391 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1392 {
1393         struct amdgpu_device *adev = drm_to_adev(dev);
1394         struct amdgpu_cs_parser parser;
1395         int r;
1396
1397         if (amdgpu_ras_intr_triggered())
1398                 return -EHWPOISON;
1399
1400         if (!adev->accel_working)
1401                 return -EBUSY;
1402
1403         r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1404         if (r) {
1405                 if (printk_ratelimit())
1406                         DRM_ERROR("Failed to initialize parser %d!\n", r);
1407                 return r;
1408         }
1409
1410         r = amdgpu_cs_pass1(&parser, data);
1411         if (r)
1412                 goto error_fini;
1413
1414         r = amdgpu_cs_pass2(&parser);
1415         if (r)
1416                 goto error_fini;
1417
1418         r = amdgpu_cs_parser_bos(&parser, data);
1419         if (r) {
1420                 if (r == -ENOMEM)
1421                         DRM_ERROR("Not enough memory for command submission!\n");
1422                 else if (r != -ERESTARTSYS && r != -EAGAIN)
1423                         DRM_ERROR("Failed to process the buffer list %d!\n", r);
1424                 goto error_fini;
1425         }
1426
1427         r = amdgpu_cs_patch_jobs(&parser);
1428         if (r)
1429                 goto error_backoff;
1430
1431         r = amdgpu_cs_vm_handling(&parser);
1432         if (r)
1433                 goto error_backoff;
1434
1435         r = amdgpu_cs_sync_rings(&parser);
1436         if (r)
1437                 goto error_backoff;
1438
1439         trace_amdgpu_cs_ibs(&parser);
1440
1441         r = amdgpu_cs_submit(&parser, data);
1442         if (r)
1443                 goto error_backoff;
1444
1445         amdgpu_cs_parser_fini(&parser);
1446         return 0;
1447
1448 error_backoff:
1449         mutex_unlock(&parser.bo_list->bo_list_mutex);
1450
1451 error_fini:
1452         amdgpu_cs_parser_fini(&parser);
1453         return r;
1454 }
1455
1456 /**
1457  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1458  *
1459  * @dev: drm device
1460  * @data: data from userspace
1461  * @filp: file private
1462  *
1463  * Wait for the command submission identified by handle to finish.
1464  */
1465 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1466                          struct drm_file *filp)
1467 {
1468         union drm_amdgpu_wait_cs *wait = data;
1469         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1470         struct drm_sched_entity *entity;
1471         struct amdgpu_ctx *ctx;
1472         struct dma_fence *fence;
1473         long r;
1474
1475         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1476         if (ctx == NULL)
1477                 return -EINVAL;
1478
1479         r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1480                                   wait->in.ring, &entity);
1481         if (r) {
1482                 amdgpu_ctx_put(ctx);
1483                 return r;
1484         }
1485
1486         fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1487         if (IS_ERR(fence))
1488                 r = PTR_ERR(fence);
1489         else if (fence) {
1490                 r = dma_fence_wait_timeout(fence, true, timeout);
1491                 if (r > 0 && fence->error)
1492                         r = fence->error;
1493                 dma_fence_put(fence);
1494         } else
1495                 r = 1;
1496
1497         amdgpu_ctx_put(ctx);
1498         if (r < 0)
1499                 return r;
1500
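	/* status is 1 while the submission is still running, 0 once it completed */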
1501         memset(wait, 0, sizeof(*wait));
1502         wait->out.status = (r == 0);
1503
1504         return 0;
1505 }
1506
1507 /**
1508  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1509  *
1510  * @adev: amdgpu device
1511  * @filp: file private
1512  * @user: drm_amdgpu_fence copied from user space
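 *
 * Look up the fence for the given context, entity and sequence number.
 * Returns the fence, NULL if it has already signaled, or an ERR_PTR on error.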
1513  */
1514 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1515                                              struct drm_file *filp,
1516                                              struct drm_amdgpu_fence *user)
1517 {
1518         struct drm_sched_entity *entity;
1519         struct amdgpu_ctx *ctx;
1520         struct dma_fence *fence;
1521         int r;
1522
1523         ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1524         if (ctx == NULL)
1525                 return ERR_PTR(-EINVAL);
1526
1527         r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1528                                   user->ring, &entity);
1529         if (r) {
1530                 amdgpu_ctx_put(ctx);
1531                 return ERR_PTR(r);
1532         }
1533
1534         fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1535         amdgpu_ctx_put(ctx);
1536
1537         return fence;
1538 }
1539
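/**
 * amdgpu_cs_fence_to_handle_ioctl - export a CS fence as a handle
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Look up the fence of a previous submission and export it as a syncobj
 * handle, a syncobj fd or a sync_file fd, depending on what userspace
 * requested.
 */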
1540 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1541                                     struct drm_file *filp)
1542 {
1543         struct amdgpu_device *adev = drm_to_adev(dev);
1544         union drm_amdgpu_fence_to_handle *info = data;
1545         struct dma_fence *fence;
1546         struct drm_syncobj *syncobj;
1547         struct sync_file *sync_file;
1548         int fd, r;
1549
1550         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1551         if (IS_ERR(fence))
1552                 return PTR_ERR(fence);
1553
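	/* No fence means it already signaled, export an always-signaled stub instead */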
1554         if (!fence)
1555                 fence = dma_fence_get_stub();
1556
1557         switch (info->in.what) {
1558         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1559                 r = drm_syncobj_create(&syncobj, 0, fence);
1560                 dma_fence_put(fence);
1561                 if (r)
1562                         return r;
1563                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1564                 drm_syncobj_put(syncobj);
1565                 return r;
1566
1567         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1568                 r = drm_syncobj_create(&syncobj, 0, fence);
1569                 dma_fence_put(fence);
1570                 if (r)
1571                         return r;
1572                 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1573                 drm_syncobj_put(syncobj);
1574                 return r;
1575
1576         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1577                 fd = get_unused_fd_flags(O_CLOEXEC);
1578                 if (fd < 0) {
1579                         dma_fence_put(fence);
1580                         return fd;
1581                 }
1582
1583                 sync_file = sync_file_create(fence);
1584                 dma_fence_put(fence);
1585                 if (!sync_file) {
1586                         put_unused_fd(fd);
1587                         return -ENOMEM;
1588                 }
1589
1590                 fd_install(fd, sync_file->file);
1591                 info->out.handle = fd;
1592                 return 0;
1593
1594         default:
1595                 dma_fence_put(fence);
1596                 return -EINVAL;
1597         }
1598 }
1599
1600 /**
1601  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1602  *
1603  * @adev: amdgpu device
1604  * @filp: file private
1605  * @wait: wait parameters
1606  * @fences: array of drm_amdgpu_fence
1607  */
1608 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1609                                      struct drm_file *filp,
1610                                      union drm_amdgpu_wait_fences *wait,
1611                                      struct drm_amdgpu_fence *fences)
1612 {
1613         uint32_t fence_count = wait->in.fence_count;
1614         unsigned int i;
1615         long r = 1;
1616
1617         for (i = 0; i < fence_count; i++) {
1618                 struct dma_fence *fence;
1619                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1620
1621                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1622                 if (IS_ERR(fence))
1623                         return PTR_ERR(fence);
1624                 else if (!fence)
1625                         continue;
1626
1627                 r = dma_fence_wait_timeout(fence, true, timeout);
1628                 if (r > 0 && fence->error)
1629                         r = fence->error;
1630
1631                 dma_fence_put(fence);
1632                 if (r < 0)
1633                         return r;
1634
1635                 if (r == 0)
1636                         break;
1637         }
1638
1639         memset(wait, 0, sizeof(*wait));
1640         wait->out.status = (r > 0);
1641
1642         return 0;
1643 }
1644
1645 /**
1646  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1647  *
1648  * @adev: amdgpu device
1649  * @filp: file private
1650  * @wait: wait parameters
1651  * @fences: array of drm_amdgpu_fence
1652  */
1653 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1654                                     struct drm_file *filp,
1655                                     union drm_amdgpu_wait_fences *wait,
1656                                     struct drm_amdgpu_fence *fences)
1657 {
1658         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1659         uint32_t fence_count = wait->in.fence_count;
1660         uint32_t first = ~0;
1661         struct dma_fence **array;
1662         unsigned int i;
1663         long r;
1664
1665         /* Prepare the fence array */
1666         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1667
1668         if (array == NULL)
1669                 return -ENOMEM;
1670
1671         for (i = 0; i < fence_count; i++) {
1672                 struct dma_fence *fence;
1673
1674                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1675                 if (IS_ERR(fence)) {
1676                         r = PTR_ERR(fence);
1677                         goto err_free_fence_array;
1678                 } else if (fence) {
1679                         array[i] = fence;
1680                 } else { /* NULL, the fence has been already signaled */
1681                         r = 1;
1682                         first = i;
1683                         goto out;
1684                 }
1685         }
1686
1687         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1688                                        &first);
1689         if (r < 0)
1690                 goto err_free_fence_array;
1691
1692 out:
1693         memset(wait, 0, sizeof(*wait));
1694         wait->out.status = (r > 0);
1695         wait->out.first_signaled = first;
1696
1697         if (first < fence_count && array[first])
1698                 r = array[first]->error;
1699         else
1700                 r = 0;
1701
1702 err_free_fence_array:
1703         for (i = 0; i < fence_count; i++)
1704                 dma_fence_put(array[i]);
1705         kfree(array);
1706
1707         return r;
1708 }
1709
1710 /**
1711  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1712  *
1713  * @dev: drm device
1714  * @data: data from userspace
1715  * @filp: file private
1716  */
1717 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1718                                 struct drm_file *filp)
1719 {
1720         struct amdgpu_device *adev = drm_to_adev(dev);
1721         union drm_amdgpu_wait_fences *wait = data;
1722         uint32_t fence_count = wait->in.fence_count;
1723         struct drm_amdgpu_fence *fences_user;
1724         struct drm_amdgpu_fence *fences;
1725         int r;
1726
1727         /* Get the fences from userspace */
1728         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1729                         GFP_KERNEL);
1730         if (fences == NULL)
1731                 return -ENOMEM;
1732
1733         fences_user = u64_to_user_ptr(wait->in.fences);
1734         if (copy_from_user(fences, fences_user,
1735                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1736                 r = -EFAULT;
1737                 goto err_free_fences;
1738         }
1739
1740         if (wait->in.wait_all)
1741                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1742         else
1743                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1744
1745 err_free_fences:
1746         kfree(fences);
1747
1748         return r;
1749 }
1750
1751 /**
1752  * amdgpu_cs_find_mapping - find bo_va for VM address
1753  *
1754  * @parser: command submission parser context
1755  * @addr: VM address
1756  * @bo: resulting BO of the mapping found
1757  * @map: Placeholder to return found BO mapping
1758  *
1759  * Search the buffer objects in the command submission context for a certain
1760  * virtual memory address and return the BO and mapping through @bo and @map.
1761  * Returns 0 on success, a negative error code otherwise.
1762  */
1763 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1764                            uint64_t addr, struct amdgpu_bo **bo,
1765                            struct amdgpu_bo_va_mapping **map)
1766 {
1767         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1768         struct ttm_operation_ctx ctx = { false, false };
1769         struct amdgpu_vm *vm = &fpriv->vm;
1770         struct amdgpu_bo_va_mapping *mapping;
1771         int r;
1772
1773         addr /= AMDGPU_GPU_PAGE_SIZE;
1774
1775         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1776         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1777                 return -EINVAL;
1778
1779         *bo = mapping->bo_va->base.bo;
1780         *map = mapping;
1781
1782         /* Double check that the BO is reserved by this CS */
1783         if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1784                 return -EINVAL;
1785
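	/*
	 * The CS parsers (e.g. UVD/VCE IB patching) may need to CPU-map this
	 * BO, which requires a contiguous VRAM placement.
	 */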
1786         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1787                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1788                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1789                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1790                 if (r)
1791                         return r;
1792         }
1793
1794         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1795 }