drm/amdgpu: reorder CS code
platform/kernel/linux-starfive.git: drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include "amdgpu_cs.h"
36 #include "amdgpu.h"
37 #include "amdgpu_trace.h"
38 #include "amdgpu_gmc.h"
39 #include "amdgpu_gem.h"
40 #include "amdgpu_ras.h"
41
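/* Initialize the parser from the CS ioctl arguments: check that at least one
 * chunk was submitted, look up the submission context and reject contexts
 * that have already been marked guilty.
 */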
42 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
43                                  struct amdgpu_device *adev,
44                                  struct drm_file *filp,
45                                  union drm_amdgpu_cs *cs)
46 {
47         struct amdgpu_fpriv *fpriv = filp->driver_priv;
48
49         if (cs->in.num_chunks == 0)
50                 return -EINVAL;
51
52         memset(p, 0, sizeof(*p));
53         p->adev = adev;
54         p->filp = filp;
55
56         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
57         if (!p->ctx)
58                 return -EINVAL;
59
60         if (atomic_read(&p->ctx->guilty)) {
61                 amdgpu_ctx_put(p->ctx);
62                 return -ECANCELED;
63         }
64         return 0;
65 }
66
67 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
68                            struct drm_amdgpu_cs_chunk_ib *chunk_ib,
69                            unsigned int *num_ibs)
70 {
71         ++(*num_ibs);
72         return 0;
73 }
74
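/* First-pass handling of the user fence chunk: look up the GEM object backing
 * the fence, check that it is a single page and not a userptr BO, attach it
 * to the user fence entry and remember the fence offset.
 */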
75 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
76                                    struct drm_amdgpu_cs_chunk_fence *data,
77                                    uint32_t *offset)
78 {
79         struct drm_gem_object *gobj;
80         struct amdgpu_bo *bo;
81         unsigned long size;
82         int r;
83
84         gobj = drm_gem_object_lookup(p->filp, data->handle);
85         if (gobj == NULL)
86                 return -EINVAL;
87
88         bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
89         p->uf_entry.priority = 0;
90         p->uf_entry.tv.bo = &bo->tbo;
91         /* One for TTM and two for the CS job */
92         p->uf_entry.tv.num_shared = 3;
93
94         drm_gem_object_put(gobj);
95
96         size = amdgpu_bo_size(bo);
97         if (size != PAGE_SIZE || (data->offset + 8) > size) {
98                 r = -EINVAL;
99                 goto error_unref;
100         }
101
102         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
103                 r = -EINVAL;
104                 goto error_unref;
105         }
106
107         *offset = data->offset;
108
109         return 0;
110
111 error_unref:
112         amdgpu_bo_unref(&bo);
113         return r;
114 }
115
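/* First-pass handling of the BO handles chunk: build the BO list for this
 * submission from the array of entries passed in the chunk.
 */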
116 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
117                                    struct drm_amdgpu_bo_list_in *data)
118 {
119         struct drm_amdgpu_bo_list_entry *info;
120         int r;
121
122         r = amdgpu_bo_create_list_entry_array(data, &info);
123         if (r)
124                 return r;
125
126         r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
127                                   &p->bo_list);
128         if (r)
129                 goto error_free;
130
131         kvfree(info);
132         return 0;
133
134 error_free:
135         kvfree(info);
136
137         return r;
138 }
139
140 /* Copy the data from userspace and go over it the first time */
141 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
142                            union drm_amdgpu_cs *cs)
143 {
144         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
145         struct amdgpu_vm *vm = &fpriv->vm;
146         uint64_t *chunk_array_user;
147         uint64_t *chunk_array;
148         unsigned size, num_ibs = 0;
149         uint32_t uf_offset = 0;
150         int ret;
151         int i;
152
153         chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
154                                      GFP_KERNEL);
155         if (!chunk_array)
156                 return -ENOMEM;
157
158         /* get chunks */
159         chunk_array_user = u64_to_user_ptr(cs->in.chunks);
160         if (copy_from_user(chunk_array, chunk_array_user,
161                            sizeof(uint64_t)*cs->in.num_chunks)) {
162                 ret = -EFAULT;
163                 goto free_chunk;
164         }
165
166         p->nchunks = cs->in.num_chunks;
167         p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
168                             GFP_KERNEL);
169         if (!p->chunks) {
170                 ret = -ENOMEM;
171                 goto free_chunk;
172         }
173
174         for (i = 0; i < p->nchunks; i++) {
175                 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
176                 struct drm_amdgpu_cs_chunk user_chunk;
177                 uint32_t __user *cdata;
178
179                 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
180                 if (copy_from_user(&user_chunk, chunk_ptr,
181                                        sizeof(struct drm_amdgpu_cs_chunk))) {
182                         ret = -EFAULT;
183                         i--;
184                         goto free_partial_kdata;
185                 }
186                 p->chunks[i].chunk_id = user_chunk.chunk_id;
187                 p->chunks[i].length_dw = user_chunk.length_dw;
188
189                 size = p->chunks[i].length_dw;
190                 cdata = u64_to_user_ptr(user_chunk.chunk_data);
191
192                 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
193                                                     GFP_KERNEL);
194                 if (p->chunks[i].kdata == NULL) {
195                         ret = -ENOMEM;
196                         i--;
197                         goto free_partial_kdata;
198                 }
199                 size *= sizeof(uint32_t);
200                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
201                         ret = -EFAULT;
202                         goto free_partial_kdata;
203                 }
204
205                 /* Assume the worst on the following checks */
206                 ret = -EINVAL;
207                 switch (p->chunks[i].chunk_id) {
208                 case AMDGPU_CHUNK_ID_IB:
209                         if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
210                                 goto free_partial_kdata;
211
212                         ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, &num_ibs);
213                         if (ret)
214                                 goto free_partial_kdata;
215                         break;
216
217                 case AMDGPU_CHUNK_ID_FENCE:
218                         if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
219                                 goto free_partial_kdata;
220
221                         ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
222                                                       &uf_offset);
223                         if (ret)
224                                 goto free_partial_kdata;
225                         break;
226
227                 case AMDGPU_CHUNK_ID_BO_HANDLES:
228                         if (size < sizeof(struct drm_amdgpu_bo_list_in))
229                                 goto free_partial_kdata;
230
231                         ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
232                         if (ret)
233                                 goto free_partial_kdata;
234                         break;
235
236                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
237                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
238                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
239                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
240                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
241                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
242                         break;
243
244                 default:
245                         goto free_partial_kdata;
246                 }
247         }
248
249         ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
250         if (ret)
251                 goto free_all_kdata;
252
253         if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
254                 ret = -ECANCELED;
255                 goto free_all_kdata;
256         }
257
258         if (p->uf_entry.tv.bo)
259                 p->job->uf_addr = uf_offset;
260         kvfree(chunk_array);
261
262         /* Use this opportunity to fill in task info for the vm */
263         amdgpu_vm_set_task_info(vm);
264
265         return 0;
266
267 free_all_kdata:
268         i = p->nchunks - 1;
269 free_partial_kdata:
270         for (; i >= 0; i--)
271                 kvfree(p->chunks[i].kdata);
272         kvfree(p->chunks);
273         p->chunks = NULL;
274         p->nchunks = 0;
275 free_chunk:
276         kvfree(chunk_array);
277
278         return ret;
279 }
280
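/* Second pass over the IB chunks: resolve the scheduler entity, enforce the
 * CE/DE preemption limits, set up the job's IBs and check that the selected
 * ring supports a user fence if one was requested.
 */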
281 static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
282                              struct amdgpu_cs_parser *parser)
283 {
284         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
285         struct amdgpu_vm *vm = &fpriv->vm;
286         int r, ce_preempt = 0, de_preempt = 0;
287         struct amdgpu_ring *ring;
288         int i, j;
289
290         for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
291                 struct amdgpu_cs_chunk *chunk;
292                 struct amdgpu_ib *ib;
293                 struct drm_amdgpu_cs_chunk_ib *chunk_ib;
294                 struct drm_sched_entity *entity;
295
296                 chunk = &parser->chunks[i];
297                 ib = &parser->job->ibs[j];
298                 chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
299
300                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
301                         continue;
302
303                 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
304                     chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
305                         if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
306                                 ce_preempt++;
307                         else
308                                 de_preempt++;
309
310                         /* each GFX submission allows at most one preemptible IB for CE and one for DE */
311                         if (ce_preempt > 1 || de_preempt > 1)
312                                 return -EINVAL;
313                 }
314
315                 r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
316                                           chunk_ib->ip_instance, chunk_ib->ring,
317                                           &entity);
318                 if (r)
319                         return r;
320
321                 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
322                         parser->job->preamble_status |=
323                                 AMDGPU_PREAMBLE_IB_PRESENT;
324
325                 if (parser->entity && parser->entity != entity)
326                         return -EINVAL;
327
328                 /* Return an error if there is no run queue associated with
329                  * this entity, possibly because of a disabled HW IP. */
330                 if (entity->rq == NULL)
331                         return -EINVAL;
332
333                 parser->entity = entity;
334
335                 ring = to_amdgpu_ring(entity->rq->sched);
336                 r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
337                                    chunk_ib->ib_bytes : 0,
338                                    AMDGPU_IB_POOL_DELAYED, ib);
339                 if (r) {
340                         DRM_ERROR("Failed to get ib !\n");
341                         return r;
342                 }
343
344                 ib->gpu_addr = chunk_ib->va_start;
345                 ib->length_dw = chunk_ib->ib_bytes / 4;
346                 ib->flags = chunk_ib->flags;
347
348                 j++;
349         }
350
351         /* MM engine doesn't support user fences */
352         ring = to_amdgpu_ring(parser->entity->rq->sched);
353         if (parser->job->uf_addr && ring->funcs->no_user_fence)
354                 return -EINVAL;
355
356         return 0;
357 }
358
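/* Add the fences described by a DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk
 * to the job's sync object.
 */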
359 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
360                                        struct amdgpu_cs_chunk *chunk)
361 {
362         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
363         unsigned num_deps;
364         int i, r;
365         struct drm_amdgpu_cs_chunk_dep *deps;
366
367         deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
368         num_deps = chunk->length_dw * 4 /
369                 sizeof(struct drm_amdgpu_cs_chunk_dep);
370
371         for (i = 0; i < num_deps; ++i) {
372                 struct amdgpu_ctx *ctx;
373                 struct drm_sched_entity *entity;
374                 struct dma_fence *fence;
375
376                 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
377                 if (ctx == NULL)
378                         return -EINVAL;
379
380                 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
381                                           deps[i].ip_instance,
382                                           deps[i].ring, &entity);
383                 if (r) {
384                         amdgpu_ctx_put(ctx);
385                         return r;
386                 }
387
388                 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
389                 amdgpu_ctx_put(ctx);
390
391                 if (IS_ERR(fence))
392                         return PTR_ERR(fence);
393                 else if (!fence)
394                         continue;
395
396                 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
397                         struct drm_sched_fence *s_fence;
398                         struct dma_fence *old = fence;
399
400                         s_fence = to_drm_sched_fence(fence);
401                         fence = dma_fence_get(&s_fence->scheduled);
402                         dma_fence_put(old);
403                 }
404
405                 r = amdgpu_sync_fence(&p->job->sync, fence);
406                 dma_fence_put(fence);
407                 if (r)
408                         return r;
409         }
410         return 0;
411 }
412
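/* Look up the fence behind a syncobj handle, optionally at a timeline point,
 * and add it to the job's sync object.
 */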
413 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
414                                                  uint32_t handle, u64 point,
415                                                  u64 flags)
416 {
417         struct dma_fence *fence;
418         int r;
419
420         r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
421         if (r) {
422                 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
423                           handle, point, r);
424                 return r;
425         }
426
427         r = amdgpu_sync_fence(&p->job->sync, fence);
428         dma_fence_put(fence);
429
430         return r;
431 }
432
433 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
434                                             struct amdgpu_cs_chunk *chunk)
435 {
436         struct drm_amdgpu_cs_chunk_sem *deps;
437         unsigned num_deps;
438         int i, r;
439
440         deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
441         num_deps = chunk->length_dw * 4 /
442                 sizeof(struct drm_amdgpu_cs_chunk_sem);
443         for (i = 0; i < num_deps; ++i) {
444                 r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
445                                                           0, 0);
446                 if (r)
447                         return r;
448         }
449
450         return 0;
451 }
452
453 static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
454                                                      struct amdgpu_cs_chunk *chunk)
455 {
456         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
457         unsigned num_deps;
458         int i, r;
459
460         syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
461         num_deps = chunk->length_dw * 4 /
462                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
463         for (i = 0; i < num_deps; ++i) {
464                 r = amdgpu_syncobj_lookup_and_add_to_sync(p,
465                                                           syncobj_deps[i].handle,
466                                                           syncobj_deps[i].point,
467                                                           syncobj_deps[i].flags);
468                 if (r)
469                         return r;
470         }
471
472         return 0;
473 }
474
475 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
476                                              struct amdgpu_cs_chunk *chunk)
477 {
478         struct drm_amdgpu_cs_chunk_sem *deps;
479         unsigned num_deps;
480         int i;
481
482         deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
483         num_deps = chunk->length_dw * 4 /
484                 sizeof(struct drm_amdgpu_cs_chunk_sem);
485
486         if (p->post_deps)
487                 return -EINVAL;
488
489         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
490                                      GFP_KERNEL);
491         p->num_post_deps = 0;
492
493         if (!p->post_deps)
494                 return -ENOMEM;
495
496
497         for (i = 0; i < num_deps; ++i) {
498                 p->post_deps[i].syncobj =
499                         drm_syncobj_find(p->filp, deps[i].handle);
500                 if (!p->post_deps[i].syncobj)
501                         return -EINVAL;
502                 p->post_deps[i].chain = NULL;
503                 p->post_deps[i].point = 0;
504                 p->num_post_deps++;
505         }
506
507         return 0;
508 }
509
510
511 static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
512                                                       struct amdgpu_cs_chunk *chunk)
513 {
514         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
515         unsigned num_deps;
516         int i;
517
518         syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
519         num_deps = chunk->length_dw * 4 /
520                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
521
522         if (p->post_deps)
523                 return -EINVAL;
524
525         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
526                                      GFP_KERNEL);
527         p->num_post_deps = 0;
528
529         if (!p->post_deps)
530                 return -ENOMEM;
531
532         for (i = 0; i < num_deps; ++i) {
533                 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
534
535                 dep->chain = NULL;
536                 if (syncobj_deps[i].point) {
537                         dep->chain = dma_fence_chain_alloc();
538                         if (!dep->chain)
539                                 return -ENOMEM;
540                 }
541
542                 dep->syncobj = drm_syncobj_find(p->filp,
543                                                 syncobj_deps[i].handle);
544                 if (!dep->syncobj) {
545                         dma_fence_chain_free(dep->chain);
546                         return -EINVAL;
547                 }
548                 dep->point = syncobj_deps[i].point;
549                 p->num_post_deps++;
550         }
551
552         return 0;
553 }
554
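/* Walk all chunks and process the dependency and syncobj wait/signal chunks. */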
555 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
556                                   struct amdgpu_cs_parser *p)
557 {
558         int i, r;
559
560         for (i = 0; i < p->nchunks; ++i) {
561                 struct amdgpu_cs_chunk *chunk;
562
563                 chunk = &p->chunks[i];
564
565                 switch (chunk->chunk_id) {
566                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
567                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
568                         r = amdgpu_cs_process_fence_dep(p, chunk);
569                         if (r)
570                                 return r;
571                         break;
572                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
573                         r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
574                         if (r)
575                                 return r;
576                         break;
577                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
578                         r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
579                         if (r)
580                                 return r;
581                         break;
582                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
583                         r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
584                         if (r)
585                                 return r;
586                         break;
587                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
588                         r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
589                         if (r)
590                                 return r;
591                         break;
592                 }
593         }
594
595         return 0;
596 }
597
598 /* Convert microseconds to bytes. */
599 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
600 {
601         if (us <= 0 || !adev->mm_stats.log2_max_MBps)
602                 return 0;
603
604         /* Since accum_us is incremented by a million per second, just
605          * multiply it by the number of MB/s to get the number of bytes.
606          */
607         return us << adev->mm_stats.log2_max_MBps;
608 }
609
610 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
611 {
612         if (!adev->mm_stats.log2_max_MBps)
613                 return 0;
614
615         return bytes >> adev->mm_stats.log2_max_MBps;
616 }
617
618 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
619  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
620  * which means it can go over the threshold once. If that happens, the driver
621  * will be in debt and no other buffer migrations can be done until that debt
622  * is repaid.
623  *
624  * This approach allows moving a buffer of any size (it's important to allow
625  * that).
626  *
627  * The currency is simply time in microseconds and it increases as the clock
628  * ticks. The accumulated microseconds (us) are converted to bytes and
629  * returned.
630  */
631 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
632                                               u64 *max_bytes,
633                                               u64 *max_vis_bytes)
634 {
635         s64 time_us, increment_us;
636         u64 free_vram, total_vram, used_vram;
637         /* Allow a maximum of 200 accumulated ms. This is basically per-IB
638          * throttling.
639          *
640          * It means that in order to get full max MBps, at least 5 IBs per
641          * second must be submitted and not more than 200ms apart from each
642          * other.
643          */
644         const s64 us_upper_bound = 200000;
645
646         if (!adev->mm_stats.log2_max_MBps) {
647                 *max_bytes = 0;
648                 *max_vis_bytes = 0;
649                 return;
650         }
651
652         total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
653         used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
654         free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
655
656         spin_lock(&adev->mm_stats.lock);
657
658         /* Increase the amount of accumulated us. */
659         time_us = ktime_to_us(ktime_get());
660         increment_us = time_us - adev->mm_stats.last_update_us;
661         adev->mm_stats.last_update_us = time_us;
662         adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
663                                       us_upper_bound);
664
665         /* This prevents the short period of low performance when the VRAM
666          * usage is low and the driver is in debt or doesn't have enough
667          * accumulated us to fill VRAM quickly.
668          *
669          * The situation can occur in these cases:
670          * - a lot of VRAM is freed by userspace
671          * - the presence of a big buffer causes a lot of evictions
672          *   (solution: split buffers into smaller ones)
673          *
674          * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
675          * accum_us to a positive number.
676          */
677         if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
678                 s64 min_us;
679
680                 /* Be more aggressive on dGPUs. Try to fill a portion of free
681                  * VRAM now.
682                  */
683                 if (!(adev->flags & AMD_IS_APU))
684                         min_us = bytes_to_us(adev, free_vram / 4);
685                 else
686                         min_us = 0; /* Reset accum_us on APUs. */
687
688                 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
689         }
690
691         /* This is set to 0 if the driver is in debt to disallow (optional)
692          * buffer moves.
693          */
694         *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
695
696         /* Do the same for visible VRAM if half of it is free */
697         if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
698                 u64 total_vis_vram = adev->gmc.visible_vram_size;
699                 u64 used_vis_vram =
700                   amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
701
702                 if (used_vis_vram < total_vis_vram) {
703                         u64 free_vis_vram = total_vis_vram - used_vis_vram;
704                         adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
705                                                           increment_us, us_upper_bound);
706
707                         if (free_vis_vram >= total_vis_vram / 2)
708                                 adev->mm_stats.accum_us_vis =
709                                         max(bytes_to_us(adev, free_vis_vram / 2),
710                                             adev->mm_stats.accum_us_vis);
711                 }
712
713                 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
714         } else {
715                 *max_vis_bytes = 0;
716         }
717
718         spin_unlock(&adev->mm_stats.lock);
719 }
720
721 /* Report how many bytes have really been moved for the last command
722  * submission. This can result in a debt that can stop buffer migrations
723  * temporarily.
724  */
725 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
726                                   u64 num_vis_bytes)
727 {
728         spin_lock(&adev->mm_stats.lock);
729         adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
730         adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
731         spin_unlock(&adev->mm_stats.lock);
732 }
733
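/* Validate a single BO. The preferred placement is only used as long as the
 * move budget computed by amdgpu_cs_get_threshold_for_moves() is not
 * exhausted; otherwise the BO is validated in its allowed domains.
 */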
734 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
735 {
736         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
737         struct amdgpu_cs_parser *p = param;
738         struct ttm_operation_ctx ctx = {
739                 .interruptible = true,
740                 .no_wait_gpu = false,
741                 .resv = bo->tbo.base.resv
742         };
743         uint32_t domain;
744         int r;
745
746         if (bo->tbo.pin_count)
747                 return 0;
748
749         /* Don't move this buffer if we have depleted our allowance
750          * to move it. Don't move anything if the threshold is zero.
751          */
752         if (p->bytes_moved < p->bytes_moved_threshold &&
753             (!bo->tbo.base.dma_buf ||
754             list_empty(&bo->tbo.base.dma_buf->attachments))) {
755                 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
756                     (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
757                         /* And don't move a CPU_ACCESS_REQUIRED BO to limited
758                          * visible VRAM if we've depleted our allowance to do
759                          * that.
760                          */
761                         if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
762                                 domain = bo->preferred_domains;
763                         else
764                                 domain = bo->allowed_domains;
765                 } else {
766                         domain = bo->preferred_domains;
767                 }
768         } else {
769                 domain = bo->allowed_domains;
770         }
771
772 retry:
773         amdgpu_bo_placement_from_domain(bo, domain);
774         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
775
776         p->bytes_moved += ctx.bytes_moved;
777         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
778             amdgpu_bo_in_cpu_visible_vram(bo))
779                 p->bytes_moved_vis += ctx.bytes_moved;
780
781         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
782                 domain = bo->allowed_domains;
783                 goto retry;
784         }
785
786         return r;
787 }
788
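/* Validate all BOs on the given list. Userptr BOs whose pages were
 * invalidated are first moved to the CPU domain so the new pages can be bound.
 */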
789 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
790                             struct list_head *validated)
791 {
792         struct ttm_operation_ctx ctx = { true, false };
793         struct amdgpu_bo_list_entry *lobj;
794         int r;
795
796         list_for_each_entry(lobj, validated, tv.head) {
797                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
798                 struct mm_struct *usermm;
799
800                 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
801                 if (usermm && usermm != current->mm)
802                         return -EPERM;
803
804                 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
805                     lobj->user_invalidated && lobj->user_pages) {
806                         amdgpu_bo_placement_from_domain(bo,
807                                                         AMDGPU_GEM_DOMAIN_CPU);
808                         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
809                         if (r)
810                                 return r;
811
812                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
813                                                      lobj->user_pages);
814                 }
815
816                 r = amdgpu_cs_bo_validate(p, bo);
817                 if (r)
818                         return r;
819
820                 kvfree(lobj->user_pages);
821                 lobj->user_pages = NULL;
822         }
823         return 0;
824 }
825
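/* Gather, reserve and validate all BOs referenced by the submission,
 * including userptr BOs and the optional user fence BO.
 */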
826 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
827                                 union drm_amdgpu_cs *cs)
828 {
829         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
830         struct amdgpu_vm *vm = &fpriv->vm;
831         struct amdgpu_bo_list_entry *e;
832         struct list_head duplicates;
833         int r;
834
835         INIT_LIST_HEAD(&p->validated);
836
837         /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
838         if (cs->in.bo_list_handle) {
839                 if (p->bo_list)
840                         return -EINVAL;
841
842                 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
843                                        &p->bo_list);
844                 if (r)
845                         return r;
846         } else if (!p->bo_list) {
847                 /* Create an empty bo_list when no handle is provided */
848                 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
849                                           &p->bo_list);
850                 if (r)
851                         return r;
852         }
853
854         mutex_lock(&p->bo_list->bo_list_mutex);
855
856         /* One for TTM and one for the CS job */
857         amdgpu_bo_list_for_each_entry(e, p->bo_list)
858                 e->tv.num_shared = 2;
859
860         amdgpu_bo_list_get_list(p->bo_list, &p->validated);
861
862         INIT_LIST_HEAD(&duplicates);
863         amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
864
865         if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
866                 list_add(&p->uf_entry.tv.head, &p->validated);
867
868         /* Get the userptr backing pages. If the pages were updated after being
869          * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
870          * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
871          */
872         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
873                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
874                 bool userpage_invalidated = false;
875                 int i;
876
877                 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
878                                         sizeof(struct page *),
879                                         GFP_KERNEL | __GFP_ZERO);
880                 if (!e->user_pages) {
881                         DRM_ERROR("kvmalloc_array failure\n");
882                         r = -ENOMEM;
883                         goto out_free_user_pages;
884                 }
885
886                 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
887                 if (r) {
888                         kvfree(e->user_pages);
889                         e->user_pages = NULL;
890                         goto out_free_user_pages;
891                 }
892
893                 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
894                         if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
895                                 userpage_invalidated = true;
896                                 break;
897                         }
898                 }
899                 e->user_invalidated = userpage_invalidated;
900         }
901
902         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
903                                    &duplicates);
904         if (unlikely(r != 0)) {
905                 if (r != -ERESTARTSYS)
906                         DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
907                 goto out_free_user_pages;
908         }
909
910         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
911                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
912
913                 e->bo_va = amdgpu_vm_bo_find(vm, bo);
914         }
915
916         /* Wait for the previous fence only after taking the reservation lock
917          * of the PD root, so no ctx mutex lock is needed.
918          */
919         r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
920         if (unlikely(r != 0)) {
921                 if (r != -ERESTARTSYS)
922                         DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
923                 goto error_validate;
924         }
925
926         amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
927                                           &p->bytes_moved_vis_threshold);
928         p->bytes_moved = 0;
929         p->bytes_moved_vis = 0;
930
931         r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
932                                       amdgpu_cs_bo_validate, p);
933         if (r) {
934                 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
935                 goto error_validate;
936         }
937
938         r = amdgpu_cs_list_validate(p, &duplicates);
939         if (r)
940                 goto error_validate;
941
942         r = amdgpu_cs_list_validate(p, &p->validated);
943         if (r)
944                 goto error_validate;
945
946         amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
947                                      p->bytes_moved_vis);
948
949         amdgpu_job_set_resources(p->job, p->bo_list->gds_obj,
950                                  p->bo_list->gws_obj, p->bo_list->oa_obj);
951
952         if (!r && p->uf_entry.tv.bo) {
953                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
954
955                 r = amdgpu_ttm_alloc_gart(&uf->tbo);
956                 p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
957         }
958
959 error_validate:
960         if (r)
961                 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
962
963 out_free_user_pages:
964         if (r) {
965                 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
966                         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
967
968                         if (!e->user_pages)
969                                 continue;
970                         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
971                         kvfree(e->user_pages);
972                         e->user_pages = NULL;
973                 }
974                 mutex_unlock(&p->bo_list->bo_list_mutex);
975         }
976         return r;
977 }
978
979 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
980 {
981         int i;
982
983         if (!trace_amdgpu_cs_enabled())
984                 return;
985
986         for (i = 0; i < parser->job->num_ibs; i++)
987                 trace_amdgpu_cs(parser, i);
988 }
989
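/* Handle the VM side of a submission: parse or patch the IBs for UVD/VCE VM
 * emulation and update the page tables for all BOs used by the job.
 */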
990 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
991 {
992         struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
993         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
994         struct amdgpu_device *adev = p->adev;
995         struct amdgpu_vm *vm = &fpriv->vm;
996         struct amdgpu_bo_list_entry *e;
997         struct amdgpu_bo_va *bo_va;
998         struct amdgpu_bo *bo;
999         int r;
1000
1001         /* Only for UVD/VCE VM emulation */
1002         if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
1003                 unsigned i, j;
1004
1005                 for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
1006                         struct drm_amdgpu_cs_chunk_ib *chunk_ib;
1007                         struct amdgpu_bo_va_mapping *m;
1008                         struct amdgpu_bo *aobj = NULL;
1009                         struct amdgpu_cs_chunk *chunk;
1010                         uint64_t offset, va_start;
1011                         struct amdgpu_ib *ib;
1012                         uint8_t *kptr;
1013
1014                         chunk = &p->chunks[i];
1015                         ib = &p->job->ibs[j];
1016                         chunk_ib = chunk->kdata;
1017
1018                         if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
1019                                 continue;
1020
1021                         va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
1022                         r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1023                         if (r) {
1024                                 DRM_ERROR("IB va_start is invalid\n");
1025                                 return r;
1026                         }
1027
1028                         if ((va_start + chunk_ib->ib_bytes) >
1029                             (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1030                                 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1031                                 return -EINVAL;
1032                         }
1033
1034                         /* the IB should be reserved at this point */
1035                         r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1036                         if (r) {
1037                                 return r;
1038                         }
1039
1040                         offset = m->start * AMDGPU_GPU_PAGE_SIZE;
1041                         kptr += va_start - offset;
1042
1043                         if (ring->funcs->parse_cs) {
1044                                 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
1045                                 amdgpu_bo_kunmap(aobj);
1046
1047                                 r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
1048                                 if (r)
1049                                         return r;
1050                         } else {
1051                                 ib->ptr = (uint32_t *)kptr;
1052                                 r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
1053                                 amdgpu_bo_kunmap(aobj);
1054                                 if (r)
1055                                         return r;
1056                         }
1057
1058                         j++;
1059                 }
1060         }
1061
1062         if (!p->job->vm)
1063                 return 0;
1064
1065         r = amdgpu_vm_clear_freed(adev, vm, NULL);
1066         if (r)
1067                 return r;
1068
1069         r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1070         if (r)
1071                 return r;
1072
1073         r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
1074         if (r)
1075                 return r;
1076
1077         if (fpriv->csa_va) {
1078                 bo_va = fpriv->csa_va;
1079                 BUG_ON(!bo_va);
1080                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1081                 if (r)
1082                         return r;
1083
1084                 r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
1085                 if (r)
1086                         return r;
1087         }
1088
1089         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1090                 /* ignore duplicates */
1091                 bo = ttm_to_amdgpu_bo(e->tv.bo);
1092                 if (!bo)
1093                         continue;
1094
1095                 bo_va = e->bo_va;
1096                 if (bo_va == NULL)
1097                         continue;
1098
1099                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1100                 if (r)
1101                         return r;
1102
1103                 r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
1104                 if (r)
1105                         return r;
1106         }
1107
1108         r = amdgpu_vm_handle_moved(adev, vm);
1109         if (r)
1110                 return r;
1111
1112         r = amdgpu_vm_update_pdes(adev, vm, false);
1113         if (r)
1114                 return r;
1115
1116         r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
1117         if (r)
1118                 return r;
1119
1120         p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1121
1122         if (amdgpu_vm_debug) {
1123                 /* Invalidate all BOs to test for userspace bugs */
1124                 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1125                         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1126
1127                         /* ignore duplicates */
1128                         if (!bo)
1129                                 continue;
1130
1131                         amdgpu_vm_bo_invalidate(adev, bo, false);
1132                 }
1133         }
1134
1135         return 0;
1136 }
1137
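/* Add the reservation object fences of all validated BOs to the job,
 * honoring explicitly synchronized BOs.
 */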
1138 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1139 {
1140         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1141         struct amdgpu_bo_list_entry *e;
1142         int r;
1143
1144         list_for_each_entry(e, &p->validated, tv.head) {
1145                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1146                 struct dma_resv *resv = bo->tbo.base.resv;
1147                 enum amdgpu_sync_mode sync_mode;
1148
1149                 sync_mode = amdgpu_bo_explicit_sync(bo) ?
1150                         AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1151                 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
1152                                      &fpriv->vm);
1153                 if (r)
1154                         return r;
1155         }
1156         return 0;
1157 }
1158
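/* Signal the syncobjs collected from SYNCOBJ_OUT and TIMELINE_SIGNAL chunks
 * with the submission fence.
 */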
1159 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1160 {
1161         int i;
1162
1163         for (i = 0; i < p->num_post_deps; ++i) {
1164                 if (p->post_deps[i].chain && p->post_deps[i].point) {
1165                         drm_syncobj_add_point(p->post_deps[i].syncobj,
1166                                               p->post_deps[i].chain,
1167                                               p->fence, p->post_deps[i].point);
1168                         p->post_deps[i].chain = NULL;
1169                 } else {
1170                         drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1171                                                   p->fence);
1172                 }
1173         }
1174 }
1175
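/* Push the prepared job to the scheduler, publish the finished fence and add
 * it to all BOs of the submission.
 */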
1176 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1177                             union drm_amdgpu_cs *cs)
1178 {
1179         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1180         struct drm_sched_entity *entity = p->entity;
1181         struct amdgpu_bo_list_entry *e;
1182         struct amdgpu_job *job;
1183         uint64_t seq;
1184         int r;
1185
1186         job = p->job;
1187         p->job = NULL;
1188
1189         r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
1190         if (r)
1191                 goto error_unlock;
1192
1193         drm_sched_job_arm(&job->base);
1194
1195         /* No memory allocation is allowed while holding the notifier lock.
1196          * The lock is held until amdgpu_cs_submit() is finished and the fence
1197          * is added to the BOs.
1198          */
1199         mutex_lock(&p->adev->notifier_lock);
1200
1201         /* If the userptrs were invalidated after amdgpu_cs_parser_bos(), return
1202          * -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
1203          */
1204         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1205                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1206
1207                 r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1208         }
1209         if (r) {
1210                 r = -EAGAIN;
1211                 goto error_abort;
1212         }
1213
1214         p->fence = dma_fence_get(&job->base.s_fence->finished);
1215
1216         seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
1217         amdgpu_cs_post_dependencies(p);
1218
1219         if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1220             !p->ctx->preamble_presented) {
1221                 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1222                 p->ctx->preamble_presented = true;
1223         }
1224
1225         cs->out.handle = seq;
1226         job->uf_sequence = seq;
1227
1228         amdgpu_job_free_resources(job);
1229
1230         trace_amdgpu_cs_ioctl(job);
1231         amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1232         drm_sched_entity_push_job(&job->base);
1233
1234         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1235
1236         /* Make sure all BOs are remembered as writers */
1237         amdgpu_bo_list_for_each_entry(e, p->bo_list)
1238                 e->tv.num_shared = 0;
1239
1240         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1241         mutex_unlock(&p->adev->notifier_lock);
1242         mutex_unlock(&p->bo_list->bo_list_mutex);
1243
1244         return 0;
1245
1246 error_abort:
1247         drm_sched_job_cleanup(&job->base);
1248         mutex_unlock(&p->adev->notifier_lock);
1249
1250 error_unlock:
1251         amdgpu_job_free(job);
1252         return r;
1253 }
1254
1255 /* Cleanup the parser structure */
1256 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
1257                                   bool backoff)
1258 {
1259         unsigned i;
1260
1261         if (error && backoff) {
1262                 ttm_eu_backoff_reservation(&parser->ticket,
1263                                            &parser->validated);
1264                 mutex_unlock(&parser->bo_list->bo_list_mutex);
1265         }
1266
1267         for (i = 0; i < parser->num_post_deps; i++) {
1268                 drm_syncobj_put(parser->post_deps[i].syncobj);
1269                 kfree(parser->post_deps[i].chain);
1270         }
1271         kfree(parser->post_deps);
1272
1273         dma_fence_put(parser->fence);
1274
1275         if (parser->ctx)
1276                 amdgpu_ctx_put(parser->ctx);
1277         if (parser->bo_list)
1278                 amdgpu_bo_list_put(parser->bo_list);
1279
1280         for (i = 0; i < parser->nchunks; i++)
1281                 kvfree(parser->chunks[i].kdata);
1282         kvfree(parser->chunks);
1283         if (parser->job)
1284                 amdgpu_job_free(parser->job);
1285         if (parser->uf_entry.tv.bo) {
1286                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
1287
1288                 amdgpu_bo_unref(&uf);
1289         }
1290 }
1291
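/* Main entry point for command submission: run all parser passes, resolve
 * dependencies, validate the BOs and finally submit the job.
 */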
1292 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1293 {
1294         struct amdgpu_device *adev = drm_to_adev(dev);
1295         struct amdgpu_cs_parser parser;
1296         bool reserved_buffers = false;
1297         int r;
1298
1299         if (amdgpu_ras_intr_triggered())
1300                 return -EHWPOISON;
1301
1302         if (!adev->accel_working)
1303                 return -EBUSY;
1304
1305         r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1306         if (r) {
1307                 if (printk_ratelimit())
1308                         DRM_ERROR("Failed to initialize parser %d!\n", r);
1309                 goto out;
1310         }
1311
1312         r = amdgpu_cs_pass1(&parser, data);
1313         if (r)
1314                 goto out;
1315
1316         r = amdgpu_cs_ib_fill(adev, &parser);
1317         if (r)
1318                 goto out;
1319
1320         r = amdgpu_cs_dependencies(adev, &parser);
1321         if (r) {
1322                 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1323                 goto out;
1324         }
1325
1326         r = amdgpu_cs_parser_bos(&parser, data);
1327         if (r) {
1328                 if (r == -ENOMEM)
1329                         DRM_ERROR("Not enough memory for command submission!\n");
1330                 else if (r != -ERESTARTSYS && r != -EAGAIN)
1331                         DRM_ERROR("Failed to process the buffer list %d!\n", r);
1332                 goto out;
1333         }
1334
1335         reserved_buffers = true;
1336
1337         trace_amdgpu_cs_ibs(&parser);
1338
1339         r = amdgpu_cs_vm_handling(&parser);
1340         if (r)
1341                 goto out;
1342
1343         r = amdgpu_cs_sync_rings(&parser);
1344         if (r)
1345                 goto out;
1346
1347         r = amdgpu_cs_submit(&parser, data);
1348 out:
1349         amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1350
1351         return r;
1352 }
1353
1354 /**
1355  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1356  *
1357  * @dev: drm device
1358  * @data: data from userspace
1359  * @filp: file private
1360  *
1361  * Wait for the command submission identified by handle to finish.
1362  */
1363 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1364                          struct drm_file *filp)
1365 {
1366         union drm_amdgpu_wait_cs *wait = data;
1367         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1368         struct drm_sched_entity *entity;
1369         struct amdgpu_ctx *ctx;
1370         struct dma_fence *fence;
1371         long r;
1372
1373         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1374         if (ctx == NULL)
1375                 return -EINVAL;
1376
1377         r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1378                                   wait->in.ring, &entity);
1379         if (r) {
1380                 amdgpu_ctx_put(ctx);
1381                 return r;
1382         }
1383
1384         fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1385         if (IS_ERR(fence))
1386                 r = PTR_ERR(fence);
1387         else if (fence) {
1388                 r = dma_fence_wait_timeout(fence, true, timeout);
1389                 if (r > 0 && fence->error)
1390                         r = fence->error;
1391                 dma_fence_put(fence);
1392         } else
1393                 r = 1;
1394
1395         amdgpu_ctx_put(ctx);
1396         if (r < 0)
1397                 return r;
1398
1399         memset(wait, 0, sizeof(*wait));
1400         wait->out.status = (r == 0);
1401
1402         return 0;
1403 }
1404
1405 /**
1406  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1407  *
1408  * @adev: amdgpu device
1409  * @filp: file private
1410  * @user: drm_amdgpu_fence copied from user space
1411  */
1412 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1413                                              struct drm_file *filp,
1414                                              struct drm_amdgpu_fence *user)
1415 {
1416         struct drm_sched_entity *entity;
1417         struct amdgpu_ctx *ctx;
1418         struct dma_fence *fence;
1419         int r;
1420
1421         ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1422         if (ctx == NULL)
1423                 return ERR_PTR(-EINVAL);
1424
1425         r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1426                                   user->ring, &entity);
1427         if (r) {
1428                 amdgpu_ctx_put(ctx);
1429                 return ERR_PTR(r);
1430         }
1431
1432         fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1433         amdgpu_ctx_put(ctx);
1434
1435         return fence;
1436 }
1437
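/* Convert the fence of a command submission into a syncobj handle, syncobj fd
 * or sync_file fd, depending on what userspace asked for.
 */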
1438 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1439                                     struct drm_file *filp)
1440 {
1441         struct amdgpu_device *adev = drm_to_adev(dev);
1442         union drm_amdgpu_fence_to_handle *info = data;
1443         struct dma_fence *fence;
1444         struct drm_syncobj *syncobj;
1445         struct sync_file *sync_file;
1446         int fd, r;
1447
1448         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1449         if (IS_ERR(fence))
1450                 return PTR_ERR(fence);
1451
1452         if (!fence)
1453                 fence = dma_fence_get_stub();
1454
1455         switch (info->in.what) {
1456         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1457                 r = drm_syncobj_create(&syncobj, 0, fence);
1458                 dma_fence_put(fence);
1459                 if (r)
1460                         return r;
1461                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1462                 drm_syncobj_put(syncobj);
1463                 return r;
1464
1465         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1466                 r = drm_syncobj_create(&syncobj, 0, fence);
1467                 dma_fence_put(fence);
1468                 if (r)
1469                         return r;
1470                 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1471                 drm_syncobj_put(syncobj);
1472                 return r;
1473
1474         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1475                 fd = get_unused_fd_flags(O_CLOEXEC);
1476                 if (fd < 0) {
1477                         dma_fence_put(fence);
1478                         return fd;
1479                 }
1480
1481                 sync_file = sync_file_create(fence);
1482                 dma_fence_put(fence);
1483                 if (!sync_file) {
1484                         put_unused_fd(fd);
1485                         return -ENOMEM;
1486                 }
1487
1488                 fd_install(fd, sync_file->file);
1489                 info->out.handle = fd;
1490                 return 0;
1491
1492         default:
1493                 dma_fence_put(fence);
1494                 return -EINVAL;
1495         }
1496 }
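
/*
 * Illustrative userspace sketch (not part of this file): exporting the
 * fence of a previous submission as a sync_file fd through
 * DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, i.e. the
 * AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD case handled above.  The
 * helper name and header path are hypothetical.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* uapi header; path depends on the install */

/* Returns a poll()able sync_file fd owned by the caller, or -errno. */
static int example_fence_to_sync_file(int fd, uint32_t ctx_id,
				      uint32_t ip_type, uint32_t ring,
				      uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle args;

	memset(&args, 0, sizeof(args));
	args.in.fence.ctx_id = ctx_id;
	args.in.fence.ip_type = ip_type;	/* e.g. AMDGPU_HW_IP_GFX */
	args.in.fence.ip_instance = 0;
	args.in.fence.ring = ring;
	args.in.fence.seq_no = seq_no;
	args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args))
		return -errno;

	return (int)args.out.handle;	/* the installed sync_file fd */
}
#endif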
1497
1498 /**
1499  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1500  *
1501  * @adev: amdgpu device
1502  * @filp: file private
1503  * @wait: wait parameters
1504  * @fences: array of drm_amdgpu_fence
1505  */
1506 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1507                                      struct drm_file *filp,
1508                                      union drm_amdgpu_wait_fences *wait,
1509                                      struct drm_amdgpu_fence *fences)
1510 {
1511         uint32_t fence_count = wait->in.fence_count;
1512         unsigned int i;
1513         long r = 1;
1514
1515         for (i = 0; i < fence_count; i++) {
1516                 struct dma_fence *fence;
1517                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1518
1519                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1520                 if (IS_ERR(fence))
1521                         return PTR_ERR(fence);
1522                 else if (!fence)
1523                         continue;
1524
1525                 r = dma_fence_wait_timeout(fence, true, timeout);
1526                 /* read the error before dropping our fence reference */
1527                 if (r > 0 && fence->error)
1528                         r = fence->error;
1529                 dma_fence_put(fence);
1530                 if (r < 0)
1531                         return r;
1532
1533                 if (r == 0)
1534                         break;
1535         }
1536
1537         memset(wait, 0, sizeof(*wait));
1538         wait->out.status = (r > 0);
1539
1540         return 0;
1541 }
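
/*
 * Note on timeouts: both wait helpers pass wait->in.timeout_ns through
 * amdgpu_gem_timeout() (amdgpu_gem.c), which subtracts the current
 * CLOCK_MONOTONIC time, so userspace supplies an absolute deadline
 * rather than a relative timeout.  A hedged sketch of the userspace
 * side of that conversion (helper name hypothetical):
 */
#if 0
#include <stdint.h>
#include <time.h>

/* Turn a relative timeout into the absolute deadline the ioctl expects;
 * any value that is negative when read as int64_t means "wait forever". */
static uint64_t example_abs_timeout_ns(uint64_t relative_ns)
{
	struct timespec now;

	if (relative_ns == UINT64_MAX)
		return UINT64_MAX;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (uint64_t)now.tv_sec * 1000000000ull + now.tv_nsec +
	       relative_ns;
}
#endif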
1542
1543 /**
1544  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1545  *
1546  * @adev: amdgpu device
1547  * @filp: file private
1548  * @wait: wait parameters
1549  * @fences: array of drm_amdgpu_fence
1550  */
1551 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1552                                     struct drm_file *filp,
1553                                     union drm_amdgpu_wait_fences *wait,
1554                                     struct drm_amdgpu_fence *fences)
1555 {
1556         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1557         uint32_t fence_count = wait->in.fence_count;
1558         uint32_t first = ~0;
1559         struct dma_fence **array;
1560         unsigned int i;
1561         long r;
1562
1563         /* Prepare the fence array */
1564         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1565
1566         if (array == NULL)
1567                 return -ENOMEM;
1568
1569         for (i = 0; i < fence_count; i++) {
1570                 struct dma_fence *fence;
1571
1572                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1573                 if (IS_ERR(fence)) {
1574                         r = PTR_ERR(fence);
1575                         goto err_free_fence_array;
1576                 } else if (fence) {
1577                         array[i] = fence;
1578                 } else { /* NULL, the fence has already been signaled */
1579                         r = 1;
1580                         first = i;
1581                         goto out;
1582                 }
1583         }
1584
1585         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1586                                        &first);
1587         if (r < 0)
1588                 goto err_free_fence_array;
1589
1590 out:
1591         memset(wait, 0, sizeof(*wait));
1592         wait->out.status = (r > 0);
1593         wait->out.first_signaled = first;
1594
1595         if (first < fence_count && array[first])
1596                 r = array[first]->error;
1597         else
1598                 r = 0;
1599
1600 err_free_fence_array:
1601         for (i = 0; i < fence_count; i++)
1602                 dma_fence_put(array[i]);
1603         kfree(array);
1604
1605         return r;
1606 }
1607
1608 /**
1609  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1610  *
1611  * @dev: drm device
1612  * @data: data from userspace
1613  * @filp: file private
1614  */
1615 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1616                                 struct drm_file *filp)
1617 {
1618         struct amdgpu_device *adev = drm_to_adev(dev);
1619         union drm_amdgpu_wait_fences *wait = data;
1620         uint32_t fence_count = wait->in.fence_count;
1621         struct drm_amdgpu_fence *fences_user;
1622         struct drm_amdgpu_fence *fences;
1623         int r;
1624
1625         /* Get the fences from userspace */
1626         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1627                         GFP_KERNEL);
1628         if (fences == NULL)
1629                 return -ENOMEM;
1630
1631         fences_user = u64_to_user_ptr(wait->in.fences);
1632         if (copy_from_user(fences, fences_user,
1633                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1634                 r = -EFAULT;
1635                 goto err_free_fences;
1636         }
1637
1638         if (wait->in.wait_all)
1639                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1640         else
1641                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1642
1643 err_free_fences:
1644         kfree(fences);
1645
1646         return r;
1647 }
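
/*
 * Illustrative userspace sketch (not part of this file): waiting on a
 * set of submissions through DRM_IOCTL_AMDGPU_WAIT_FENCES, the ioctl
 * handled above.  wait_all = 0 exercises amdgpu_cs_wait_any_fence() and
 * reports the first fence that signaled; wait_all = 1 exercises
 * amdgpu_cs_wait_all_fences().  The helper name and header path are
 * hypothetical.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* uapi header; path depends on the install */

/* Returns 1 if a fence signaled (index in *first), 0 on timeout, -errno. */
static int example_wait_any(int fd, struct drm_amdgpu_fence *fences,
			    uint32_t count, uint64_t abs_timeout_ns,
			    uint32_t *first)
{
	union drm_amdgpu_wait_fences args;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;	/* array of fence descriptors */
	args.in.fence_count = count;
	args.in.wait_all = 0;			/* 1 would wait for all of them */
	args.in.timeout_ns = abs_timeout_ns;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
		return -errno;

	*first = args.out.first_signaled;
	return args.out.status;
}
#endif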
1648
1649 /**
1650  * amdgpu_cs_find_mapping - find bo_va for VM address
1651  *
1652  * @parser: command submission parser context
1653  * @addr: VM address
1654  * @bo: resulting BO of the mapping found
1655  * @map: Placeholder to return found BO mapping
1656  *
1657  * Search the buffer objects of the command submission context for a certain
1658  * virtual memory address. Returns 0 on success and fills in @bo and @map,
1659  * or a negative error code otherwise.
1660  */
1661 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1662                            uint64_t addr, struct amdgpu_bo **bo,
1663                            struct amdgpu_bo_va_mapping **map)
1664 {
1665         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1666         struct ttm_operation_ctx ctx = { false, false };
1667         struct amdgpu_vm *vm = &fpriv->vm;
1668         struct amdgpu_bo_va_mapping *mapping;
1669         int r;
1670
1671         addr /= AMDGPU_GPU_PAGE_SIZE;
1672
1673         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1674         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1675                 return -EINVAL;
1676
1677         *bo = mapping->bo_va->base.bo;
1678         *map = mapping;
1679
1680         /* Double check that the BO is reserved by this CS */
1681         if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1682                 return -EINVAL;
1683
1684         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1685                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1686                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1687                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1688                 if (r)
1689                         return r;
1690         }
1691
1692         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1693 }
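
/*
 * Illustrative sketch of a typical amdgpu_cs_find_mapping() caller (not
 * part of this file): the UVD/VCE/VCN IB parsers use it roughly like
 * this to turn a GPU virtual address found in an IB into a CPU pointer.
 * The helper name is hypothetical and error paths are trimmed; the
 * caller is expected to amdgpu_bo_kunmap() the BO when done.
 */
#if 0
static int example_map_ib_addr(struct amdgpu_cs_parser *p, uint64_t addr,
			       uint32_t **cpu_addr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	void *ptr;
	int r;

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r)
		return r;

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r)
		return r;

	/* mapping->start is in GPU pages, addr is in bytes */
	*cpu_addr = ptr + (addr - mapping->start * AMDGPU_GPU_PAGE_SIZE);
	return 0;
}
#endif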