/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
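
/*
 * Command submission (CS) flow, as implemented below: userspace passes an
 * array of chunks through the AMDGPU_CS ioctl. Pass 1 (amdgpu_cs_pass1)
 * copies the chunks into kernel memory and allocates the jobs; pass 2
 * (amdgpu_cs_pass2) turns them into IBs, dependencies and syncobj post-deps.
 * The buffer list is then locked and validated, IBs are patched for rings
 * that need it, the VM is updated, the rings are synced, and finally the
 * jobs are armed and pushed to their scheduler entities.
 */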
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	return 0;
}

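/*
 * Map a chunk's IP type/instance/ring to a slot in p->entities/p->jobs. Up
 * to AMDGPU_CS_GANG_SIZE scheduler entities can take part in one gang
 * submission; returns the (possibly new) job index on success or a negative
 * error code.
 */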
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity.
	 * Possibly because of disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

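/*
 * First-pass handling of an IB chunk: only count the IB towards the job it
 * belongs to so that pass 1 knows how many IBs to allocate per job. The last
 * IB chunk seen determines the gang leader.
 */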
static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
		return -EINVAL;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

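/*
 * First-pass handling of a user fence chunk: the fence BO must be exactly
 * one page, large enough for the 8-byte fence write at the given offset, and
 * must not be a userptr BO. Only the offset is recorded here; the GPU
 * address is filled in later in amdgpu_cs_parser_bos().
 */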
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(p->uf_bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;
	return 0;

error_unref:
	amdgpu_bo_unref(&p->uf_bo);
	return r;
}

static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	kvfree(info);
	return r;
}

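/*
 * Each chunk copied from userspace is a {chunk_id, length_dw, chunk_data}
 * triple, where chunk_data is a user pointer to length_dw dwords of payload.
 * Pass 1 below copies every payload into p->chunks[i].kdata before looking
 * at it, so later passes never touch user memory again.
 */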
/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	size_t size;
	int ret;
	int i;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
				     GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
						    GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		/* Assume the worst on the following checks */
		ret = -EINVAL;
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size) {
		ret = -EINVAL;
		goto free_all_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i]);
		if (ret)
			goto free_all_kdata;
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->generation != p->gang_leader->generation) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}

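/*
 * Second-pass handling of an IB chunk: now that the jobs exist, allocate the
 * actual IB structure and enforce the per-submit limit on preemptible CE/DE
 * IBs for gfx.
 */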
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* MM engine doesn't support user fences */
	if (p->uf_bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submit allows at most one preemptible IB
		 * each for CE and DE */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

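/*
 * Common helper for the syncobj wait chunks below: resolve a syncobj handle
 * (and optional timeline point) to a fence and add it to the sync object the
 * jobs will wait on before they run.
 */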
static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence);
	dma_fence_put(fence);
	return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
			       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
	int i;

	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
		return -EINVAL;

	for (i = 0; i < p->gang_size; ++i) {
		p->jobs[i]->shadow_va = shadow->shadow_va;
		p->jobs[i]->csa_va = shadow->csa_va;
		p->jobs[i]->gds_va = shadow->gds_va;
		p->jobs[i]->init_shadow =
			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
	}

	return 0;
}

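/*
 * Second parser pass: dispatch every copied chunk to its p2 handler. The
 * chunk IDs were already validated in pass 1, so unknown IDs cannot show up
 * here.
 */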
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			r = amdgpu_cs_p2_shadow(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

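/*
 * Example of the conversion: with log2_max_MBps == 6 (a 64 MB/s budget), one
 * second of accumulated time, i.e. 1000000 us, converts to
 * 1000000 << 6 = 64000000 bytes, and bytes_to_us() is the matching right
 * shift back.
 */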
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis =
				min(adev->mm_stats.accum_us_vis + increment_us,
				    us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		} else {
			adev->mm_stats.accum_us_vis = 0;
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

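/*
 * Lock and validate all buffers used by the submission: resolve the BO list,
 * pin down the userptr backing pages, lock everything through drm_exec, and
 * move BOs to their preferred domains within the per-IB throttling budget
 * computed by amdgpu_cs_get_threshold_for_moves().
 */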
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *obj;
	unsigned long index;
	unsigned int i;
	int r;

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* Get userptr backing pages. If pages are updated after they were
	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		bool userpage_invalidated = false;
		struct amdgpu_bo *bo = e->bo;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	drm_exec_until_all_locked(&p->exec) {
		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
		drm_exec_retry_on_contention(&p->exec);
		if (unlikely(r))
			goto out_free_user_pages;

		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* One fence for TTM and one for each CS job */
			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;

			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
		}

		if (p->uf_bo) {
			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;
		}
	}

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
		if (usermm && usermm != current->mm) {
			r = -EPERM;
			goto out_free_user_pages;
		}

		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
		    e->user_invalidated && e->user_pages) {
			amdgpu_bo_placement_from_domain(e->bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
					    &ctx);
			if (r)
				goto out_free_user_pages;

			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
						     e->user_pages);
		}

		kvfree(e->user_pages);
		e->user_pages = NULL;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto out_free_user_pages;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
		if (unlikely(r))
			goto out_free_user_pages;
	}

	if (p->uf_bo) {
		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
		if (unlikely(r))
			goto out_free_user_pages;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->bo;

		if (!e->user_pages)
			continue;
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		kvfree(e->user_pages);
		e->user_pages = NULL;
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}

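/*
 * For rings that emulate VM handling (UVD/VCE), the IB contents must be
 * inspected by the kernel: either copied out and parsed (parse_cs) or
 * patched in place (patch_cs_in_place) after mapping the backing BO.
 */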
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			DRM_ERROR("IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	unsigned int i;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->bo;

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return 0;
}

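/*
 * Make the jobs wait for everything the submission depends on: the context's
 * previous fence, plus the reservation objects of all locked BOs (implicit
 * sync, unless a BO requests explicit sync). Fences from the gang leader's
 * own scheduler ring are additionally kept in explicit_sync so that a
 * pipeline sync is emitted for them.
 */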
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	unsigned long index;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc are flushed and the
		 * next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

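/*
 * Signal the syncobjs collected from the SYNCOBJ_OUT/TIMELINE_SIGNAL chunks:
 * timeline syncobjs get p->fence added at the requested point via a fence
 * chain, binary syncobjs simply have their fence replaced.
 */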
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *gobj;
	unsigned long index;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			return r;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
							e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		mutex_unlock(&p->adev->notifier_lock);
		return r;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	drm_exec_for_each_locked_object(&p->exec, index, gobj) {

		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(gobj->resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned int i;

	amdgpu_sync_free(&parser->sync);
	drm_exec_fini(&parser->exec);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	amdgpu_bo_unref(&parser->uf_bo);
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

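/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a fence to a syncobj or sync_file
 *
 * @dev: drm device
 * @data: data from userspace (union drm_amdgpu_fence_to_handle)
 * @filp: file private
 *
 * Looks up the fence described by @data and, depending on in.what, returns
 * it as a syncobj handle, a syncobj fd, or a sync_file fd.
 */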
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}