1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <drm/ttm/ttm_tt.h>
27 #include "amdgpu_sync.h"
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_hmm.h"
32 #include "amdgpu_xgmi.h"
35 #include "kfd_migrate.h"
36 #include "kfd_smi_events.h"
41 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
43 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
45 /* Long enough to ensure that no retry fault arrives after the svm range is
46 * restored and the page table is updated.
48 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
50 /* A giant svm range is split into smaller ranges based on this value. It is
51 * the minimum across all dGPU/APU 1/32 VRAM sizes, clamped between 2MB and 1GB and rounded down to a power of two.
54 static uint64_t max_svm_range_pages;
56 struct criu_svm_metadata {
57 struct list_head list;
58 struct kfd_criu_svm_range_priv_data data;
61 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
63 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
64 const struct mmu_notifier_range *range,
65 unsigned long cur_seq);
67 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
68 uint64_t *bo_s, uint64_t *bo_l);
69 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
70 .invalidate = svm_range_cpu_invalidate_pagetables,
74 * svm_range_unlink - unlink svm_range from lists and interval tree
75 * @prange: svm range structure to be removed
77 * Remove the svm_range from the svms and svm_bo lists and from the svms interval tree.
80 * Context: The caller must hold svms->lock
82 static void svm_range_unlink(struct svm_range *prange)
84 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
85 prange, prange->start, prange->last);
88 spin_lock(&prange->svm_bo->list_lock);
89 list_del(&prange->svm_bo_list);
90 spin_unlock(&prange->svm_bo->list_lock);
93 list_del(&prange->list);
94 if (prange->it_node.start != 0 && prange->it_node.last != 0)
95 interval_tree_remove(&prange->it_node, &prange->svms->objects);
99 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
101 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
102 prange, prange->start, prange->last);
104 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
105 prange->start << PAGE_SHIFT,
106 prange->npages << PAGE_SHIFT,
111 * svm_range_add_to_svms - add svm range to svms
112 * @prange: svm range structure to be added
114 * Add the svm range to the svms interval tree and linked list
116 * Context: The caller must hold svms->lock
118 static void svm_range_add_to_svms(struct svm_range *prange)
120 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
121 prange, prange->start, prange->last);
123 list_move_tail(&prange->list, &prange->svms->list);
124 prange->it_node.start = prange->start;
125 prange->it_node.last = prange->last;
126 interval_tree_insert(&prange->it_node, &prange->svms->objects);
129 static void svm_range_remove_notifier(struct svm_range *prange)
131 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
132 prange->svms, prange,
133 prange->notifier.interval_tree.start >> PAGE_SHIFT,
134 prange->notifier.interval_tree.last >> PAGE_SHIFT);
136 if (prange->notifier.interval_tree.start != 0 &&
137 prange->notifier.interval_tree.last != 0)
138 mmu_interval_notifier_remove(&prange->notifier);
142 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
144 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
145 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
149 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
150 unsigned long offset, unsigned long npages,
151 unsigned long *hmm_pfns, uint32_t gpuidx)
153 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
154 dma_addr_t *addr = prange->dma_addr[gpuidx];
155 struct device *dev = adev->dev;
160 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
163 prange->dma_addr[gpuidx] = addr;
167 for (i = 0; i < npages; i++) {
168 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
169 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
171 page = hmm_pfn_to_page(hmm_pfns[i]);
172 if (is_zone_device_page(page)) {
173 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
175 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
176 bo_adev->vm_manager.vram_base_offset -
177 bo_adev->kfd.pgmap.range.start;
178 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
179 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
182 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
183 r = dma_mapping_error(dev, addr[i]);
185 dev_err(dev, "failed %d dma_map_page\n", r);
188 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
189 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
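/* After this loop completes successfully, addr[] holds one entry per page:
 * either a dma_map_page() address for system memory, or a VRAM device address
 * tagged with SVM_RANGE_VRAM_DOMAIN; svm_is_valid_dma_mapping_addr() is what
 * later unmap/map code uses to tell the two cases apart.
 */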
195 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
196 unsigned long offset, unsigned long npages,
197 unsigned long *hmm_pfns)
199 struct kfd_process *p;
203 p = container_of(prange->svms, struct kfd_process, svms);
205 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
206 struct kfd_process_device *pdd;
208 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
209 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
211 pr_debug("failed to find device idx %d\n", gpuidx);
215 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
224 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
225 unsigned long offset, unsigned long npages)
227 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
233 for (i = offset; i < offset + npages; i++) {
234 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
236 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
237 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
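/* Entries tagged with SVM_RANGE_VRAM_DOMAIN are skipped by the check above:
 * they were never produced by dma_map_page(), so there is nothing to unmap.
 */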
242 void svm_range_free_dma_mappings(struct svm_range *prange)
244 struct kfd_process_device *pdd;
245 dma_addr_t *dma_addr;
247 struct kfd_process *p;
250 p = container_of(prange->svms, struct kfd_process, svms);
252 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
253 dma_addr = prange->dma_addr[gpuidx];
257 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
259 pr_debug("failed to find device idx %d\n", gpuidx);
262 dev = &pdd->dev->adev->pdev->dev;
263 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
265 prange->dma_addr[gpuidx] = NULL;
269 static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
271 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
272 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
274 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
275 prange->start, prange->last);
277 svm_range_vram_node_free(prange);
278 svm_range_free_dma_mappings(prange);
280 if (update_mem_usage && !p->xnack_enabled) {
281 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
282 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
283 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
285 mutex_destroy(&prange->lock);
286 mutex_destroy(&prange->migrate_mutex);
291 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
292 uint8_t *granularity, uint32_t *flags)
294 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
295 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
298 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
302 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
303 uint64_t last, bool update_mem_usage)
305 uint64_t size = last - start + 1;
306 struct svm_range *prange;
307 struct kfd_process *p;
309 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
313 p = container_of(svms, struct kfd_process, svms);
314 if (!p->xnack_enabled && update_mem_usage &&
315 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
316 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
317 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
321 prange->npages = size;
323 prange->start = start;
325 INIT_LIST_HEAD(&prange->list);
326 INIT_LIST_HEAD(&prange->update_list);
327 INIT_LIST_HEAD(&prange->svm_bo_list);
328 INIT_LIST_HEAD(&prange->deferred_list);
329 INIT_LIST_HEAD(&prange->child_list);
330 atomic_set(&prange->invalid, 0);
331 prange->validate_timestamp = 0;
332 mutex_init(&prange->migrate_mutex);
333 mutex_init(&prange->lock);
335 if (p->xnack_enabled)
336 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
339 svm_range_set_default_attributes(&prange->preferred_loc,
340 &prange->prefetch_loc,
341 &prange->granularity, &prange->flags);
343 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
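/* Note: when XNACK is disabled and update_mem_usage is true, the system memory
 * limit reserved here is expected to be released again by svm_range_free()
 * with the same KFD_IOC_ALLOC_MEM_FLAGS_USERPTR flag, assuming the usual
 * pairing of range allocation and free.
 */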
348 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
350 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
356 static void svm_range_bo_release(struct kref *kref)
358 struct svm_range_bo *svm_bo;
360 svm_bo = container_of(kref, struct svm_range_bo, kref);
361 pr_debug("svm_bo 0x%p\n", svm_bo);
363 spin_lock(&svm_bo->list_lock);
364 while (!list_empty(&svm_bo->range_list)) {
365 struct svm_range *prange =
366 list_first_entry(&svm_bo->range_list,
367 struct svm_range, svm_bo_list);
368 /* list_del_init tells a concurrent svm_range_vram_node_new when
369 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
371 list_del_init(&prange->svm_bo_list);
372 spin_unlock(&svm_bo->list_lock);
374 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
375 prange->start, prange->last);
376 mutex_lock(&prange->lock);
377 prange->svm_bo = NULL;
378 mutex_unlock(&prange->lock);
380 spin_lock(&svm_bo->list_lock);
382 spin_unlock(&svm_bo->list_lock);
383 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
384 /* We're not in the eviction worker.
385 * Signal the fence and synchronize with any
386 * pending eviction work.
388 dma_fence_signal(&svm_bo->eviction_fence->base);
389 cancel_work_sync(&svm_bo->eviction_work);
391 dma_fence_put(&svm_bo->eviction_fence->base);
392 amdgpu_bo_unref(&svm_bo->bo);
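/* svm_range_bo_release() may block in cancel_work_sync(), so a deferred
 * variant is provided below: svm_range_bo_unref_async() schedules the release
 * on a worker via svm_range_bo_wq_release(), presumably for callers that must
 * not block on the final reference drop.
 */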
396 static void svm_range_bo_wq_release(struct work_struct *work)
398 struct svm_range_bo *svm_bo;
400 svm_bo = container_of(work, struct svm_range_bo, release_work);
401 svm_range_bo_release(&svm_bo->kref);
404 static void svm_range_bo_release_async(struct kref *kref)
406 struct svm_range_bo *svm_bo;
408 svm_bo = container_of(kref, struct svm_range_bo, kref);
409 pr_debug("svm_bo 0x%p\n", svm_bo);
410 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
411 schedule_work(&svm_bo->release_work);
414 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
416 kref_put(&svm_bo->kref, svm_range_bo_release_async);
419 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
422 kref_put(&svm_bo->kref, svm_range_bo_release);
426 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
428 mutex_lock(&prange->lock);
429 if (!prange->svm_bo) {
430 mutex_unlock(&prange->lock);
433 if (prange->ttm_res) {
434 /* We still have a reference, all is well */
435 mutex_unlock(&prange->lock);
438 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
440 * Migrate from GPU to GPU, remove range from source svm_bo->node
441 * range list, and return false to allocate svm_bo from the destination node.
444 if (prange->svm_bo->node != node) {
445 mutex_unlock(&prange->lock);
447 spin_lock(&prange->svm_bo->list_lock);
448 list_del_init(&prange->svm_bo_list);
449 spin_unlock(&prange->svm_bo->list_lock);
451 svm_range_bo_unref(prange->svm_bo);
454 if (READ_ONCE(prange->svm_bo->evicting)) {
456 struct svm_range_bo *svm_bo;
457 /* The BO is getting evicted,
458 * we need to get a new one
460 mutex_unlock(&prange->lock);
461 svm_bo = prange->svm_bo;
462 f = dma_fence_get(&svm_bo->eviction_fence->base);
463 svm_range_bo_unref(prange->svm_bo);
464 /* wait for the fence to avoid long spin-loop
465 * at list_empty_careful
467 dma_fence_wait(f, false);
470 /* The BO was still around and we got
471 * a new reference to it
473 mutex_unlock(&prange->lock);
474 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
475 prange->svms, prange->start, prange->last);
477 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
482 mutex_unlock(&prange->lock);
485 /* We need a new svm_bo. Spin-loop to wait for concurrent
486 * svm_range_bo_release to finish removing this range from
487 * its range list. After this, it is safe to reuse the
488 * svm_bo pointer and svm_bo_list head.
490 while (!list_empty_careful(&prange->svm_bo_list))
496 static struct svm_range_bo *svm_range_bo_new(void)
498 struct svm_range_bo *svm_bo;
500 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
504 kref_init(&svm_bo->kref);
505 INIT_LIST_HEAD(&svm_bo->range_list);
506 spin_lock_init(&svm_bo->list_lock);
512 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
515 struct amdgpu_bo_param bp;
516 struct svm_range_bo *svm_bo;
517 struct amdgpu_bo_user *ubo;
518 struct amdgpu_bo *bo;
519 struct kfd_process *p;
520 struct mm_struct *mm;
523 p = container_of(prange->svms, struct kfd_process, svms);
524 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
525 prange->start, prange->last);
527 if (svm_range_validate_svm_bo(node, prange))
530 svm_bo = svm_range_bo_new();
532 pr_debug("failed to alloc svm bo\n");
535 mm = get_task_mm(p->lead_thread);
537 pr_debug("failed to get mm\n");
542 svm_bo->eviction_fence =
543 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
547 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
548 svm_bo->evicting = 0;
549 memset(&bp, 0, sizeof(bp));
550 bp.size = prange->npages * PAGE_SIZE;
551 bp.byte_align = PAGE_SIZE;
552 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
553 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
554 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
555 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
556 bp.type = ttm_bo_type_device;
559 /* TODO: Allocate memory from the right memory partition. We can sort
560 * out the details later, once basic memory partitioning is working
562 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
564 pr_debug("failed %d to create bo\n", r);
565 goto create_bo_failed;
568 r = amdgpu_bo_reserve(bo, true);
570 pr_debug("failed %d to reserve bo\n", r);
571 goto reserve_bo_failed;
575 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
577 pr_debug("failed %d to sync bo\n", r);
578 amdgpu_bo_unreserve(bo);
579 goto reserve_bo_failed;
583 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
585 pr_debug("failed %d to reserve bo\n", r);
586 amdgpu_bo_unreserve(bo);
587 goto reserve_bo_failed;
589 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
591 amdgpu_bo_unreserve(bo);
594 prange->svm_bo = svm_bo;
595 prange->ttm_res = bo->tbo.resource;
598 spin_lock(&svm_bo->list_lock);
599 list_add(&prange->svm_bo_list, &svm_bo->range_list);
600 spin_unlock(&svm_bo->list_lock);
605 amdgpu_bo_unref(&bo);
607 dma_fence_put(&svm_bo->eviction_fence->base);
609 prange->ttm_res = NULL;
614 void svm_range_vram_node_free(struct svm_range *prange)
616 svm_range_bo_unref(prange->svm_bo);
617 prange->ttm_res = NULL;
621 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
623 struct kfd_process *p;
624 struct kfd_process_device *pdd;
626 p = container_of(prange->svms, struct kfd_process, svms);
627 pdd = kfd_process_device_data_by_id(p, gpu_id);
629 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
636 struct kfd_process_device *
637 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
639 struct kfd_process *p;
641 p = container_of(prange->svms, struct kfd_process, svms);
643 return kfd_get_process_device_data(node, p);
646 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
648 struct ttm_operation_ctx ctx = { false, false };
650 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
652 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
656 svm_range_check_attr(struct kfd_process *p,
657 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
661 for (i = 0; i < nattr; i++) {
662 uint32_t val = attrs[i].value;
663 int gpuidx = MAX_GPU_INSTANCE;
665 switch (attrs[i].type) {
666 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
667 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
668 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
669 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
671 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
672 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
673 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
675 case KFD_IOCTL_SVM_ATTR_ACCESS:
676 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
677 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
678 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
680 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
682 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
684 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
687 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
692 pr_debug("no GPU 0x%x found\n", val);
694 } else if (gpuidx < MAX_GPU_INSTANCE &&
695 !test_bit(gpuidx, p->svms.bitmap_supported)) {
696 pr_debug("GPU 0x%x not supported\n", val);
705 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
706 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
707 bool *update_mapping)
712 for (i = 0; i < nattr; i++) {
713 switch (attrs[i].type) {
714 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
715 prange->preferred_loc = attrs[i].value;
717 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
718 prange->prefetch_loc = attrs[i].value;
720 case KFD_IOCTL_SVM_ATTR_ACCESS:
721 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
722 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
723 if (!p->xnack_enabled)
724 *update_mapping = true;
726 gpuidx = kfd_process_gpuidx_from_gpuid(p,
728 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
729 bitmap_clear(prange->bitmap_access, gpuidx, 1);
730 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
731 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
732 bitmap_set(prange->bitmap_access, gpuidx, 1);
733 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
735 bitmap_clear(prange->bitmap_access, gpuidx, 1);
736 bitmap_set(prange->bitmap_aip, gpuidx, 1);
739 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
740 *update_mapping = true;
741 prange->flags |= attrs[i].value;
743 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
744 *update_mapping = true;
745 prange->flags &= ~attrs[i].value;
747 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
748 prange->granularity = attrs[i].value;
751 WARN_ONCE(1, "svm_range_check_attr wasn't called?");
757 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
758 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
763 for (i = 0; i < nattr; i++) {
764 switch (attrs[i].type) {
765 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
766 if (prange->preferred_loc != attrs[i].value)
769 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
770 /* Prefetch should always trigger a migration even
771 * if the value of the attribute didn't change.
774 case KFD_IOCTL_SVM_ATTR_ACCESS:
775 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
776 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
777 gpuidx = kfd_process_gpuidx_from_gpuid(p,
779 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
780 if (test_bit(gpuidx, prange->bitmap_access) ||
781 test_bit(gpuidx, prange->bitmap_aip))
783 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
784 if (!test_bit(gpuidx, prange->bitmap_access))
787 if (!test_bit(gpuidx, prange->bitmap_aip))
791 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
792 if ((prange->flags & attrs[i].value) != attrs[i].value)
795 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
796 if ((prange->flags & attrs[i].value) != 0)
799 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
800 if (prange->granularity != attrs[i].value)
804 WARN_ONCE(1, "svm_range_check_attr wasn't called?");
812 * svm_range_debug_dump - print all range information from svms
813 * @svms: svm range list header
815 * Print the start, end and prefetch location of each svm range in the svms
816 * linked list and interval tree, for debugging.
818 * Context: The caller must hold svms->lock
820 static void svm_range_debug_dump(struct svm_range_list *svms)
822 struct interval_tree_node *node;
823 struct svm_range *prange;
825 pr_debug("dump svms 0x%p list\n", svms);
826 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
828 list_for_each_entry(prange, &svms->list, list) {
829 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
830 prange, prange->start, prange->npages,
831 prange->start + prange->npages - 1,
835 pr_debug("dump svms 0x%p interval tree\n", svms);
836 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
837 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
839 prange = container_of(node, struct svm_range, it_node);
840 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
841 prange, prange->start, prange->npages,
842 prange->start + prange->npages - 1,
844 node = interval_tree_iter_next(node, 0, ~0ULL);
849 svm_range_split_array(void *ppnew, void *ppold, size_t size,
850 uint64_t old_start, uint64_t old_n,
851 uint64_t new_start, uint64_t new_n)
853 unsigned char *new, *old, *pold;
858 pold = *(unsigned char **)ppold;
862 new = kvmalloc_array(new_n, size, GFP_KERNEL);
866 d = (new_start - old_start) * size;
867 memcpy(new, pold + d, new_n * size);
869 old = kvmalloc_array(old_n, size, GFP_KERNEL);
875 d = (new_start == old_start) ? new_n * size : 0;
876 memcpy(old, pold + d, old_n * size);
879 *(void **)ppold = old;
880 *(void **)ppnew = new;
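/* For illustration (hypothetical values): splitting an old dma_addr array that
 * covers pages 0x100-0x109 with old_start = 0x100, new_start = 0x104,
 * new_n = 6 and old_n = 4 yields new[] = entries for pages 0x104-0x109 and
 * old[] = entries for pages 0x100-0x103; when new_start == old_start the new
 * array instead takes the head and the old array keeps the tail.
 */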
886 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
887 uint64_t start, uint64_t last)
889 uint64_t npages = last - start + 1;
892 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
893 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
894 sizeof(*old->dma_addr[i]), old->start,
895 npages, new->start, new->npages);
904 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
905 uint64_t start, uint64_t last)
907 uint64_t npages = last - start + 1;
909 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
910 new->svms, new, new->start, start, last);
912 if (new->start == old->start) {
913 new->offset = old->offset;
914 old->offset += new->npages;
916 new->offset = old->offset + npages;
919 new->svm_bo = svm_range_bo_ref(old->svm_bo);
920 new->ttm_res = old->ttm_res;
922 spin_lock(&new->svm_bo->list_lock);
923 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
924 spin_unlock(&new->svm_bo->list_lock);
930 * svm_range_split_adjust - split range and adjust
933 * @old: the old range
934 * @start: the old range adjust to start address in pages
935 * @last: the old range adjust to last address in pages
937 * Copy the system memory dma_addr or vram ttm_res of the old range into the new
938 * range, from new->start for new->npages pages; the remaining old range runs from @start to @last.
942 * 0 - OK, -ENOMEM - out of memory
945 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
946 uint64_t start, uint64_t last)
950 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
951 new->svms, new->start, old->start, old->last, start, last);
953 if (new->start < old->start ||
954 new->last > old->last) {
955 WARN_ONCE(1, "invalid new range start or last\n");
959 r = svm_range_split_pages(new, old, start, last);
963 if (old->actual_loc && old->ttm_res) {
964 r = svm_range_split_nodes(new, old, start, last);
969 old->npages = last - start + 1;
972 new->flags = old->flags;
973 new->preferred_loc = old->preferred_loc;
974 new->prefetch_loc = old->prefetch_loc;
975 new->actual_loc = old->actual_loc;
976 new->granularity = old->granularity;
977 new->mapped_to_gpu = old->mapped_to_gpu;
978 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
979 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
985 * svm_range_split - split a range in 2 ranges
987 * @prange: the svm range to split
988 * @start: the remaining range start address in pages
989 * @last: the remaining range last address in pages
990 * @new: the result new range generated
993 * case 1: if start == prange->start
994 * prange ==> prange[start, last]
995 * new range [last + 1, prange->last]
997 * case 2: if last == prange->last
998 * prange ==> prange[start, last]
999 * new range [prange->start, start - 1]
1002 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
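/* For example (hypothetical addresses): splitting prange [0x1000 0x1fff] with
 * start = 0x1000 and last = 0x17ff keeps prange as [0x1000 0x17ff] and returns
 * a new range [0x1800 0x1fff]; splitting with start = 0x1800 and last = 0x1fff
 * keeps prange as [0x1800 0x1fff] and returns a new range [0x1000 0x17ff].
 */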
1005 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1006 struct svm_range **new)
1008 uint64_t old_start = prange->start;
1009 uint64_t old_last = prange->last;
1010 struct svm_range_list *svms;
1013 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1014 old_start, old_last, start, last);
1016 if (old_start != start && old_last != last)
1018 if (start < old_start || last > old_last)
1021 svms = prange->svms;
1022 if (old_start == start)
1023 *new = svm_range_new(svms, last + 1, old_last, false);
1025 *new = svm_range_new(svms, old_start, start - 1, false);
1029 r = svm_range_split_adjust(*new, prange, start, last);
1031 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1032 r, old_start, old_last, start, last);
1033 svm_range_free(*new, false);
1041 svm_range_split_tail(struct svm_range *prange,
1042 uint64_t new_last, struct list_head *insert_list)
1044 struct svm_range *tail;
1045 int r = svm_range_split(prange, prange->start, new_last, &tail);
1048 list_add(&tail->list, insert_list);
1053 svm_range_split_head(struct svm_range *prange,
1054 uint64_t new_start, struct list_head *insert_list)
1056 struct svm_range *head;
1057 int r = svm_range_split(prange, new_start, prange->last, &head);
1060 list_add(&head->list, insert_list);
1065 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1066 struct svm_range *pchild, enum svm_work_list_ops op)
1068 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1069 pchild, pchild->start, pchild->last, prange, op);
1071 pchild->work_item.mm = mm;
1072 pchild->work_item.op = op;
1073 list_add_tail(&pchild->child_list, &prange->child_list);
1077 * svm_range_split_by_granularity - collect ranges within granularity boundary
1079 * @p: the process with svms list
1081 * @addr: the vm fault address in pages, to split the prange
1082 * @parent: parent range if prange is from child list
1083 * @prange: prange to split
1085 * Trims @prange to be a single aligned block of prange->granularity if
1086 * possible. The head and tail are added to the child_list in @parent.
1088 * Context: caller must hold mmap_read_lock and prange->lock
1091 * 0 - OK, otherwise error code
1094 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1095 unsigned long addr, struct svm_range *parent,
1096 struct svm_range *prange)
1098 struct svm_range *head, *tail;
1099 unsigned long start, last, size;
1102 /* Align the split range start and size to the granularity size, so a single
1103 * PTE can be used for the whole range. This reduces the number of PTEs
1104 * updated and the L1 TLB space used for translation.
1106 size = 1UL << prange->granularity;
1107 start = ALIGN_DOWN(addr, size);
1108 last = ALIGN(addr + 1, size) - 1;
1110 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1111 prange->svms, prange->start, prange->last, start, last, size);
1113 if (start > prange->start) {
1114 r = svm_range_split(prange, start, prange->last, &head);
1117 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1120 if (last < prange->last) {
1121 r = svm_range_split(prange, prange->start, last, &tail);
1124 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1127 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1128 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1129 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1130 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1131 prange, prange->start, prange->last,
1132 SVM_OP_ADD_RANGE_AND_MAP);
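/* For example, with a granularity of 9 (512 pages, i.e. 2MB) a fault at page
 * 0x12345 gives start = ALIGN_DOWN(0x12345, 0x200) = 0x12200 and
 * last = ALIGN(0x12346, 0x200) - 1 = 0x123ff, so prange is trimmed to
 * [0x12200 0x123ff] and any head/tail pieces become children of @parent.
 */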
1137 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1139 return (node_a->adev == node_b->adev ||
1140 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1144 svm_range_get_pte_flags(struct kfd_node *node,
1145 struct svm_range *prange, int domain)
1147 struct kfd_node *bo_node;
1148 uint32_t flags = prange->flags;
1149 uint32_t mapping_flags = 0;
1151 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1152 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1153 bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED;
1154 unsigned int mtype_local;
1156 if (domain == SVM_RANGE_VRAM_DOMAIN)
1157 bo_node = prange->svm_bo->node;
1159 switch (node->adev->ip_versions[GC_HWIP][0]) {
1160 case IP_VERSION(9, 4, 1):
1161 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1162 if (bo_node == node) {
1163 mapping_flags |= coherent ?
1164 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1166 mapping_flags |= coherent ?
1167 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1168 if (svm_nodes_in_same_hive(node, bo_node))
1172 mapping_flags |= coherent ?
1173 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1176 case IP_VERSION(9, 4, 2):
1177 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1178 if (bo_node == node) {
1179 mapping_flags |= coherent ?
1180 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1181 if (node->adev->gmc.xgmi.connected_to_cpu)
1184 mapping_flags |= coherent ?
1185 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1186 if (svm_nodes_in_same_hive(node, bo_node))
1190 mapping_flags |= coherent ?
1191 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1194 case IP_VERSION(9, 4, 3):
1195 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_CC);
1198 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1199 } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
1200 /* local HBM region close to partition */
1201 if (bo_node->adev == node->adev /* TODO: memory partitions &&
1202 bo_node->mem_id == node->mem_id*/)
1203 mapping_flags |= mtype_local;
1204 /* local HBM region far from partition or remote XGMI GPU */
1205 else if (svm_nodes_in_same_hive(bo_node, node))
1206 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1209 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1210 /* system memory accessed by the APU */
1211 } else if (node->adev->flags & AMD_IS_APU) {
1212 /* On NUMA systems, locality is determined per-page
1213 * in amdgpu_gmc_override_vm_pte_flags
1215 if (num_possible_nodes() <= 1)
1216 mapping_flags |= mtype_local;
1218 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1219 /* system memory accessed by the dGPU */
1221 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1225 mapping_flags |= coherent ?
1226 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1229 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1231 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1232 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1233 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1234 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1236 pte_flags = AMDGPU_PTE_VALID;
1237 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1238 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1240 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
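/* Rough summary of the policy above: VRAM local to the faulting GPU gets a
 * cached MTYPE, VRAM on another GPU in the same XGMI hive or elsewhere gets an
 * uncached or non-coherent MTYPE depending on the ASIC and the COHERENT flag,
 * and system-memory mappings additionally get the SYSTEM and SNOOPED PTE bits
 * (snoop is set for any non-VRAM domain).
 */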
1245 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1246 uint64_t start, uint64_t last,
1247 struct dma_fence **fence)
1249 uint64_t init_pte_value = 0;
1251 pr_debug("[0x%llx 0x%llx]\n", start, last);
1253 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
1254 last, init_pte_value, 0, 0, NULL, NULL,
1259 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1260 unsigned long last, uint32_t trigger)
1262 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1263 struct kfd_process_device *pdd;
1264 struct dma_fence *fence = NULL;
1265 struct kfd_process *p;
1269 if (!prange->mapped_to_gpu) {
1270 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1271 prange, prange->start, prange->last);
1275 if (prange->start == start && prange->last == last) {
1276 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1277 prange->mapped_to_gpu = false;
1280 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1282 p = container_of(prange->svms, struct kfd_process, svms);
1284 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1285 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1286 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1288 pr_debug("failed to find device idx %d\n", gpuidx);
1292 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1293 start, last, trigger);
1295 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1296 drm_priv_to_vm(pdd->drm_priv),
1297 start, last, &fence);
1302 r = dma_fence_wait(fence, false);
1303 dma_fence_put(fence);
1308 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1315 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1316 unsigned long offset, unsigned long npages, bool readonly,
1317 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1318 struct dma_fence **fence, bool flush_tlb)
1320 struct amdgpu_device *adev = pdd->dev->adev;
1321 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1323 unsigned long last_start;
1328 last_start = prange->start + offset;
1330 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1331 last_start, last_start + npages - 1, readonly);
1333 for (i = offset; i < offset + npages; i++) {
1334 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1335 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1337 /* Collect all pages in the same address range and memory domain
1338 * that can be mapped with a single call to update mapping.
1340 if (i < offset + npages - 1 &&
1341 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1344 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1345 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1347 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1349 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1351 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1352 prange->svms, last_start, prange->start + i,
1353 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1356 /* TODO: we still need to determine the vm_manager.vram_base_offset based on
1357 * the memory partition.
1359 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
1360 last_start, prange->start + i,
1362 (last_start - prange->start) << PAGE_SHIFT,
1363 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1364 NULL, dma_addr, &vm->last_update);
1366 for (j = last_start - prange->start; j <= i; j++)
1367 dma_addr[j] |= last_domain;
1370 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1373 last_start = prange->start + i + 1;
1376 r = amdgpu_vm_update_pdes(adev, vm, false);
1378 pr_debug("failed %d to update directories 0x%lx\n", r,
1384 *fence = dma_fence_get(vm->last_update);
1391 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1392 unsigned long npages, bool readonly,
1393 unsigned long *bitmap, bool wait, bool flush_tlb)
1395 struct kfd_process_device *pdd;
1396 struct amdgpu_device *bo_adev = NULL;
1397 struct kfd_process *p;
1398 struct dma_fence *fence = NULL;
1402 if (prange->svm_bo && prange->ttm_res)
1403 bo_adev = prange->svm_bo->node->adev;
1405 p = container_of(prange->svms, struct kfd_process, svms);
1406 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1407 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1408 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1410 pr_debug("failed to find device idx %d\n", gpuidx);
1414 pdd = kfd_bind_process_to_device(pdd->dev, p);
1418 if (bo_adev && pdd->dev->adev != bo_adev &&
1419 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1420 pr_debug("cannot map to device idx %d\n", gpuidx);
1424 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1425 prange->dma_addr[gpuidx],
1426 bo_adev, wait ? &fence : NULL,
1432 r = dma_fence_wait(fence, false);
1433 dma_fence_put(fence);
1436 pr_debug("failed %d to dma fence wait\n", r);
1441 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1447 struct svm_validate_context {
1448 struct kfd_process *process;
1449 struct svm_range *prange;
1451 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1452 struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
1453 struct list_head validate_list;
1454 struct ww_acquire_ctx ticket;
1457 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1459 struct kfd_process_device *pdd;
1460 struct amdgpu_vm *vm;
1464 INIT_LIST_HEAD(&ctx->validate_list);
1465 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1466 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1468 pr_debug("failed to find device idx %d\n", gpuidx);
1471 vm = drm_priv_to_vm(pdd->drm_priv);
1473 ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1474 ctx->tv[gpuidx].num_shared = 4;
1475 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1478 r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1481 pr_debug("failed %d to reserve bo\n", r);
1485 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1486 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1488 pr_debug("failed to find device idx %d\n", gpuidx);
1493 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1494 drm_priv_to_vm(pdd->drm_priv),
1495 svm_range_bo_validate, NULL);
1497 pr_debug("failed %d validate pt bos\n", r);
1505 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1509 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1511 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1514 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1516 struct kfd_process_device *pdd;
1518 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1520 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1524 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1526 * To prevent concurrent destruction or change of range attributes, the
1527 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1528 * because that would block concurrent evictions and lead to deadlocks. To
1529 * serialize concurrent migrations or validations of the same range, the
1530 * prange->migrate_mutex must be held.
1532 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its eviction fence).
1535 * The following sequence ensures race-free validation and GPU mapping:
1537 * 1. Reserve page table (and SVM BO if range is in VRAM)
1538 * 2. hmm_range_fault to get page addresses (if system memory)
1539 * 3. DMA-map pages (if system memory)
1540 * 4-a. Take notifier lock
1541 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1542 * 4-c. Check that the range was not split or otherwise invalidated
1543 * 4-d. Update GPU page table
1544 * 4-e. Release notifier lock
1545 * 5. Release page table (and SVM BO) reservation
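/* In svm_range_validate_and_map() below, step 4-b corresponds to the
 * amdgpu_hmm_range_get_pages_done() check and step 4-c to the check of
 * prange->child_list, both done under svm_range_lock().
 */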
1547 static int svm_range_validate_and_map(struct mm_struct *mm,
1548 struct svm_range *prange, int32_t gpuidx,
1549 bool intr, bool wait, bool flush_tlb)
1551 struct svm_validate_context ctx;
1552 unsigned long start, end, addr;
1553 struct kfd_process *p;
1558 ctx.process = container_of(prange->svms, struct kfd_process, svms);
1559 ctx.prange = prange;
1562 if (gpuidx < MAX_GPU_INSTANCE) {
1563 bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1564 bitmap_set(ctx.bitmap, gpuidx, 1);
1565 } else if (ctx.process->xnack_enabled) {
1566 bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1568 /* If prefetch range to GPU, or GPU retry fault migrate range to
1569 * GPU, which has ACCESS attribute to the range, create mapping
1572 if (prange->actual_loc) {
1573 gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1574 prange->actual_loc);
1576 WARN_ONCE(1, "failed get device by id 0x%x\n",
1577 prange->actual_loc);
1580 if (test_bit(gpuidx, prange->bitmap_access))
1581 bitmap_set(ctx.bitmap, gpuidx, 1);
1584 bitmap_or(ctx.bitmap, prange->bitmap_access,
1585 prange->bitmap_aip, MAX_GPU_INSTANCE);
1588 if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
1589 if (!prange->mapped_to_gpu)
1592 bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1595 if (prange->actual_loc && !prange->ttm_res) {
1596 /* This should never happen. actual_loc gets set by
1597 * svm_migrate_ram_to_vram after allocating a BO.
1599 WARN_ONCE(1, "VRAM BO missing during validation\n");
1603 svm_range_reserve_bos(&ctx);
1605 p = container_of(prange->svms, struct kfd_process, svms);
1606 owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1608 for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1609 if (kfd_svm_page_owner(p, idx) != owner) {
1615 start = prange->start << PAGE_SHIFT;
1616 end = (prange->last + 1) << PAGE_SHIFT;
1617 for (addr = start; addr < end && !r; ) {
1618 struct hmm_range *hmm_range;
1619 struct vm_area_struct *vma;
1621 unsigned long offset;
1622 unsigned long npages;
1625 vma = vma_lookup(mm, addr);
1630 readonly = !(vma->vm_flags & VM_WRITE);
1632 next = min(vma->vm_end, end);
1633 npages = (next - addr) >> PAGE_SHIFT;
1634 WRITE_ONCE(p->svms.faulting_task, current);
1635 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1636 readonly, owner, NULL,
1638 WRITE_ONCE(p->svms.faulting_task, NULL);
1640 pr_debug("failed %d to get svm range pages\n", r);
1644 offset = (addr - start) >> PAGE_SHIFT;
1645 r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1646 hmm_range->hmm_pfns);
1648 pr_debug("failed %d to dma map range\n", r);
1652 svm_range_lock(prange);
1653 if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1654 pr_debug("hmm update the range, need validate again\n");
1658 if (!list_empty(&prange->child_list)) {
1659 pr_debug("range split by unmap in parallel, validate again\n");
1664 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1665 ctx.bitmap, wait, flush_tlb);
1668 svm_range_unlock(prange);
1674 prange->validated_once = true;
1675 prange->mapped_to_gpu = true;
1679 svm_range_unreserve_bos(&ctx);
1682 prange->validate_timestamp = ktime_get_boottime();
1688 * svm_range_list_lock_and_flush_work - flush pending deferred work
1690 * @svms: the svm range list
1691 * @mm: the mm structure
1693 * Context: Returns with mmap write lock held, pending deferred work flushed
1697 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1698 struct mm_struct *mm)
1701 flush_work(&svms->deferred_list_work);
1702 mmap_write_lock(mm);
1704 if (list_empty(&svms->deferred_range_list))
1706 mmap_write_unlock(mm);
1707 pr_debug("retry flush\n");
1708 goto retry_flush_work;
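/* The retry is needed because the deferred work itself takes the mmap write
 * lock (see svm_range_deferred_list_work), so it must be flushed before this
 * function takes the lock; any deferred work queued in the meantime forces
 * another flush attempt.
 */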
1711 static void svm_range_restore_work(struct work_struct *work)
1713 struct delayed_work *dwork = to_delayed_work(work);
1714 struct amdkfd_process_info *process_info;
1715 struct svm_range_list *svms;
1716 struct svm_range *prange;
1717 struct kfd_process *p;
1718 struct mm_struct *mm;
1723 svms = container_of(dwork, struct svm_range_list, restore_work);
1724 evicted_ranges = atomic_read(&svms->evicted_ranges);
1725 if (!evicted_ranges)
1728 pr_debug("restore svm ranges\n");
1730 p = container_of(svms, struct kfd_process, svms);
1731 process_info = p->kgd_process_info;
1733 /* Keep an mm reference while svm_range_validate_and_map maps the ranges */
1734 mm = get_task_mm(p->lead_thread);
1736 pr_debug("svms 0x%p process mm gone\n", svms);
1740 mutex_lock(&process_info->lock);
1741 svm_range_list_lock_and_flush_work(svms, mm);
1742 mutex_lock(&svms->lock);
1744 evicted_ranges = atomic_read(&svms->evicted_ranges);
1746 list_for_each_entry(prange, &svms->list, list) {
1747 invalid = atomic_read(&prange->invalid);
1751 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1752 prange->svms, prange, prange->start, prange->last,
1756 * If the range is migrating, wait until the migration is done.
1758 mutex_lock(&prange->migrate_mutex);
1760 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1761 false, true, false);
1763 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1766 mutex_unlock(&prange->migrate_mutex);
1768 goto out_reschedule;
1770 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1771 goto out_reschedule;
1774 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1776 goto out_reschedule;
1780 r = kgd2kfd_resume_mm(mm);
1782 /* No recovery from this failure. Probably the CP is
1783 * hanging. No point trying again.
1785 pr_debug("failed %d to resume KFD\n", r);
1788 pr_debug("restore svm ranges successfully\n");
1791 mutex_unlock(&svms->lock);
1792 mmap_write_unlock(mm);
1793 mutex_unlock(&process_info->lock);
1795 /* If validation failed, reschedule another attempt */
1796 if (evicted_ranges) {
1797 pr_debug("reschedule to restore svm range\n");
1798 schedule_delayed_work(&svms->restore_work,
1799 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1801 kfd_smi_event_queue_restore_rescheduled(mm);
1807 * svm_range_evict - evict svm range
1808 * @prange: svm range structure
1809 * @mm: current process mm_struct
1810 * @start: first page address (in pages) of the range being invalidated
1811 * @last: last page address (in pages) of the range being invalidated
1813 * Stop all queues of the process to ensure the GPU doesn't access the memory, then
1814 * return to let the CPU evict the buffer and proceed with the CPU page table update.
1816 * No lock is needed to synchronize CPU page table invalidation with GPU execution.
1817 * If an invalidation happens while the restore work is running, the restore work will
1818 * restart to ensure it gets the latest CPU page mapping for the GPU, then start
1822 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1823 unsigned long start, unsigned long last,
1824 enum mmu_notifier_event event)
1826 struct svm_range_list *svms = prange->svms;
1827 struct svm_range *pchild;
1828 struct kfd_process *p;
1831 p = container_of(svms, struct kfd_process, svms);
1833 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1834 svms, prange->start, prange->last, start, last);
1836 if (!p->xnack_enabled ||
1837 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1839 bool mapped = prange->mapped_to_gpu;
1841 list_for_each_entry(pchild, &prange->child_list, child_list) {
1842 if (!pchild->mapped_to_gpu)
1845 mutex_lock_nested(&pchild->lock, 1);
1846 if (pchild->start <= last && pchild->last >= start) {
1847 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1848 pchild->start, pchild->last);
1849 atomic_inc(&pchild->invalid);
1851 mutex_unlock(&pchild->lock);
1857 if (prange->start <= last && prange->last >= start)
1858 atomic_inc(&prange->invalid);
1860 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1861 if (evicted_ranges != 1)
1864 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1865 prange->svms, prange->start, prange->last);
1867 /* First eviction, stop the queues */
1868 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1870 pr_debug("failed to quiesce KFD\n");
1872 pr_debug("schedule to restore svm %p ranges\n", svms);
1873 schedule_delayed_work(&svms->restore_work,
1874 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1879 if (event == MMU_NOTIFY_MIGRATE)
1880 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1882 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1884 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1885 prange->svms, start, last);
1886 list_for_each_entry(pchild, &prange->child_list, child_list) {
1887 mutex_lock_nested(&pchild->lock, 1);
1888 s = max(start, pchild->start);
1889 l = min(last, pchild->last);
1891 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1892 mutex_unlock(&pchild->lock);
1894 s = max(start, prange->start);
1895 l = min(last, prange->last);
1897 svm_range_unmap_from_gpus(prange, s, l, trigger);
1903 static struct svm_range *svm_range_clone(struct svm_range *old)
1905 struct svm_range *new;
1907 new = svm_range_new(old->svms, old->start, old->last, false);
1912 new->ttm_res = old->ttm_res;
1913 new->offset = old->offset;
1914 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1915 spin_lock(&new->svm_bo->list_lock);
1916 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1917 spin_unlock(&new->svm_bo->list_lock);
1919 new->flags = old->flags;
1920 new->preferred_loc = old->preferred_loc;
1921 new->prefetch_loc = old->prefetch_loc;
1922 new->actual_loc = old->actual_loc;
1923 new->granularity = old->granularity;
1924 new->mapped_to_gpu = old->mapped_to_gpu;
1925 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1926 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1931 void svm_range_set_max_pages(struct amdgpu_device *adev)
1934 uint64_t pages, _pages;
1936 /* 1/32 VRAM size in pages */
1937 pages = adev->gmc.real_vram_size >> 17;
1938 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
1939 pages = rounddown_pow_of_two(pages);
1941 max_pages = READ_ONCE(max_svm_range_pages);
1942 _pages = min_not_zero(max_pages, pages);
1943 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
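/* For example, on a device with 16GB of VRAM: 16GB >> 17 = 0x20000 pages
 * (512MB), which is already within [2MB, 1GB] and a power of two, so
 * max_svm_range_pages becomes min_not_zero(its previous value, 0x20000).
 */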
1947 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
1948 uint64_t max_pages, struct list_head *insert_list,
1949 struct list_head *update_list)
1951 struct svm_range *prange;
1954 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
1955 max_pages, start, last);
1957 while (last >= start) {
1958 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
1960 prange = svm_range_new(svms, start, l, true);
1963 list_add(&prange->list, insert_list);
1964 list_add(&prange->update_list, update_list);
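/* For example (hypothetical values), with max_pages = 0x200, splitting
 * [0x300 0x8ff] creates the ranges [0x300 0x3ff], [0x400 0x5ff], [0x600 0x7ff]
 * and [0x800 0x8ff]: each new range covers at most max_pages pages and ends on
 * a max_pages-aligned boundary, except possibly the final one.
 */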
1972 * svm_range_add - add svm range and handle overlap
1973 * @p: the process whose svms the range is added to
1974 * @start: page size aligned
1975 * @size: page size aligned
1976 * @nattr: number of attributes
1977 * @attrs: array of attributes
1978 * @update_list: output, the ranges need validate and update GPU mapping
1979 * @insert_list: output, the ranges need insert to svms
1980 * @remove_list: output, the ranges are replaced and need remove from svms
1982 * Check if the virtual address range has overlap with any existing ranges,
1983 * split partly overlapping ranges and add new ranges in the gaps. All changes
1984 * should be applied to the range_list and interval tree transactionally. If
1985 * any range split or allocation fails, the entire update fails. Therefore any
1986 * existing overlapping svm_ranges are cloned and the original svm_ranges are left unchanged.
1989 * If the transaction succeeds, the caller can update and insert clones and
1990 * new ranges, then free the originals.
1992 * Otherwise the caller can free the clones and new ranges, while the old
1993 * svm_ranges remain unchanged.
1995 * Context: Process context, caller must hold svms->lock
1998 * 0 - OK, otherwise error code
2001 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2002 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2003 struct list_head *update_list, struct list_head *insert_list,
2004 struct list_head *remove_list)
2006 unsigned long last = start + size - 1UL;
2007 struct svm_range_list *svms = &p->svms;
2008 struct interval_tree_node *node;
2009 struct svm_range *prange;
2010 struct svm_range *tmp;
2011 struct list_head new_list;
2014 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2016 INIT_LIST_HEAD(update_list);
2017 INIT_LIST_HEAD(insert_list);
2018 INIT_LIST_HEAD(remove_list);
2019 INIT_LIST_HEAD(&new_list);
2021 node = interval_tree_iter_first(&svms->objects, start, last);
2023 struct interval_tree_node *next;
2024 unsigned long next_start;
2026 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2029 prange = container_of(node, struct svm_range, it_node);
2030 next = interval_tree_iter_next(node, start, last);
2031 next_start = min(node->last, last) + 1;
2033 if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
2035 } else if (node->start < start || node->last > last) {
2036 /* node intersects the update range and its attributes
2037 * will change. Clone and split it, apply updates only
2038 * to the overlapping part
2040 struct svm_range *old = prange;
2042 prange = svm_range_clone(old);
2048 list_add(&old->update_list, remove_list);
2049 list_add(&prange->list, insert_list);
2050 list_add(&prange->update_list, update_list);
2052 if (node->start < start) {
2053 pr_debug("change old range start\n");
2054 r = svm_range_split_head(prange, start,
2059 if (node->last > last) {
2060 pr_debug("change old range last\n");
2061 r = svm_range_split_tail(prange, last,
2067 /* The node is contained within start..last, only its attributes need updating */
2070 list_add(&prange->update_list, update_list);
2073 /* insert a new node if needed */
2074 if (node->start > start) {
2075 r = svm_range_split_new(svms, start, node->start - 1,
2076 READ_ONCE(max_svm_range_pages),
2077 &new_list, update_list);
2086 /* add a final range at the end if needed */
2088 r = svm_range_split_new(svms, start, last,
2089 READ_ONCE(max_svm_range_pages),
2090 &new_list, update_list);
2094 list_for_each_entry_safe(prange, tmp, insert_list, list)
2095 svm_range_free(prange, false);
2096 list_for_each_entry_safe(prange, tmp, &new_list, list)
2097 svm_range_free(prange, true);
2099 list_splice(&new_list, insert_list);
2106 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2107 struct svm_range *prange)
2109 unsigned long start;
2112 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2113 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2115 if (prange->start == start && prange->last == last)
2118 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2119 prange->svms, prange, start, last, prange->start,
2122 if (start != 0 && last != 0) {
2123 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2124 svm_range_remove_notifier(prange);
2126 prange->it_node.start = prange->start;
2127 prange->it_node.last = prange->last;
2129 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2130 svm_range_add_notifier_locked(mm, prange);
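/* The re-insertion above is needed when prange->start or prange->last changed,
 * e.g. after an unmap split the range: the stale interval-tree node and mmu
 * notifier still describe the old extent, so both are removed and registered
 * again with the new bounds.
 */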
2134 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2135 struct mm_struct *mm)
2137 switch (prange->work_item.op) {
2139 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2140 svms, prange, prange->start, prange->last);
2142 case SVM_OP_UNMAP_RANGE:
2143 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2144 svms, prange, prange->start, prange->last);
2145 svm_range_unlink(prange);
2146 svm_range_remove_notifier(prange);
2147 svm_range_free(prange, true);
2149 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2150 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2151 svms, prange, prange->start, prange->last);
2152 svm_range_update_notifier_and_interval_tree(mm, prange);
2154 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2155 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2156 svms, prange, prange->start, prange->last);
2157 svm_range_update_notifier_and_interval_tree(mm, prange);
2158 /* TODO: implement deferred validation and mapping */
2160 case SVM_OP_ADD_RANGE:
2161 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2162 prange->start, prange->last);
2163 svm_range_add_to_svms(prange);
2164 svm_range_add_notifier_locked(mm, prange);
2166 case SVM_OP_ADD_RANGE_AND_MAP:
2167 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2168 prange, prange->start, prange->last);
2169 svm_range_add_to_svms(prange);
2170 svm_range_add_notifier_locked(mm, prange);
2171 /* TODO: implement deferred validation and mapping */
2174 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2175 prange->work_item.op);
2179 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2181 struct kfd_process_device *pdd;
2182 struct kfd_process *p;
2186 p = container_of(svms, struct kfd_process, svms);
2189 drain = atomic_read(&svms->drain_pagefaults);
2193 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2198 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2200 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2201 pdd->dev->adev->irq.retry_cam_enabled ?
2202 &pdd->dev->adev->irq.ih :
2203 &pdd->dev->adev->irq.ih1);
2205 if (pdd->dev->adev->irq.retry_cam_enabled)
2206 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2207 &pdd->dev->adev->irq.ih_soft);
2210 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2212 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2216 static void svm_range_deferred_list_work(struct work_struct *work)
2218 struct svm_range_list *svms;
2219 struct svm_range *prange;
2220 struct mm_struct *mm;
2222 svms = container_of(work, struct svm_range_list, deferred_list_work);
2223 pr_debug("enter svms 0x%p\n", svms);
2225 spin_lock(&svms->deferred_list_lock);
2226 while (!list_empty(&svms->deferred_range_list)) {
2227 prange = list_first_entry(&svms->deferred_range_list,
2228 struct svm_range, deferred_list);
2229 spin_unlock(&svms->deferred_list_lock);
2231 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2232 prange->start, prange->last, prange->work_item.op);
2234 mm = prange->work_item.mm;
2236 mmap_write_lock(mm);
2238 /* Checking for the need to drain retry faults must be inside
2239 * mmap write lock to serialize with munmap notifiers.
2241 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2242 mmap_write_unlock(mm);
2243 svm_range_drain_retry_fault(svms);
2247 /* Removal from deferred_list must be done inside the mmap write lock, for two reasons:
2249 * 1. unmap_from_cpu may change work_item.op and add the range
2250 * to deferred_list again, causing a use-after-free bug.
2251 * 2. svm_range_list_lock_and_flush_work may hold mmap write
2252 * lock and continue because deferred_list is empty, but
2253 * deferred_list work is actually waiting for mmap lock.
2255 spin_lock(&svms->deferred_list_lock);
2256 list_del_init(&prange->deferred_list);
2257 spin_unlock(&svms->deferred_list_lock);
2259 mutex_lock(&svms->lock);
2260 mutex_lock(&prange->migrate_mutex);
2261 while (!list_empty(&prange->child_list)) {
2262 struct svm_range *pchild;
2264 pchild = list_first_entry(&prange->child_list,
2265 struct svm_range, child_list);
2266 pr_debug("child prange 0x%p op %d\n", pchild,
2267 pchild->work_item.op);
2268 list_del_init(&pchild->child_list);
2269 svm_range_handle_list_op(svms, pchild, mm);
2271 mutex_unlock(&prange->migrate_mutex);
2273 svm_range_handle_list_op(svms, prange, mm);
2274 mutex_unlock(&svms->lock);
2275 mmap_write_unlock(mm);
2277 /* Pairs with mmget in svm_range_add_list_work */
2280 spin_lock(&svms->deferred_list_lock);
2282 spin_unlock(&svms->deferred_list_lock);
2283 pr_debug("exit svms 0x%p\n", svms);
2287 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2288 struct mm_struct *mm, enum svm_work_list_ops op)
2290 spin_lock(&svms->deferred_list_lock);
2291 /* if prange is on the deferred list */
2292 if (!list_empty(&prange->deferred_list)) {
2293 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2294 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
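/* Update the pending op, but never let SVM_OP_NULL replace a real op and
 * never overwrite a pending SVM_OP_UNMAP_RANGE: once the range is marked
 * for removal it must still be freed by the deferred worker.
 */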
2295 if (op != SVM_OP_NULL &&
2296 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2297 prange->work_item.op = op;
2299 prange->work_item.op = op;
2301 /* Pairs with mmput in deferred_list_work */
2303 prange->work_item.mm = mm;
2304 list_add_tail(&prange->deferred_list,
2305 &prange->svms->deferred_range_list);
2306 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2307 prange, prange->start, prange->last, op);
2309 spin_unlock(&svms->deferred_list_lock);
2312 void schedule_deferred_list_work(struct svm_range_list *svms)
2314 spin_lock(&svms->deferred_list_lock);
2315 if (!list_empty(&svms->deferred_range_list))
2316 schedule_work(&svms->deferred_list_work);
2317 spin_unlock(&svms->deferred_list_lock);
2321 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2322 struct svm_range *prange, unsigned long start,
2325 struct svm_range *head;
2326 struct svm_range *tail;
2328 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2329 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2330 prange->start, prange->last);
2333 if (start > prange->last || last < prange->start)
2336 head = tail = prange;
2337 if (start > prange->start)
2338 svm_range_split(prange, prange->start, start - 1, &tail);
2339 if (last < tail->last)
2340 svm_range_split(tail, last + 1, tail->last, &head);
2342 if (head != prange && tail != prange) {
2343 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2344 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2345 } else if (tail != prange) {
2346 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2347 } else if (head != prange) {
2348 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2349 } else if (parent != prange) {
2350 prange->work_item.op = SVM_OP_UNMAP_RANGE;
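/* Worked example (hypothetical page numbers, assuming svm_range_split() keeps
 * [start last] in the range being split and returns the remainder as the new
 * range): unmapping [0x140 0x17f] from a registered prange [0x100 0x1ff]
 * leaves prange as [0x100 0x13f], adds the unmapped middle piece
 * [0x140 0x17f] as a child with SVM_OP_UNMAP_RANGE, and adds the trailing
 * piece [0x180 0x1ff] as a child with SVM_OP_ADD_RANGE so the deferred worker
 * re-registers it.
 */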
2355 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2356 unsigned long start, unsigned long last)
2358 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2359 struct svm_range_list *svms;
2360 struct svm_range *pchild;
2361 struct kfd_process *p;
2365 p = kfd_lookup_process_by_mm(mm);
2370 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2371 prange, prange->start, prange->last, start, last);
2373 /* Make sure pending page faults are drained in the deferred worker
2374 * before the range is freed to avoid straggler interrupts on
2375 * unmapped memory causing "phantom faults".
2377 atomic_inc(&svms->drain_pagefaults);
2379 unmap_parent = start <= prange->start && last >= prange->last;
2381 list_for_each_entry(pchild, &prange->child_list, child_list) {
2382 mutex_lock_nested(&pchild->lock, 1);
2383 s = max(start, pchild->start);
2384 l = min(last, pchild->last);
2386 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2387 svm_range_unmap_split(mm, prange, pchild, start, last);
2388 mutex_unlock(&pchild->lock);
2390 s = max(start, prange->start);
2391 l = min(last, prange->last);
2393 svm_range_unmap_from_gpus(prange, s, l, trigger);
2394 svm_range_unmap_split(mm, prange, prange, start, last);
2397 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2399 svm_range_add_list_work(svms, prange, mm,
2400 SVM_OP_UPDATE_RANGE_NOTIFIER);
2401 schedule_deferred_list_work(svms);
2403 kfd_unref_process(p);
2407 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2408 * @mni: mmu_interval_notifier struct
2409 * @range: mmu_notifier_range struct
2410 * @cur_seq: value to pass to mmu_interval_set_seq()
2412 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2413 * otherwise it comes from migration or a CPU page invalidation callback.
2415 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2416 * work thread, and split prange if only part of prange is unmapped.
2418 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2419 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2420 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2421 * update GPU mapping to recover.
2423 * Context: mmap lock, notifier_invalidate_start lock are held
2424 * for invalidate event, prange lock is held if this is from migration
2427 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2428 const struct mmu_notifier_range *range,
2429 unsigned long cur_seq)
2431 struct svm_range *prange;
2432 unsigned long start;
2435 if (range->event == MMU_NOTIFY_RELEASE)
2437 if (!mmget_not_zero(mni->mm))
2440 start = mni->interval_tree.start;
2441 last = mni->interval_tree.last;
2442 start = max(start, range->start) >> PAGE_SHIFT;
2443 last = min(last, range->end - 1) >> PAGE_SHIFT;
2444 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2445 start, last, range->start >> PAGE_SHIFT,
2446 (range->end - 1) >> PAGE_SHIFT,
2447 mni->interval_tree.start >> PAGE_SHIFT,
2448 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2450 prange = container_of(mni, struct svm_range, notifier);
2452 svm_range_lock(prange);
2453 mmu_interval_set_seq(mni, cur_seq);
2455 switch (range->event) {
2456 case MMU_NOTIFY_UNMAP:
2457 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2460 svm_range_evict(prange, mni->mm, start, last, range->event);
2464 svm_range_unlock(prange);
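/* Clamp sketch (illustrative, hypothetical addresses, 4KB pages): with a
 * notifier interval of bytes [0x1000000 0x1ffffff] and an invalidation of
 * bytes [0x1234000 0x1236000), the max/min above select bytes
 * [0x1234000 0x1235fff], which the PAGE_SHIFT conversion turns into pages
 * [0x1234 0x1235].
 */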
2471 * svm_range_from_addr - find svm range from fault address
2472 * @svms: svm range list header
2473 * @addr: address to search range interval tree, in pages
2474 * @parent: parent range if range is on child list
2476 * Context: The caller must hold svms->lock
2478 * Return: the svm_range found or NULL
2481 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2482 struct svm_range **parent)
2484 struct interval_tree_node *node;
2485 struct svm_range *prange;
2486 struct svm_range *pchild;
2488 node = interval_tree_iter_first(&svms->objects, addr, addr);
2492 prange = container_of(node, struct svm_range, it_node);
2493 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2494 addr, prange->start, prange->last, node->start, node->last);
2496 if (addr >= prange->start && addr <= prange->last) {
2501 list_for_each_entry(pchild, &prange->child_list, child_list)
2502 if (addr >= pchild->start && addr <= pchild->last) {
2503 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2504 addr, pchild->start, pchild->last);
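/* Usage sketch (hypothetical caller, not part of the driver): lookups must be
 * done under svms->lock so the interval tree and child lists cannot change
 * underneath the search.
 */
#if 0
static struct svm_range *example_lookup(struct svm_range_list *svms,
					unsigned long addr /* in pages */)
{
	struct svm_range *parent = NULL;
	struct svm_range *prange;

	mutex_lock(&svms->lock);
	prange = svm_range_from_addr(svms, addr, &parent);
	mutex_unlock(&svms->lock);

	return prange;
}
#endif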
2513 /* svm_range_best_restore_location - decide the best fault restore location
2514 * @prange: svm range structure
2515 * @node: the kfd_node on which the vm fault happened
2517 * This is only called when xnack is on, to decide the best location to restore
2518 * the range mapping after a GPU vm fault. The caller uses the best location to
2519 * migrate the range if the actual loc is not the best location, then updates the
2520 * GPU page table mapping to the best location.
2522 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2523 * If the vm fault gpu idx is set in the range ACCESSIBLE bitmap, best_loc is the vm fault gpu.
2524 * If the vm fault gpu idx is set in the range ACCESSIBLE_IN_PLACE bitmap, then
2525 * if the range actual loc is cpu, best_loc is cpu;
2526 * if the vm fault gpu is in the same xgmi hive as the range actual loc gpu,
 * best_loc is the range actual loc.
2528 * Otherwise the faulting GPU has no access and best_loc is -1.
2531 * Return: -1 means the vm fault GPU has no access
2532 * 0 for CPU or GPU id
2535 svm_range_best_restore_location(struct svm_range *prange,
2536 struct kfd_node *node,
2539 struct kfd_node *bo_node, *preferred_node;
2540 struct kfd_process *p;
2544 p = container_of(prange->svms, struct kfd_process, svms);
2546 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2548 pr_debug("failed to get gpuid from kgd\n");
2552 if (node->adev->gmc.is_app_apu)
2555 if (prange->preferred_loc == gpuid ||
2556 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2557 return prange->preferred_loc;
2558 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2559 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2560 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2561 return prange->preferred_loc;
2565 if (test_bit(*gpuidx, prange->bitmap_access))
2568 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2569 if (!prange->actual_loc)
2572 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2573 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2574 return prange->actual_loc;
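/* Interpretation of the return value above: -1 means the faulting GPU has no
 * access and the fault cannot be recovered; 0 means restore the range in
 * system memory; any other value is the GPU id whose VRAM should hold the
 * range before the GPU page table is updated.
 */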
2583 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2584 unsigned long *start, unsigned long *last,
2585 bool *is_heap_stack)
2587 struct vm_area_struct *vma;
2588 struct interval_tree_node *node;
2589 unsigned long start_limit, end_limit;
2591 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2593 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2597 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2598 vma->vm_end >= vma->vm_mm->start_brk) ||
2599 (vma->vm_start <= vma->vm_mm->start_stack &&
2600 vma->vm_end >= vma->vm_mm->start_stack);
2602 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2603 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2604 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2605 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2606 /* First range that starts after the fault address */
2607 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2609 end_limit = min(end_limit, node->start);
2610 /* Last range that ends before the fault address */
2611 node = container_of(rb_prev(&node->rb),
2612 struct interval_tree_node, rb);
2614 /* Last range must end before addr because
2615 * there was no range after addr
2617 node = container_of(rb_last(&p->svms.objects.rb_root),
2618 struct interval_tree_node, rb);
2621 if (node->last >= addr) {
2622 WARN(1, "Overlap with prev node and page fault addr\n");
2625 start_limit = max(start_limit, node->last + 1);
2628 *start = start_limit;
2629 *last = end_limit - 1;
2631 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2632 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2633 *start, *last, *is_heap_stack);
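/* Alignment sketch (illustrative, hypothetical values, not part of the
 * driver): 2UL << 8 is 512 pages, i.e. 2MB with 4KB pages, so the candidate
 * range is first clamped to the surrounding 2MB-aligned window and then
 * further limited by neighbouring ranges.
 */
#if 0
static void example_boundary_alignment(void)
{
	unsigned long addr = 0x345;			/* fault address, in pages */
	unsigned long lo = ALIGN_DOWN(addr, 2UL << 8);	/* 0x200 */
	unsigned long hi = ALIGN(addr + 1, 2UL << 8);	/* 0x400 */

	(void)lo;
	(void)hi;
}
#endif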
2639 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2640 uint64_t *bo_s, uint64_t *bo_l)
2642 struct amdgpu_bo_va_mapping *mapping;
2643 struct interval_tree_node *node;
2644 struct amdgpu_bo *bo = NULL;
2645 unsigned long userptr;
2649 for (i = 0; i < p->n_pdds; i++) {
2650 struct amdgpu_vm *vm;
2652 if (!p->pdds[i]->drm_priv)
2655 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2656 r = amdgpu_bo_reserve(vm->root.bo, false);
2660 /* Check userptr by searching entire vm->va interval tree */
2661 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2663 mapping = container_of((struct rb_node *)node,
2664 struct amdgpu_bo_va_mapping, rb);
2665 bo = mapping->bo_va->base.bo;
2667 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2668 start << PAGE_SHIFT,
2671 node = interval_tree_iter_next(node, 0, ~0ULL);
2675 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2678 *bo_s = userptr >> PAGE_SHIFT;
2679 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2681 amdgpu_bo_unreserve(vm->root.bo);
2684 amdgpu_bo_unreserve(vm->root.bo);
2690 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2691 struct kfd_process *p,
2692 struct mm_struct *mm,
2695 struct svm_range *prange = NULL;
2696 unsigned long start, last;
2697 uint32_t gpuid, gpuidx;
2703 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2707 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2708 if (r != -EADDRINUSE)
2709 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2711 if (r == -EADDRINUSE) {
2712 if (addr >= bo_s && addr <= bo_l)
2715 /* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2720 prange = svm_range_new(&p->svms, start, last, true);
2722 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2725 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2726 pr_debug("failed to get gpuid from kgd\n");
2727 svm_range_free(prange, true);
2732 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2734 svm_range_add_to_svms(prange);
2735 svm_range_add_notifier_locked(mm, prange);
2740 /* svm_range_skip_recover - decide if prange can be recovered
2741 * @prange: svm range structure
2743 * The GPU vm retry fault handler skips recovering the range in these cases:
2744 * 1. prange is on the deferred list to be removed after unmap; this is a stale
2745 * fault, and the deferred list work will drain the stale fault before freeing the prange.
2746 * 2. prange is on the deferred list to add the interval notifier after a split, or
2747 * 3. prange is a child range split from a parent prange; recover it later
2748 * after the interval notifier is added.
2750 * Return: true to skip recover, false to recover
2752 static bool svm_range_skip_recover(struct svm_range *prange)
2754 struct svm_range_list *svms = prange->svms;
2756 spin_lock(&svms->deferred_list_lock);
2757 if (list_empty(&prange->deferred_list) &&
2758 list_empty(&prange->child_list)) {
2759 spin_unlock(&svms->deferred_list_lock);
2762 spin_unlock(&svms->deferred_list_lock);
2764 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2765 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2766 svms, prange, prange->start, prange->last);
2769 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2770 prange->work_item.op == SVM_OP_ADD_RANGE) {
2771 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2772 svms, prange, prange->start, prange->last);
2779 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2782 struct kfd_process_device *pdd;
2784 /* The fault is on a different page of the same range,
2785 * or the fault is skipped to be recovered later,
2786 * or the fault is on an invalid virtual address.
2788 if (gpuidx == MAX_GPU_INSTANCE) {
2792 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2797 /* The fault is recovered,
2798 * or the fault cannot be recovered because the GPU has no access to the range.
2800 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2802 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2806 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2808 unsigned long requested = VM_READ;
2811 requested |= VM_WRITE;
2813 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2815 return (vma->vm_flags & requested) == requested;
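/* Permission sketch (illustrative, hypothetical values, not part of the
 * driver): the check above is a flag subset test. For a write fault on a
 * read-only VMA, requested contains VM_WRITE but the vma flags do not, so
 * the fault is not allowed.
 */
#if 0
static bool example_fault_allowed(void)
{
	unsigned long vm_flags = VM_READ;		/* read-only mapping */
	unsigned long requested = VM_READ | VM_WRITE;	/* write fault */

	return (vm_flags & requested) == requested;	/* false */
}
#endif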
2819 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2820 uint32_t vmid, uint32_t node_id,
2821 uint64_t addr, bool write_fault)
2823 struct mm_struct *mm = NULL;
2824 struct svm_range_list *svms;
2825 struct svm_range *prange;
2826 struct kfd_process *p;
2827 ktime_t timestamp = ktime_get_boottime();
2828 struct kfd_node *node;
2830 int32_t gpuidx = MAX_GPU_INSTANCE;
2831 bool write_locked = false;
2832 struct vm_area_struct *vma;
2833 bool migration = false;
2836 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2837 pr_debug("device does not support SVM\n");
2841 p = kfd_lookup_process_by_pasid(pasid);
2843 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2848 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2850 if (atomic_read(&svms->drain_pagefaults)) {
2851 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2856 if (!p->xnack_enabled) {
2857 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2862 /* p->lead_thread is available as kfd_process_wq_release flushes the work
2863 * before releasing the task ref.
2865 mm = get_task_mm(p->lead_thread);
2867 pr_debug("svms 0x%p failed to get mm\n", svms);
2872 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2874 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2881 mutex_lock(&svms->lock);
2882 prange = svm_range_from_addr(svms, addr, NULL);
2884 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2886 if (!write_locked) {
2887 /* Need the write lock to create new range with MMU notifier.
2888 * Also flush pending deferred work to make sure the interval
2889 * tree is up to date before we add a new range
2891 mutex_unlock(&svms->lock);
2892 mmap_read_unlock(mm);
2893 mmap_write_lock(mm);
2894 write_locked = true;
2895 goto retry_write_locked;
2897 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2899 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2901 mmap_write_downgrade(mm);
2903 goto out_unlock_svms;
2907 mmap_write_downgrade(mm);
2909 mutex_lock(&prange->migrate_mutex);
2911 if (svm_range_skip_recover(prange)) {
2912 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
2914 goto out_unlock_range;
2917 /* skip duplicate vm fault on different pages of same range */
2918 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2919 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
2920 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2921 svms, prange->start, prange->last);
2923 goto out_unlock_range;
2926 /* __do_munmap removed the VMA, return success as we are handling a stale retry fault */
2929 vma = vma_lookup(mm, addr << PAGE_SHIFT);
2931 pr_debug("address 0x%llx VMA is removed\n", addr);
2933 goto out_unlock_range;
2936 if (!svm_fault_allowed(vma, write_fault)) {
2937 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2938 write_fault ? "write" : "read");
2940 goto out_unlock_range;
2943 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
2944 if (best_loc == -1) {
2945 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2946 svms, prange->start, prange->last);
2948 goto out_unlock_range;
2951 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2952 svms, prange->start, prange->last, best_loc,
2953 prange->actual_loc);
2955 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
2956 write_fault, timestamp);
2958 if (prange->actual_loc != best_loc) {
2961 r = svm_migrate_to_vram(prange, best_loc, mm,
2962 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2964 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2966 /* Fallback to system memory if migration to
2969 if (prange->actual_loc)
2970 r = svm_migrate_vram_to_ram(prange, mm,
2971 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
2977 r = svm_migrate_vram_to_ram(prange, mm,
2978 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
2982 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2983 r, svms, prange->start, prange->last);
2984 goto out_unlock_range;
2988 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
2990 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2991 r, svms, prange->start, prange->last);
2993 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
2997 mutex_unlock(&prange->migrate_mutex);
2999 mutex_unlock(&svms->lock);
3000 mmap_read_unlock(mm);
3002 svm_range_count_fault(node, p, gpuidx);
3006 kfd_unref_process(p);
3009 pr_debug("recover vm fault later\n");
3010 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3017 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3019 struct svm_range *prange, *pchild;
3020 uint64_t reserved_size = 0;
3024 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3026 mutex_lock(&p->svms.lock);
3028 list_for_each_entry(prange, &p->svms.list, list) {
3029 svm_range_lock(prange);
3030 list_for_each_entry(pchild, &prange->child_list, child_list) {
3031 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3032 if (xnack_enabled) {
3033 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3034 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3036 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3037 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3040 reserved_size += size;
3044 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3045 if (xnack_enabled) {
3046 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3047 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3049 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3050 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3053 reserved_size += size;
3056 svm_range_unlock(prange);
3062 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3063 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
3065 /* Changing the xnack mode must be done inside the svms lock, to avoid racing
3066 * with svm_range_deferred_list_work unreserving memory in parallel.
3068 p->xnack_enabled = xnack_enabled;
3070 mutex_unlock(&p->svms.lock);
3074 void svm_range_list_fini(struct kfd_process *p)
3076 struct svm_range *prange;
3077 struct svm_range *next;
3079 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3081 cancel_delayed_work_sync(&p->svms.restore_work);
3083 /* Ensure list work is finished before process is destroyed */
3084 flush_work(&p->svms.deferred_list_work);
3087 * Ensure no retry fault comes in afterwards, as page fault handler will
3088 * not find kfd process and take mm lock to recover fault.
3090 atomic_inc(&p->svms.drain_pagefaults);
3091 svm_range_drain_retry_fault(&p->svms);
3093 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3094 svm_range_unlink(prange);
3095 svm_range_remove_notifier(prange);
3096 svm_range_free(prange, true);
3099 mutex_destroy(&p->svms.lock);
3101 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3104 int svm_range_list_init(struct kfd_process *p)
3106 struct svm_range_list *svms = &p->svms;
3109 svms->objects = RB_ROOT_CACHED;
3110 mutex_init(&svms->lock);
3111 INIT_LIST_HEAD(&svms->list);
3112 atomic_set(&svms->evicted_ranges, 0);
3113 atomic_set(&svms->drain_pagefaults, 0);
3114 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3115 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3116 INIT_LIST_HEAD(&svms->deferred_range_list);
3117 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3118 spin_lock_init(&svms->deferred_list_lock);
3120 for (i = 0; i < p->n_pdds; i++)
3121 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3122 bitmap_set(svms->bitmap_supported, i, 1);
3128 * svm_range_check_vm - check if virtual address range mapped already
3129 * @p: current kfd_process
3130 * @start: range start address, in pages
3131 * @last: range last address, in pages
3132 * @bo_s: mapping start address in pages if address range already mapped
3133 * @bo_l: mapping last address in pages if address range already mapped
3135 * The purpose is to avoid collisions with virtual address ranges already
3136 * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
3137 * It checks each pdd in the kfd_process.
3139 * Context: Process context
3141 * Return 0 - OK, if the range is not mapped.
3142 * Otherwise error code:
3143 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3144 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3145 * a signal. Release all buffer reservations and return to user-space.
3148 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3149 uint64_t *bo_s, uint64_t *bo_l)
3151 struct amdgpu_bo_va_mapping *mapping;
3152 struct interval_tree_node *node;
3156 for (i = 0; i < p->n_pdds; i++) {
3157 struct amdgpu_vm *vm;
3159 if (!p->pdds[i]->drm_priv)
3162 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3163 r = amdgpu_bo_reserve(vm->root.bo, false);
3167 node = interval_tree_iter_first(&vm->va, start, last);
3169 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3171 mapping = container_of((struct rb_node *)node,
3172 struct amdgpu_bo_va_mapping, rb);
3174 *bo_s = mapping->start;
3175 *bo_l = mapping->last;
3177 amdgpu_bo_unreserve(vm->root.bo);
3180 amdgpu_bo_unreserve(vm->root.bo);
3187 * svm_range_is_valid - check if virtual address range is valid
3188 * @p: current kfd_process
3189 * @start: range start address, in pages
3190 * @size: range size, in pages
3192 * Valid virtual address range means it belongs to one or more VMAs
3194 * Context: Process context
3197 * 0 - OK, otherwise error code
3200 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3202 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3203 struct vm_area_struct *vma;
3205 unsigned long start_unchg = start;
3207 start <<= PAGE_SHIFT;
3208 end = start + (size << PAGE_SHIFT);
3210 vma = vma_lookup(p->mm, start);
3211 if (!vma || (vma->vm_flags & device_vma))
3213 start = min(end, vma->vm_end);
3214 } while (start < end);
3216 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3221 * svm_range_best_prefetch_location - decide the best prefetch location
3222 * @prange: svm range structure
 *
 * For xnack off:
3225 * If the range maps to a single GPU, the best prefetch location is prefetch_loc,
3226 * which can be CPU or GPU.
3228 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch location
3229 * is the prefetch_loc GPU only if the mGPUs are connected in the same XGMI hive;
3230 * otherwise the best prefetch location is always CPU, because a GPU cannot have
3231 * coherent mappings of other GPUs' VRAM even with a large-BAR PCIe connection.
 *
 * For xnack on:
3234 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3235 * prefetch_loc; access from other GPUs will generate a vm fault and trigger migration.
3237 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
3238 * prefetch_loc GPU only if the mGPUs are connected in the same XGMI hive;
3239 * otherwise the best prefetch location is always CPU.
3241 * Context: Process context
3244 * Return: 0 for CPU or GPU id
3247 svm_range_best_prefetch_location(struct svm_range *prange)
3249 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3250 uint32_t best_loc = prange->prefetch_loc;
3251 struct kfd_process_device *pdd;
3252 struct kfd_node *bo_node;
3253 struct kfd_process *p;
3256 p = container_of(prange->svms, struct kfd_process, svms);
3258 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3261 bo_node = svm_range_get_node_by_id(prange, best_loc);
3263 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3268 if (bo_node->adev->gmc.is_app_apu) {
3273 if (p->xnack_enabled)
3274 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3276 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3279 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3280 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3282 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3286 if (pdd->dev->adev == bo_node->adev)
3289 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3296 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3297 p->xnack_enabled, &p->svms, prange->start, prange->last,
3303 /* svm_range_trigger_migration - start page migration if the prefetch loc changed
3304 * @mm: current process mm_struct
3305 * @prange: svm range structure
3306 * @migrated: output, true if migration is triggered
3308 * If the range prefetch_loc is a GPU and the actual loc is cpu (0), then migrate
 * the range from ram to vram.
3310 * If the range prefetch_loc is cpu (0) and the actual loc is a GPU, then migrate
 * the range from vram to ram.
3313 * If GPU vm fault retry is not enabled, the migration interacts with the MMU
 * notifier as follows:
3315 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback svm_range_evict
3316 * stops all queues and schedules the restore work.
3317 * 2. svm_range_restore_work waits for the migration to finish via
3318 * a. svm_range_validate_vram taking prange->migrate_mutex, and
3319 * b. svm_range_validate_ram HMM get pages waiting until the CPU fault handler returns.
3320 * 3. The restore work updates the GPU mappings and resumes all queues.
3322 * Context: Process context
3325 * 0 - OK, otherwise - error code of migration
3328 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3335 best_loc = svm_range_best_prefetch_location(prange);
3337 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3338 best_loc == prange->actual_loc)
3342 r = svm_migrate_vram_to_ram(prange, mm,
3343 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3348 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
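/* Decision sketch (illustrative): with actual_loc == 0 (system memory) and a
 * GPU prefetch location, the code above migrates the range into that GPU's
 * VRAM; with a VRAM actual_loc and a CPU (0) prefetch location, it migrates
 * the range back to system memory. Callers such as svm_range_set_attr() hold
 * prange->migrate_mutex around this call.
 */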
3354 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3359 if (dma_fence_is_signaled(&fence->base))
3362 if (fence->svm_bo) {
3363 WRITE_ONCE(fence->svm_bo->evicting, 1);
3364 schedule_work(&fence->svm_bo->eviction_work);
3370 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3372 struct svm_range_bo *svm_bo;
3373 struct mm_struct *mm;
3376 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3377 if (!svm_bo_ref_unless_zero(svm_bo))
3378 return; /* svm_bo was freed while eviction was pending */
3380 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3381 mm = svm_bo->eviction_fence->mm;
3383 svm_range_bo_unref(svm_bo);
3388 spin_lock(&svm_bo->list_lock);
3389 while (!list_empty(&svm_bo->range_list) && !r) {
3390 struct svm_range *prange =
3391 list_first_entry(&svm_bo->range_list,
3392 struct svm_range, svm_bo_list);
3395 list_del_init(&prange->svm_bo_list);
3396 spin_unlock(&svm_bo->list_lock);
3398 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3399 prange->start, prange->last);
3401 mutex_lock(&prange->migrate_mutex);
3403 r = svm_migrate_vram_to_ram(prange, mm,
3404 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3405 } while (!r && prange->actual_loc && --retries);
3407 if (!r && prange->actual_loc)
3408 pr_info_once("Migration failed during eviction");
3410 if (!prange->actual_loc) {
3411 mutex_lock(&prange->lock);
3412 prange->svm_bo = NULL;
3413 mutex_unlock(&prange->lock);
3415 mutex_unlock(&prange->migrate_mutex);
3417 spin_lock(&svm_bo->list_lock);
3419 spin_unlock(&svm_bo->list_lock);
3420 mmap_read_unlock(mm);
3423 dma_fence_signal(&svm_bo->eviction_fence->base);
3425 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3426 * has been called in svm_migrate_vram_to_ram
3428 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3429 svm_range_bo_unref(svm_bo);
3433 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3434 uint64_t start, uint64_t size, uint32_t nattr,
3435 struct kfd_ioctl_svm_attribute *attrs)
3437 struct amdkfd_process_info *process_info = p->kgd_process_info;
3438 struct list_head update_list;
3439 struct list_head insert_list;
3440 struct list_head remove_list;
3441 struct svm_range_list *svms;
3442 struct svm_range *prange;
3443 struct svm_range *next;
3444 bool update_mapping = false;
3448 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3449 p->pasid, &p->svms, start, start + size - 1, size);
3451 r = svm_range_check_attr(p, nattr, attrs);
3457 mutex_lock(&process_info->lock);
3459 svm_range_list_lock_and_flush_work(svms, mm);
3461 r = svm_range_is_valid(p, start, size);
3463 pr_debug("invalid range r=%d\n", r);
3464 mmap_write_unlock(mm);
3468 mutex_lock(&svms->lock);
3470 /* Add new range and split existing ranges as needed */
3471 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3472 &insert_list, &remove_list);
3474 mutex_unlock(&svms->lock);
3475 mmap_write_unlock(mm);
3478 /* Apply changes as a transaction */
3479 list_for_each_entry_safe(prange, next, &insert_list, list) {
3480 svm_range_add_to_svms(prange);
3481 svm_range_add_notifier_locked(mm, prange);
3483 list_for_each_entry(prange, &update_list, update_list) {
3484 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3485 /* TODO: unmap ranges from GPU that lost access */
3487 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3488 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3489 prange->svms, prange, prange->start,
3491 svm_range_unlink(prange);
3492 svm_range_remove_notifier(prange);
3493 svm_range_free(prange, false);
3496 mmap_write_downgrade(mm);
3497 /* Trigger migrations and revalidate and map to GPUs as needed. If
3498 * this fails we may be left with partially completed actions. There
3499 * is no clean way of rolling back to the previous state in such a
3500 * case because the rollback wouldn't be guaranteed to work either.
3502 list_for_each_entry(prange, &update_list, update_list) {
3505 mutex_lock(&prange->migrate_mutex);
3507 r = svm_range_trigger_migration(mm, prange, &migrated);
3509 goto out_unlock_range;
3511 if (migrated && (!p->xnack_enabled ||
3512 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3513 prange->mapped_to_gpu) {
3514 pr_debug("restore_work will update mappings of GPUs\n");
3515 mutex_unlock(&prange->migrate_mutex);
3519 if (!migrated && !update_mapping) {
3520 mutex_unlock(&prange->migrate_mutex);
3524 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3526 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3527 true, true, flush_tlb);
3529 pr_debug("failed %d to map svm range\n", r);
3532 mutex_unlock(&prange->migrate_mutex);
3537 svm_range_debug_dump(svms);
3539 mutex_unlock(&svms->lock);
3540 mmap_read_unlock(mm);
3542 mutex_unlock(&process_info->lock);
3544 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3545 &p->svms, start, start + size - 1, r);
3551 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3552 uint64_t start, uint64_t size, uint32_t nattr,
3553 struct kfd_ioctl_svm_attribute *attrs)
3555 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3556 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3557 bool get_preferred_loc = false;
3558 bool get_prefetch_loc = false;
3559 bool get_granularity = false;
3560 bool get_accessible = false;
3561 bool get_flags = false;
3562 uint64_t last = start + size - 1UL;
3563 uint8_t granularity = 0xff;
3564 struct interval_tree_node *node;
3565 struct svm_range_list *svms;
3566 struct svm_range *prange;
3567 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3568 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3569 uint32_t flags_and = 0xffffffff;
3570 uint32_t flags_or = 0;
3575 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3576 start + size - 1, nattr);
3578 /* Flush pending deferred work to avoid racing with deferred actions from
3579 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3580 * can still race with get_attr because we don't hold the mmap lock. But that
3581 * would be a race condition in the application anyway, and undefined
3582 * behaviour is acceptable in that case.
3584 flush_work(&p->svms.deferred_list_work);
3587 r = svm_range_is_valid(p, start, size);
3588 mmap_read_unlock(mm);
3590 pr_debug("invalid range r=%d\n", r);
3594 for (i = 0; i < nattr; i++) {
3595 switch (attrs[i].type) {
3596 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3597 get_preferred_loc = true;
3599 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3600 get_prefetch_loc = true;
3602 case KFD_IOCTL_SVM_ATTR_ACCESS:
3603 get_accessible = true;
3605 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3606 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3609 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3610 get_granularity = true;
3612 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3613 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3616 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3623 mutex_lock(&svms->lock);
3625 node = interval_tree_iter_first(&svms->objects, start, last);
3627 pr_debug("range attrs not found return default values\n");
3628 svm_range_set_default_attributes(&location, &prefetch_loc,
3629 &granularity, &flags_and);
3630 flags_or = flags_and;
3631 if (p->xnack_enabled)
3632 bitmap_copy(bitmap_access, svms->bitmap_supported,
3635 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3636 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3639 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3640 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3643 struct interval_tree_node *next;
3645 prange = container_of(node, struct svm_range, it_node);
3646 next = interval_tree_iter_next(node, start, last);
3648 if (get_preferred_loc) {
3649 if (prange->preferred_loc ==
3650 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3651 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3652 location != prange->preferred_loc)) {
3653 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3654 get_preferred_loc = false;
3656 location = prange->preferred_loc;
3659 if (get_prefetch_loc) {
3660 if (prange->prefetch_loc ==
3661 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3662 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3663 prefetch_loc != prange->prefetch_loc)) {
3664 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3665 get_prefetch_loc = false;
3667 prefetch_loc = prange->prefetch_loc;
3670 if (get_accessible) {
3671 bitmap_and(bitmap_access, bitmap_access,
3672 prange->bitmap_access, MAX_GPU_INSTANCE);
3673 bitmap_and(bitmap_aip, bitmap_aip,
3674 prange->bitmap_aip, MAX_GPU_INSTANCE);
3677 flags_and &= prange->flags;
3678 flags_or |= prange->flags;
3681 if (get_granularity && prange->granularity < granularity)
3682 granularity = prange->granularity;
3687 mutex_unlock(&svms->lock);
3689 for (i = 0; i < nattr; i++) {
3690 switch (attrs[i].type) {
3691 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3692 attrs[i].value = location;
3694 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3695 attrs[i].value = prefetch_loc;
3697 case KFD_IOCTL_SVM_ATTR_ACCESS:
3698 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3701 pr_debug("invalid gpuid %x\n", attrs[i].value);
3704 if (test_bit(gpuidx, bitmap_access))
3705 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3706 else if (test_bit(gpuidx, bitmap_aip))
3708 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3710 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3712 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3713 attrs[i].value = flags_and;
3715 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3716 attrs[i].value = ~flags_or;
3718 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3719 attrs[i].value = (uint32_t)granularity;
3727 int kfd_criu_resume_svm(struct kfd_process *p)
3729 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3730 int nattr_common = 4, nattr_accessibility = 1;
3731 struct criu_svm_metadata *criu_svm_md = NULL;
3732 struct svm_range_list *svms = &p->svms;
3733 struct criu_svm_metadata *next = NULL;
3734 uint32_t set_flags = 0xffffffff;
3735 int i, j, num_attrs, ret = 0;
3736 uint64_t set_attr_size;
3737 struct mm_struct *mm;
3739 if (list_empty(&svms->criu_svm_metadata_list)) {
3740 pr_debug("No SVM data from CRIU restore stage 2\n");
3744 mm = get_task_mm(p->lead_thread);
3746 pr_err("failed to get mm for the target process\n");
3750 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3753 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3754 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3755 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3757 for (j = 0; j < num_attrs; j++) {
3758 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3759 i, j, criu_svm_md->data.attrs[j].type,
3760 i, j, criu_svm_md->data.attrs[j].value);
3761 switch (criu_svm_md->data.attrs[j].type) {
3762 /* During the Checkpoint operation, the query for the
3763 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3764 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
3765 * not used by the range that was checkpointed. Care
3766 * must be taken not to restore with an invalid value;
3767 * otherwise the gpuidx value will be invalid and
3768 * set_attr would eventually fail, so just replace those
3769 * with another dummy attribute such as
3770 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3772 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3773 if (criu_svm_md->data.attrs[j].value ==
3774 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3775 criu_svm_md->data.attrs[j].type =
3776 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3777 criu_svm_md->data.attrs[j].value = 0;
3780 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3781 set_flags = criu_svm_md->data.attrs[j].value;
3788 /* CLR_FLAGS is not available via get_attr during checkpoint but
3789 * it needs to be inserted before restoring the ranges so
3790 * allocate extra space for it before calling set_attr
3792 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3794 set_attr_new = krealloc(set_attr, set_attr_size,
3796 if (!set_attr_new) {
3800 set_attr = set_attr_new;
3802 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3803 sizeof(struct kfd_ioctl_svm_attribute));
3804 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3805 set_attr[num_attrs].value = ~set_flags;
3807 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3808 criu_svm_md->data.size, num_attrs + 1,
3811 pr_err("CRIU: failed to set range attributes\n");
3819 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3820 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3821 criu_svm_md->data.start_addr);
3830 int kfd_criu_restore_svm(struct kfd_process *p,
3831 uint8_t __user *user_priv_ptr,
3832 uint64_t *priv_data_offset,
3833 uint64_t max_priv_data_size)
3835 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3836 int nattr_common = 4, nattr_accessibility = 1;
3837 struct criu_svm_metadata *criu_svm_md = NULL;
3838 struct svm_range_list *svms = &p->svms;
3839 uint32_t num_devices;
3842 num_devices = p->n_pdds;
3843 /* Handle one SVM range object at a time. The number of GPUs is assumed to be
3844 * the same on the restore node; this must have been checked earlier while
3845 * evaluating the topology.
3848 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3849 (nattr_common + nattr_accessibility * num_devices);
3850 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3852 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3855 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3857 pr_err("failed to allocate memory to store svm metadata\n");
3860 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3865 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3866 svm_priv_data_size);
3871 *priv_data_offset += svm_priv_data_size;
3873 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3883 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3884 uint64_t *svm_priv_data_size)
3886 uint64_t total_size, accessibility_size, common_attr_size;
3887 int nattr_common = 4, nattr_accessibility = 1;
3888 int num_devices = p->n_pdds;
3889 struct svm_range_list *svms;
3890 struct svm_range *prange;
3893 *svm_priv_data_size = 0;
3899 mutex_lock(&svms->lock);
3900 list_for_each_entry(prange, &svms->list, list) {
3901 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3902 prange, prange->start, prange->npages,
3903 prange->start + prange->npages - 1);
3906 mutex_unlock(&svms->lock);
3908 *num_svm_ranges = count;
3909 /* Only the accessibility attributes need to be queried for all the GPUs
3910 * individually; the remaining ones span the entire process regardless of
3911 * the individual gpu nodes. Of the remaining attributes,
3912 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
3914 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
3915 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
3916 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
3917 * KFD_IOCTL_SVM_ATTR_GRANULARITY
3919 * ** ACCESSIBILITY ATTRIBUTES **
3920 * (Considered as one, type is altered during query, value is gpuid)
3921 * KFD_IOCTL_SVM_ATTR_ACCESS
3922 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
3923 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
3925 if (*num_svm_ranges > 0) {
3926 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3928 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
3929 nattr_accessibility * num_devices;
3931 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3932 common_attr_size + accessibility_size;
3934 *svm_priv_data_size = *num_svm_ranges * total_size;
3937 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
3938 *svm_priv_data_size);
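/* Size-accounting sketch (illustrative, hypothetical helper, not part of the
 * driver): with nattr_common = 4 and nattr_accessibility = 1, each
 * checkpointed range needs the priv header plus 4 common attributes plus one
 * accessibility attribute per GPU.
 */
#if 0
static uint64_t example_svm_priv_size_per_range(int num_devices)
{
	return sizeof(struct kfd_criu_svm_range_priv_data) +
	       sizeof(struct kfd_ioctl_svm_attribute) * (4 + num_devices);
}
#endif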
3942 int kfd_criu_checkpoint_svm(struct kfd_process *p,
3943 uint8_t __user *user_priv_data,
3944 uint64_t *priv_data_offset)
3946 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
3947 struct kfd_ioctl_svm_attribute *query_attr = NULL;
3948 uint64_t svm_priv_data_size, query_attr_size = 0;
3949 int index, nattr_common = 4, ret = 0;
3950 struct svm_range_list *svms;
3951 int num_devices = p->n_pdds;
3952 struct svm_range *prange;
3953 struct mm_struct *mm;
3959 mm = get_task_mm(p->lead_thread);
3961 pr_err("failed to get mm for the target process\n");
3965 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3966 (nattr_common + num_devices);
3968 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
3974 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
3975 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
3976 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3977 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
3979 for (index = 0; index < num_devices; index++) {
3980 struct kfd_process_device *pdd = p->pdds[index];
3982 query_attr[index + nattr_common].type =
3983 KFD_IOCTL_SVM_ATTR_ACCESS;
3984 query_attr[index + nattr_common].value = pdd->user_gpu_id;
3987 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
3989 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
3996 list_for_each_entry(prange, &svms->list, list) {
3998 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
3999 svm_priv->start_addr = prange->start;
4000 svm_priv->size = prange->npages;
4001 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4002 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4003 prange, prange->start, prange->npages,
4004 prange->start + prange->npages - 1,
4005 prange->npages * PAGE_SIZE);
4007 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4009 (nattr_common + num_devices),
4012 pr_err("CRIU: failed to obtain range attributes\n");
4016 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4017 svm_priv_data_size)) {
4018 pr_err("Failed to copy svm priv to user\n");
4023 *priv_data_offset += svm_priv_data_size;
4038 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4039 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4041 struct mm_struct *mm = current->mm;
4044 start >>= PAGE_SHIFT;
4045 size >>= PAGE_SHIFT;
4048 case KFD_IOCTL_SVM_OP_SET_ATTR:
4049 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4051 case KFD_IOCTL_SVM_OP_GET_ATTR:
4052 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);