1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <drm/ttm/ttm_tt.h>
27 #include <drm/drm_exec.h>
29 #include "amdgpu_sync.h"
30 #include "amdgpu_object.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_hmm.h"
34 #include "amdgpu_xgmi.h"
37 #include "kfd_migrate.h"
38 #include "kfd_smi_events.h"
43 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47 /* Long enough to ensure no retry fault comes after svm range is restored and
48 * page table is updated.
50 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
/* A giant svm range is split into smaller ranges based on this value. It is
 * chosen as the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped
 * between 2MB and 1GB, and rounded down to a power of two.
56 static uint64_t max_svm_range_pages;
58 struct criu_svm_metadata {
59 struct list_head list;
60 struct kfd_criu_svm_range_priv_data data;
63 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
65 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
66 const struct mmu_notifier_range *range,
67 unsigned long cur_seq);
69 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
70 uint64_t *bo_s, uint64_t *bo_l);
71 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
72 .invalidate = svm_range_cpu_invalidate_pagetables,
76 * svm_range_unlink - unlink svm_range from lists and interval tree
77 * @prange: svm range structure to be removed
 * Remove the svm_range from the svms and svm_bo lists and from the svms
 * interval tree.
82 * Context: The caller must hold svms->lock
84 static void svm_range_unlink(struct svm_range *prange)
86 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
87 prange, prange->start, prange->last);
90 spin_lock(&prange->svm_bo->list_lock);
91 list_del(&prange->svm_bo_list);
92 spin_unlock(&prange->svm_bo->list_lock);
95 list_del(&prange->list);
96 if (prange->it_node.start != 0 && prange->it_node.last != 0)
97 interval_tree_remove(&prange->it_node, &prange->svms->objects);
101 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
103 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
104 prange, prange->start, prange->last);
106 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
107 prange->start << PAGE_SHIFT,
108 prange->npages << PAGE_SHIFT,
113 * svm_range_add_to_svms - add svm range to svms
114 * @prange: svm range structure to be added
116 * Add the svm range to svms interval tree and link list
118 * Context: The caller must hold svms->lock
120 static void svm_range_add_to_svms(struct svm_range *prange)
122 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
123 prange, prange->start, prange->last);
125 list_move_tail(&prange->list, &prange->svms->list);
126 prange->it_node.start = prange->start;
127 prange->it_node.last = prange->last;
128 interval_tree_insert(&prange->it_node, &prange->svms->objects);
131 static void svm_range_remove_notifier(struct svm_range *prange)
133 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
134 prange->svms, prange,
135 prange->notifier.interval_tree.start >> PAGE_SHIFT,
136 prange->notifier.interval_tree.last >> PAGE_SHIFT);
138 if (prange->notifier.interval_tree.start != 0 &&
139 prange->notifier.interval_tree.last != 0)
140 mmu_interval_notifier_remove(&prange->notifier);
144 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
146 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
147 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
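/* svm_range_dma_map_dev - (re)build the per-GPU DMA address array for @prange.
 * Pages that hmm_range_fault resolved to VRAM (zone device pages) are encoded
 * as an offset from the owning device's VRAM aperture and tagged with
 * SVM_RANGE_VRAM_DOMAIN; regular system pages are dma_map_page()'d for @adev.
 */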
151 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
152 unsigned long offset, unsigned long npages,
153 unsigned long *hmm_pfns, uint32_t gpuidx)
155 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
156 dma_addr_t *addr = prange->dma_addr[gpuidx];
157 struct device *dev = adev->dev;
162 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
165 prange->dma_addr[gpuidx] = addr;
169 for (i = 0; i < npages; i++) {
170 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
171 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
173 page = hmm_pfn_to_page(hmm_pfns[i]);
174 if (is_zone_device_page(page)) {
175 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
177 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
178 bo_adev->vm_manager.vram_base_offset -
179 bo_adev->kfd.pgmap.range.start;
180 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
181 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
184 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
185 r = dma_mapping_error(dev, addr[i]);
187 dev_err(dev, "failed %d dma_map_page\n", r);
190 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
191 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
197 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
198 unsigned long offset, unsigned long npages,
199 unsigned long *hmm_pfns)
201 struct kfd_process *p;
205 p = container_of(prange->svms, struct kfd_process, svms);
207 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
208 struct kfd_process_device *pdd;
210 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
211 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
213 pr_debug("failed to find device idx %d\n", gpuidx);
217 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
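/* svm_range_dma_unmap - undo svm_range_dma_map_dev for a sub-range. Entries
 * that carry SVM_RANGE_VRAM_DOMAIN or a mapping error are skipped because
 * they were never mapped through the IOMMU.
 */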
226 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
227 unsigned long offset, unsigned long npages)
229 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
235 for (i = offset; i < offset + npages; i++) {
236 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
238 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
239 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
244 void svm_range_free_dma_mappings(struct svm_range *prange)
246 struct kfd_process_device *pdd;
247 dma_addr_t *dma_addr;
249 struct kfd_process *p;
252 p = container_of(prange->svms, struct kfd_process, svms);
254 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
255 dma_addr = prange->dma_addr[gpuidx];
259 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
261 pr_debug("failed to find device idx %d\n", gpuidx);
264 dev = &pdd->dev->adev->pdev->dev;
265 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
267 prange->dma_addr[gpuidx] = NULL;
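/* svm_range_free - final teardown of a range: drop the VRAM BO reference,
 * release the DMA mappings on all GPUs and, when requested, give back the
 * system memory that was accounted against the process when the range was
 * created.
 */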
271 static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
273 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
274 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
276 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
277 prange->start, prange->last);
279 svm_range_vram_node_free(prange);
280 svm_range_free_dma_mappings(prange);
282 if (update_mem_usage && !p->xnack_enabled) {
283 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
284 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
285 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
287 mutex_destroy(&prange->lock);
288 mutex_destroy(&prange->migrate_mutex);
293 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
294 uint8_t *granularity, uint32_t *flags)
296 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
297 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
300 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
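/* svm_range_new - allocate and initialize a range covering [start, last].
 * Without XNACK the whole range has to stay resident, so the size is charged
 * against the USERPTR memory limit up front and the allocation fails if that
 * limit would be exceeded.
 */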
304 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
305 uint64_t last, bool update_mem_usage)
307 uint64_t size = last - start + 1;
308 struct svm_range *prange;
309 struct kfd_process *p;
311 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
315 p = container_of(svms, struct kfd_process, svms);
316 if (!p->xnack_enabled && update_mem_usage &&
317 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
318 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
319 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
323 prange->npages = size;
325 prange->start = start;
327 INIT_LIST_HEAD(&prange->list);
328 INIT_LIST_HEAD(&prange->update_list);
329 INIT_LIST_HEAD(&prange->svm_bo_list);
330 INIT_LIST_HEAD(&prange->deferred_list);
331 INIT_LIST_HEAD(&prange->child_list);
332 atomic_set(&prange->invalid, 0);
333 prange->validate_timestamp = 0;
334 mutex_init(&prange->migrate_mutex);
335 mutex_init(&prange->lock);
337 if (p->xnack_enabled)
338 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
341 svm_range_set_default_attributes(&prange->preferred_loc,
342 &prange->prefetch_loc,
343 &prange->granularity, &prange->flags);
345 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
350 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
352 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
358 static void svm_range_bo_release(struct kref *kref)
360 struct svm_range_bo *svm_bo;
362 svm_bo = container_of(kref, struct svm_range_bo, kref);
363 pr_debug("svm_bo 0x%p\n", svm_bo);
365 spin_lock(&svm_bo->list_lock);
366 while (!list_empty(&svm_bo->range_list)) {
367 struct svm_range *prange =
368 list_first_entry(&svm_bo->range_list,
369 struct svm_range, svm_bo_list);
370 /* list_del_init tells a concurrent svm_range_vram_node_new when
371 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
373 list_del_init(&prange->svm_bo_list);
374 spin_unlock(&svm_bo->list_lock);
376 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
377 prange->start, prange->last);
378 mutex_lock(&prange->lock);
379 prange->svm_bo = NULL;
380 mutex_unlock(&prange->lock);
382 spin_lock(&svm_bo->list_lock);
384 spin_unlock(&svm_bo->list_lock);
385 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
386 /* We're not in the eviction worker.
387 * Signal the fence and synchronize with any
388 * pending eviction work.
390 dma_fence_signal(&svm_bo->eviction_fence->base);
391 cancel_work_sync(&svm_bo->eviction_work);
393 dma_fence_put(&svm_bo->eviction_fence->base);
394 amdgpu_bo_unref(&svm_bo->bo);
398 static void svm_range_bo_wq_release(struct work_struct *work)
400 struct svm_range_bo *svm_bo;
402 svm_bo = container_of(work, struct svm_range_bo, release_work);
403 svm_range_bo_release(&svm_bo->kref);
406 static void svm_range_bo_release_async(struct kref *kref)
408 struct svm_range_bo *svm_bo;
410 svm_bo = container_of(kref, struct svm_range_bo, kref);
411 pr_debug("svm_bo 0x%p\n", svm_bo);
412 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
413 schedule_work(&svm_bo->release_work);
416 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
418 kref_put(&svm_bo->kref, svm_range_bo_release_async);
421 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
424 kref_put(&svm_bo->kref, svm_range_bo_release);
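/* svm_range_validate_svm_bo - check whether prange can keep using its current
 * VRAM BO on @node. Returns true if the existing BO reference is still valid
 * and can be reused, false if the caller must allocate a new svm_bo (no BO
 * yet, BO owned by a different node, or BO currently being evicted).
 */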
428 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
430 mutex_lock(&prange->lock);
431 if (!prange->svm_bo) {
432 mutex_unlock(&prange->lock);
435 if (prange->ttm_res) {
436 /* We still have a reference, all is well */
437 mutex_unlock(&prange->lock);
440 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
 * Migrate from GPU to GPU: remove the range from the source svm_bo->node
 * range list, and return false so that a new svm_bo is allocated from the
 * destination node.
446 if (prange->svm_bo->node != node) {
447 mutex_unlock(&prange->lock);
449 spin_lock(&prange->svm_bo->list_lock);
450 list_del_init(&prange->svm_bo_list);
451 spin_unlock(&prange->svm_bo->list_lock);
453 svm_range_bo_unref(prange->svm_bo);
456 if (READ_ONCE(prange->svm_bo->evicting)) {
458 struct svm_range_bo *svm_bo;
459 /* The BO is getting evicted,
460 * we need to get a new one
462 mutex_unlock(&prange->lock);
463 svm_bo = prange->svm_bo;
464 f = dma_fence_get(&svm_bo->eviction_fence->base);
465 svm_range_bo_unref(prange->svm_bo);
466 /* wait for the fence to avoid long spin-loop
467 * at list_empty_careful
469 dma_fence_wait(f, false);
472 /* The BO was still around and we got
473 * a new reference to it
475 mutex_unlock(&prange->lock);
476 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
477 prange->svms, prange->start, prange->last);
479 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
484 mutex_unlock(&prange->lock);
487 /* We need a new svm_bo. Spin-loop to wait for concurrent
488 * svm_range_bo_release to finish removing this range from
489 * its range list. After this, it is safe to reuse the
490 * svm_bo pointer and svm_bo_list head.
492 while (!list_empty_careful(&prange->svm_bo_list))
498 static struct svm_range_bo *svm_range_bo_new(void)
500 struct svm_range_bo *svm_bo;
502 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
506 kref_init(&svm_bo->kref);
507 INIT_LIST_HEAD(&svm_bo->range_list);
508 spin_lock_init(&svm_bo->list_lock);
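/* svm_range_vram_node_new - back @prange with VRAM on @node. Reuses an
 * existing svm_bo when possible, otherwise creates a new VRAM BO sized to the
 * whole range, attaches an eviction fence owned by the process, and links the
 * range onto the BO's range list.
 */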
514 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
517 struct amdgpu_bo_param bp;
518 struct svm_range_bo *svm_bo;
519 struct amdgpu_bo_user *ubo;
520 struct amdgpu_bo *bo;
521 struct kfd_process *p;
522 struct mm_struct *mm;
525 p = container_of(prange->svms, struct kfd_process, svms);
526 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
527 prange->start, prange->last);
529 if (svm_range_validate_svm_bo(node, prange))
532 svm_bo = svm_range_bo_new();
534 pr_debug("failed to alloc svm bo\n");
537 mm = get_task_mm(p->lead_thread);
539 pr_debug("failed to get mm\n");
544 svm_bo->eviction_fence =
545 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
549 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
550 svm_bo->evicting = 0;
551 memset(&bp, 0, sizeof(bp));
552 bp.size = prange->npages * PAGE_SIZE;
553 bp.byte_align = PAGE_SIZE;
554 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
555 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
556 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
557 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
558 bp.type = ttm_bo_type_device;
561 bp.xcp_id_plus1 = node->xcp->id + 1;
563 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
565 pr_debug("failed %d to create bo\n", r);
566 goto create_bo_failed;
570 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
571 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
572 bp.xcp_id_plus1 - 1);
574 r = amdgpu_bo_reserve(bo, true);
576 pr_debug("failed %d to reserve bo\n", r);
577 goto reserve_bo_failed;
581 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
583 pr_debug("failed %d to sync bo\n", r);
584 amdgpu_bo_unreserve(bo);
585 goto reserve_bo_failed;
589 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
591 pr_debug("failed %d to reserve bo\n", r);
592 amdgpu_bo_unreserve(bo);
593 goto reserve_bo_failed;
595 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
597 amdgpu_bo_unreserve(bo);
600 prange->svm_bo = svm_bo;
601 prange->ttm_res = bo->tbo.resource;
604 spin_lock(&svm_bo->list_lock);
605 list_add(&prange->svm_bo_list, &svm_bo->range_list);
606 spin_unlock(&svm_bo->list_lock);
611 amdgpu_bo_unref(&bo);
613 dma_fence_put(&svm_bo->eviction_fence->base);
615 prange->ttm_res = NULL;
620 void svm_range_vram_node_free(struct svm_range *prange)
622 svm_range_bo_unref(prange->svm_bo);
623 prange->ttm_res = NULL;
627 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
629 struct kfd_process *p;
630 struct kfd_process_device *pdd;
632 p = container_of(prange->svms, struct kfd_process, svms);
633 pdd = kfd_process_device_data_by_id(p, gpu_id);
635 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
642 struct kfd_process_device *
643 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
645 struct kfd_process *p;
647 p = container_of(prange->svms, struct kfd_process, svms);
649 return kfd_get_process_device_data(node, p);
652 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
654 struct ttm_operation_ctx ctx = { false, false };
656 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
658 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
662 svm_range_check_attr(struct kfd_process *p,
663 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
667 for (i = 0; i < nattr; i++) {
668 uint32_t val = attrs[i].value;
669 int gpuidx = MAX_GPU_INSTANCE;
671 switch (attrs[i].type) {
672 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
673 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
674 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
675 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
677 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
678 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
679 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
681 case KFD_IOCTL_SVM_ATTR_ACCESS:
682 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
683 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
684 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
686 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
688 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
690 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
693 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
698 pr_debug("no GPU 0x%x found\n", val);
700 } else if (gpuidx < MAX_GPU_INSTANCE &&
701 !test_bit(gpuidx, p->svms.bitmap_supported)) {
702 pr_debug("GPU 0x%x not supported\n", val);
711 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
712 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
713 bool *update_mapping)
718 for (i = 0; i < nattr; i++) {
719 switch (attrs[i].type) {
720 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
721 prange->preferred_loc = attrs[i].value;
723 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
724 prange->prefetch_loc = attrs[i].value;
726 case KFD_IOCTL_SVM_ATTR_ACCESS:
727 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
728 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
729 if (!p->xnack_enabled)
730 *update_mapping = true;
732 gpuidx = kfd_process_gpuidx_from_gpuid(p,
734 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
735 bitmap_clear(prange->bitmap_access, gpuidx, 1);
736 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
737 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
738 bitmap_set(prange->bitmap_access, gpuidx, 1);
739 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
741 bitmap_clear(prange->bitmap_access, gpuidx, 1);
742 bitmap_set(prange->bitmap_aip, gpuidx, 1);
745 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
746 *update_mapping = true;
747 prange->flags |= attrs[i].value;
749 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
750 *update_mapping = true;
751 prange->flags &= ~attrs[i].value;
753 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
754 prange->granularity = attrs[i].value;
757 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
763 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
764 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
769 for (i = 0; i < nattr; i++) {
770 switch (attrs[i].type) {
771 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
772 if (prange->preferred_loc != attrs[i].value)
775 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
776 /* Prefetch should always trigger a migration even
777 * if the value of the attribute didn't change.
780 case KFD_IOCTL_SVM_ATTR_ACCESS:
781 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
782 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
783 gpuidx = kfd_process_gpuidx_from_gpuid(p,
785 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
786 if (test_bit(gpuidx, prange->bitmap_access) ||
787 test_bit(gpuidx, prange->bitmap_aip))
789 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
790 if (!test_bit(gpuidx, prange->bitmap_access))
793 if (!test_bit(gpuidx, prange->bitmap_aip))
797 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
798 if ((prange->flags & attrs[i].value) != attrs[i].value)
801 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
802 if ((prange->flags & attrs[i].value) != 0)
805 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
806 if (prange->granularity != attrs[i].value)
810 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
814 return !prange->is_error_flag;
818 * svm_range_debug_dump - print all range information from svms
819 * @svms: svm range list header
 * Debug-print the start, number of pages, end and location of every svm
 * range in the svms linked list and interval tree.
824 * Context: The caller must hold svms->lock
826 static void svm_range_debug_dump(struct svm_range_list *svms)
828 struct interval_tree_node *node;
829 struct svm_range *prange;
831 pr_debug("dump svms 0x%p list\n", svms);
832 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
834 list_for_each_entry(prange, &svms->list, list) {
835 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
836 prange, prange->start, prange->npages,
837 prange->start + prange->npages - 1,
841 pr_debug("dump svms 0x%p interval tree\n", svms);
842 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
843 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
845 prange = container_of(node, struct svm_range, it_node);
846 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
847 prange, prange->start, prange->npages,
848 prange->start + prange->npages - 1,
850 node = interval_tree_iter_next(node, 0, ~0ULL);
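/* svm_range_split_array - split one per-page metadata array (e.g. dma_addr)
 * between an old and a new range. For example, splitting old [0x1000, 0x1007]
 * with new_start 0x1004 copies entries 4..7 into the new array and keeps
 * entries 0..3 in a reallocated old array.
 */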
855 svm_range_split_array(void *ppnew, void *ppold, size_t size,
856 uint64_t old_start, uint64_t old_n,
857 uint64_t new_start, uint64_t new_n)
859 unsigned char *new, *old, *pold;
864 pold = *(unsigned char **)ppold;
868 new = kvmalloc_array(new_n, size, GFP_KERNEL);
872 d = (new_start - old_start) * size;
873 memcpy(new, pold + d, new_n * size);
875 old = kvmalloc_array(old_n, size, GFP_KERNEL);
881 d = (new_start == old_start) ? new_n * size : 0;
882 memcpy(old, pold + d, old_n * size);
885 *(void **)ppold = old;
886 *(void **)ppnew = new;
892 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
893 uint64_t start, uint64_t last)
895 uint64_t npages = last - start + 1;
898 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
899 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
900 sizeof(*old->dma_addr[i]), old->start,
901 npages, new->start, new->npages);
910 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
911 uint64_t start, uint64_t last)
913 uint64_t npages = last - start + 1;
915 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
916 new->svms, new, new->start, start, last);
918 if (new->start == old->start) {
919 new->offset = old->offset;
920 old->offset += new->npages;
922 new->offset = old->offset + npages;
925 new->svm_bo = svm_range_bo_ref(old->svm_bo);
926 new->ttm_res = old->ttm_res;
928 spin_lock(&new->svm_bo->list_lock);
929 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
930 spin_unlock(&new->svm_bo->list_lock);
936 * svm_range_split_adjust - split range and adjust
939 * @old: the old range
 * @start: the start address, in pages, that the old range is adjusted to
 * @last: the last address, in pages, that the old range is adjusted to
 * Copy the system memory dma_addr or VRAM ttm_res of the old range into the
 * new range, from new->start for new->npages pages; the remaining part, from
 * @start to @last, stays in the old range.
 * 0 - OK, -ENOMEM - out of memory
951 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
952 uint64_t start, uint64_t last)
956 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
957 new->svms, new->start, old->start, old->last, start, last);
959 if (new->start < old->start ||
960 new->last > old->last) {
961 WARN_ONCE(1, "invalid new range start or last\n");
965 r = svm_range_split_pages(new, old, start, last);
969 if (old->actual_loc && old->ttm_res) {
970 r = svm_range_split_nodes(new, old, start, last);
975 old->npages = last - start + 1;
978 new->flags = old->flags;
979 new->preferred_loc = old->preferred_loc;
980 new->prefetch_loc = old->prefetch_loc;
981 new->actual_loc = old->actual_loc;
982 new->granularity = old->granularity;
983 new->mapped_to_gpu = old->mapped_to_gpu;
984 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
985 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
991 * svm_range_split - split a range in 2 ranges
993 * @prange: the svm range to split
994 * @start: the remaining range start address in pages
995 * @last: the remaining range last address in pages
996 * @new: the result new range generated
999 * case 1: if start == prange->start
1000 * prange ==> prange[start, last]
1001 * new range [last + 1, prange->last]
1003 * case 2: if last == prange->last
1004 * prange ==> prange[start, last]
1005 * new range [prange->start, start - 1]
1008 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1011 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1012 struct svm_range **new)
1014 uint64_t old_start = prange->start;
1015 uint64_t old_last = prange->last;
1016 struct svm_range_list *svms;
1019 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1020 old_start, old_last, start, last);
1022 if (old_start != start && old_last != last)
1024 if (start < old_start || last > old_last)
1027 svms = prange->svms;
1028 if (old_start == start)
1029 *new = svm_range_new(svms, last + 1, old_last, false);
1031 *new = svm_range_new(svms, old_start, start - 1, false);
1035 r = svm_range_split_adjust(*new, prange, start, last);
1037 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1038 r, old_start, old_last, start, last);
1039 svm_range_free(*new, false);
1047 svm_range_split_tail(struct svm_range *prange,
1048 uint64_t new_last, struct list_head *insert_list)
1050 struct svm_range *tail;
1051 int r = svm_range_split(prange, prange->start, new_last, &tail);
1054 list_add(&tail->list, insert_list);
1059 svm_range_split_head(struct svm_range *prange,
1060 uint64_t new_start, struct list_head *insert_list)
1062 struct svm_range *head;
1063 int r = svm_range_split(prange, new_start, prange->last, &head);
1066 list_add(&head->list, insert_list);
1071 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1072 struct svm_range *pchild, enum svm_work_list_ops op)
1074 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1075 pchild, pchild->start, pchild->last, prange, op);
1077 pchild->work_item.mm = mm;
1078 pchild->work_item.op = op;
1079 list_add_tail(&pchild->child_list, &prange->child_list);
1083 * svm_range_split_by_granularity - collect ranges within granularity boundary
1085 * @p: the process with svms list
1087 * @addr: the vm fault address in pages, to split the prange
1088 * @parent: parent range if prange is from child list
1089 * @prange: prange to split
1091 * Trims @prange to be a single aligned block of prange->granularity if
1092 * possible. The head and tail are added to the child_list in @parent.
1094 * Context: caller must hold mmap_read_lock and prange->lock
1097 * 0 - OK, otherwise error code
1100 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1101 unsigned long addr, struct svm_range *parent,
1102 struct svm_range *prange)
1104 struct svm_range *head, *tail;
1105 unsigned long start, last, size;
/* Align the split range start and size to the granularity size, so that a
 * single PTE can be used for the whole range; this reduces the number of PTEs
 * updated and the L1 TLB space used for translation.
1112 size = 1UL << prange->granularity;
1113 start = ALIGN_DOWN(addr, size);
1114 last = ALIGN(addr + 1, size) - 1;
1116 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1117 prange->svms, prange->start, prange->last, start, last, size);
1119 if (start > prange->start) {
1120 r = svm_range_split(prange, start, prange->last, &head);
1123 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1126 if (last < prange->last) {
1127 r = svm_range_split(prange, prange->start, last, &tail);
1130 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1133 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1134 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1135 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1136 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1137 prange, prange->start, prange->last,
1138 SVM_OP_ADD_RANGE_AND_MAP);
1143 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1145 return (node_a->adev == node_b->adev ||
1146 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
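/* svm_range_get_pte_flags - derive the GPU PTE flags for mapping @prange on
 * @node. The MTYPE is chosen per ASIC generation based on where the memory
 * lives relative to the mapping GPU: local VRAM, VRAM of another GPU in the
 * same XGMI hive, remote VRAM, or system memory.
 */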
1150 svm_range_get_pte_flags(struct kfd_node *node,
1151 struct svm_range *prange, int domain)
1153 struct kfd_node *bo_node;
1154 uint32_t flags = prange->flags;
1155 uint32_t mapping_flags = 0;
1157 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1158 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1159 bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
1160 unsigned int mtype_local;
1162 if (domain == SVM_RANGE_VRAM_DOMAIN)
1163 bo_node = prange->svm_bo->node;
1165 switch (node->adev->ip_versions[GC_HWIP][0]) {
1166 case IP_VERSION(9, 4, 1):
1167 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1168 if (bo_node == node) {
1169 mapping_flags |= coherent ?
1170 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1172 mapping_flags |= coherent ?
1173 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1174 if (svm_nodes_in_same_hive(node, bo_node))
1178 mapping_flags |= coherent ?
1179 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1182 case IP_VERSION(9, 4, 2):
1183 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1184 if (bo_node == node) {
1185 mapping_flags |= coherent ?
1186 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1187 if (node->adev->gmc.xgmi.connected_to_cpu)
1190 mapping_flags |= coherent ?
1191 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1192 if (svm_nodes_in_same_hive(node, bo_node))
1196 mapping_flags |= coherent ?
1197 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1200 case IP_VERSION(9, 4, 3):
1201 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1202 (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
1205 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1206 } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
1207 /* local HBM region close to partition */
1208 if (bo_node->adev == node->adev &&
1209 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1210 mapping_flags |= mtype_local;
1211 /* local HBM region far from partition or remote XGMI GPU */
1212 else if (svm_nodes_in_same_hive(bo_node, node))
1213 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1216 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1217 /* system memory accessed by the APU */
1218 } else if (node->adev->flags & AMD_IS_APU) {
1219 /* On NUMA systems, locality is determined per-page
1220 * in amdgpu_gmc_override_vm_pte_flags
1222 if (num_possible_nodes() <= 1)
1223 mapping_flags |= mtype_local;
1225 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1226 /* system memory accessed by the dGPU */
1228 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1232 mapping_flags |= coherent ?
1233 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1236 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1238 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1239 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1240 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1241 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1243 pte_flags = AMDGPU_PTE_VALID;
1244 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1245 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1247 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1252 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1253 uint64_t start, uint64_t last,
1254 struct dma_fence **fence)
1256 uint64_t init_pte_value = 0;
1258 pr_debug("[0x%llx 0x%llx]\n", start, last);
1260 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
1261 last, init_pte_value, 0, 0, NULL, NULL,
1266 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1267 unsigned long last, uint32_t trigger)
1269 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1270 struct kfd_process_device *pdd;
1271 struct dma_fence *fence = NULL;
1272 struct kfd_process *p;
1276 if (!prange->mapped_to_gpu) {
1277 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1278 prange, prange->start, prange->last);
1282 if (prange->start == start && prange->last == last) {
1283 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1284 prange->mapped_to_gpu = false;
1287 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1289 p = container_of(prange->svms, struct kfd_process, svms);
1291 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1292 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1293 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1295 pr_debug("failed to find device idx %d\n", gpuidx);
1299 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1300 start, last, trigger);
1302 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1303 drm_priv_to_vm(pdd->drm_priv),
1304 start, last, &fence);
1309 r = dma_fence_wait(fence, false);
1310 dma_fence_put(fence);
1315 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
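/* svm_range_map_to_gpu - write the GPU page table entries for part of a range
 * on one GPU. Consecutive pages that belong to the same memory domain (VRAM
 * vs. system memory) are coalesced into a single amdgpu_vm_update_range()
 * call.
 */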
1322 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1323 unsigned long offset, unsigned long npages, bool readonly,
1324 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1325 struct dma_fence **fence, bool flush_tlb)
1327 struct amdgpu_device *adev = pdd->dev->adev;
1328 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1330 unsigned long last_start;
1335 last_start = prange->start + offset;
1337 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1338 last_start, last_start + npages - 1, readonly);
1340 for (i = offset; i < offset + npages; i++) {
1341 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1342 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1344 /* Collect all pages in the same address range and memory domain
1345 * that can be mapped with a single call to update mapping.
1347 if (i < offset + npages - 1 &&
1348 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1351 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1352 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1354 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1356 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1358 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1359 prange->svms, last_start, prange->start + i,
1360 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
/* For dGPU mode, the same vm_manager is used to allocate VRAM for the
 * different memory partitions based on fpfn/lpfn, so the same
 * vm_manager.vram_base_offset must be used regardless of the memory partition.
1367 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
1368 last_start, prange->start + i,
1370 (last_start - prange->start) << PAGE_SHIFT,
1371 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1372 NULL, dma_addr, &vm->last_update);
1374 for (j = last_start - prange->start; j <= i; j++)
1375 dma_addr[j] |= last_domain;
1378 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1381 last_start = prange->start + i + 1;
1384 r = amdgpu_vm_update_pdes(adev, vm, false);
1386 pr_debug("failed %d to update directories 0x%lx\n", r,
1392 *fence = dma_fence_get(vm->last_update);
1399 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1400 unsigned long npages, bool readonly,
1401 unsigned long *bitmap, bool wait, bool flush_tlb)
1403 struct kfd_process_device *pdd;
1404 struct amdgpu_device *bo_adev = NULL;
1405 struct kfd_process *p;
1406 struct dma_fence *fence = NULL;
1410 if (prange->svm_bo && prange->ttm_res)
1411 bo_adev = prange->svm_bo->node->adev;
1413 p = container_of(prange->svms, struct kfd_process, svms);
1414 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1415 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1416 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1418 pr_debug("failed to find device idx %d\n", gpuidx);
1422 pdd = kfd_bind_process_to_device(pdd->dev, p);
1426 if (bo_adev && pdd->dev->adev != bo_adev &&
1427 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1428 pr_debug("cannot map to device idx %d\n", gpuidx);
1432 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1433 prange->dma_addr[gpuidx],
1434 bo_adev, wait ? &fence : NULL,
1440 r = dma_fence_wait(fence, false);
1441 dma_fence_put(fence);
1444 pr_debug("failed %d to dma fence wait\n", r);
1449 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1455 struct svm_validate_context {
1456 struct kfd_process *process;
1457 struct svm_range *prange;
1459 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1460 struct drm_exec exec;
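/* svm_range_reserve_bos - lock the page-directory BO of every GPU selected in
 * ctx->bitmap with drm_exec, then revalidate the page-table BOs so that the
 * following mapping update cannot race with eviction.
 */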
1463 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1465 struct kfd_process_device *pdd;
1466 struct amdgpu_vm *vm;
drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
1471 drm_exec_until_all_locked(&ctx->exec) {
1472 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1473 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1475 pr_debug("failed to find device idx %d\n", gpuidx);
1479 vm = drm_priv_to_vm(pdd->drm_priv);
1481 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1482 drm_exec_retry_on_contention(&ctx->exec);
1484 pr_debug("failed %d to reserve bo\n", r);
1490 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1491 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1493 pr_debug("failed to find device idx %d\n", gpuidx);
1498 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1499 drm_priv_to_vm(pdd->drm_priv),
1500 svm_range_bo_validate, NULL);
1502 pr_debug("failed %d validate pt bos\n", r);
1510 drm_exec_fini(&ctx->exec);
1514 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1516 drm_exec_fini(&ctx->exec);
1519 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1521 struct kfd_process_device *pdd;
1523 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1525 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1529 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1531 * To prevent concurrent destruction or change of range attributes, the
1532 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1533 * because that would block concurrent evictions and lead to deadlocks. To
1534 * serialize concurrent migrations or validations of the same range, the
1535 * prange->migrate_mutex must be held.
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence) before validating the virtual address range, so that VRAM
 * pages that are in the middle of being migrated or evicted are never mapped.
1540 * The following sequence ensures race-free validation and GPU mapping:
1542 * 1. Reserve page table (and SVM BO if range is in VRAM)
1543 * 2. hmm_range_fault to get page addresses (if system memory)
1544 * 3. DMA-map pages (if system memory)
1545 * 4-a. Take notifier lock
1546 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1547 * 4-c. Check that the range was not split or otherwise invalidated
1548 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
1550 * 5. Release page table (and SVM BO) reservation
1552 static int svm_range_validate_and_map(struct mm_struct *mm,
1553 struct svm_range *prange, int32_t gpuidx,
1554 bool intr, bool wait, bool flush_tlb)
1556 struct svm_validate_context *ctx;
1557 unsigned long start, end, addr;
1558 struct kfd_process *p;
1563 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1566 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1567 ctx->prange = prange;
1570 if (gpuidx < MAX_GPU_INSTANCE) {
1571 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1572 bitmap_set(ctx->bitmap, gpuidx, 1);
1573 } else if (ctx->process->xnack_enabled) {
1574 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
/* If prefetching the range to a GPU, or if a GPU retry fault migrates the
 * range to a GPU that has the ACCESS attribute for it, create the mapping on
 * that GPU.
1580 if (prange->actual_loc) {
1581 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1582 prange->actual_loc);
1584 WARN_ONCE(1, "failed get device by id 0x%x\n",
1585 prange->actual_loc);
1589 if (test_bit(gpuidx, prange->bitmap_access))
1590 bitmap_set(ctx->bitmap, gpuidx, 1);
1593 bitmap_or(ctx->bitmap, prange->bitmap_access,
1594 prange->bitmap_aip, MAX_GPU_INSTANCE);
1597 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1598 if (!prange->mapped_to_gpu) {
1603 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1606 if (prange->actual_loc && !prange->ttm_res) {
1607 /* This should never happen. actual_loc gets set by
1608 * svm_migrate_ram_to_vram after allocating a BO.
1610 WARN_ONCE(1, "VRAM BO missing during validation\n");
r = svm_range_reserve_bos(ctx, intr);
1617 p = container_of(prange->svms, struct kfd_process, svms);
1618 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1620 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1621 if (kfd_svm_page_owner(p, idx) != owner) {
1627 start = prange->start << PAGE_SHIFT;
1628 end = (prange->last + 1) << PAGE_SHIFT;
1629 for (addr = start; addr < end && !r; ) {
1630 struct hmm_range *hmm_range;
1631 struct vm_area_struct *vma;
1633 unsigned long offset;
1634 unsigned long npages;
1637 vma = vma_lookup(mm, addr);
1642 readonly = !(vma->vm_flags & VM_WRITE);
1644 next = min(vma->vm_end, end);
1645 npages = (next - addr) >> PAGE_SHIFT;
1646 WRITE_ONCE(p->svms.faulting_task, current);
1647 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1648 readonly, owner, NULL,
1650 WRITE_ONCE(p->svms.faulting_task, NULL);
1652 pr_debug("failed %d to get svm range pages\n", r);
1656 offset = (addr - start) >> PAGE_SHIFT;
1657 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1658 hmm_range->hmm_pfns);
1660 pr_debug("failed %d to dma map range\n", r);
1664 svm_range_lock(prange);
1665 if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1666 pr_debug("hmm update the range, need validate again\n");
1670 if (!list_empty(&prange->child_list)) {
1671 pr_debug("range split by unmap in parallel, validate again\n");
1676 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1677 ctx->bitmap, wait, flush_tlb);
1680 svm_range_unlock(prange);
1686 prange->validated_once = true;
1687 prange->mapped_to_gpu = true;
1691 svm_range_unreserve_bos(ctx);
1693 prange->is_error_flag = !!r;
1695 prange->validate_timestamp = ktime_get_boottime();
1704 * svm_range_list_lock_and_flush_work - flush pending deferred work
1706 * @svms: the svm range list
1707 * @mm: the mm structure
1709 * Context: Returns with mmap write lock held, pending deferred work flushed
1713 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1714 struct mm_struct *mm)
1717 flush_work(&svms->deferred_list_work);
1718 mmap_write_lock(mm);
1720 if (list_empty(&svms->deferred_range_list))
1722 mmap_write_unlock(mm);
1723 pr_debug("retry flush\n");
1724 goto retry_flush_work;
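/* svm_range_restore_work - delayed work scheduled after an eviction. It
 * revalidates and remaps every range that was invalidated, clears the
 * evicted_ranges counter and resumes the user queues; on failure it
 * reschedules itself after AMDGPU_SVM_RANGE_RESTORE_DELAY_MS.
 */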
1727 static void svm_range_restore_work(struct work_struct *work)
1729 struct delayed_work *dwork = to_delayed_work(work);
1730 struct amdkfd_process_info *process_info;
1731 struct svm_range_list *svms;
1732 struct svm_range *prange;
1733 struct kfd_process *p;
1734 struct mm_struct *mm;
1739 svms = container_of(dwork, struct svm_range_list, restore_work);
1740 evicted_ranges = atomic_read(&svms->evicted_ranges);
1741 if (!evicted_ranges)
1744 pr_debug("restore svm ranges\n");
1746 p = container_of(svms, struct kfd_process, svms);
1747 process_info = p->kgd_process_info;
/* Keep an mm reference while svm_range_validate_and_map maps the ranges */
1750 mm = get_task_mm(p->lead_thread);
1752 pr_debug("svms 0x%p process mm gone\n", svms);
1756 mutex_lock(&process_info->lock);
1757 svm_range_list_lock_and_flush_work(svms, mm);
1758 mutex_lock(&svms->lock);
1760 evicted_ranges = atomic_read(&svms->evicted_ranges);
1762 list_for_each_entry(prange, &svms->list, list) {
1763 invalid = atomic_read(&prange->invalid);
1767 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1768 prange->svms, prange, prange->start, prange->last,
 * If the range is migrating, wait for the migration to finish.
1774 mutex_lock(&prange->migrate_mutex);
1776 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1777 false, true, false);
1779 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1782 mutex_unlock(&prange->migrate_mutex);
1784 goto out_reschedule;
1786 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1787 goto out_reschedule;
1790 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1792 goto out_reschedule;
1796 r = kgd2kfd_resume_mm(mm);
1798 /* No recovery from this failure. Probably the CP is
1799 * hanging. No point trying again.
1801 pr_debug("failed %d to resume KFD\n", r);
1804 pr_debug("restore svm ranges successfully\n");
1807 mutex_unlock(&svms->lock);
1808 mmap_write_unlock(mm);
1809 mutex_unlock(&process_info->lock);
1811 /* If validation failed, reschedule another attempt */
1812 if (evicted_ranges) {
1813 pr_debug("reschedule to restore svm range\n");
1814 schedule_delayed_work(&svms->restore_work,
1815 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1817 kfd_smi_event_queue_restore_rescheduled(mm);
1823 * svm_range_evict - evict svm range
1824 * @prange: svm range structure
1825 * @mm: current process mm_struct
 * @start: first page, in pages, of the address range being invalidated
 * @last: last page, in pages, of the address range being invalidated
1828 * @event: mmu notifier event when range is evicted or migrated
 * Stop all queues of the process to ensure the GPU doesn't access the memory,
 * then return to let the CPU evict the buffer and proceed with the CPU page
 * table update.
 * No lock is needed to synchronize the CPU page table invalidation with GPU
 * execution. If an invalidation happens while the restore work is running, the
 * restore work restarts to pick up the latest CPU page mapping before mapping
 * to the GPU and starting the queues again.
1839 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1840 unsigned long start, unsigned long last,
1841 enum mmu_notifier_event event)
1843 struct svm_range_list *svms = prange->svms;
1844 struct svm_range *pchild;
1845 struct kfd_process *p;
1848 p = container_of(svms, struct kfd_process, svms);
1850 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1851 svms, prange->start, prange->last, start, last);
1853 if (!p->xnack_enabled ||
1854 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1856 bool mapped = prange->mapped_to_gpu;
1858 list_for_each_entry(pchild, &prange->child_list, child_list) {
1859 if (!pchild->mapped_to_gpu)
1862 mutex_lock_nested(&pchild->lock, 1);
1863 if (pchild->start <= last && pchild->last >= start) {
1864 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1865 pchild->start, pchild->last);
1866 atomic_inc(&pchild->invalid);
1868 mutex_unlock(&pchild->lock);
1874 if (prange->start <= last && prange->last >= start)
1875 atomic_inc(&prange->invalid);
1877 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1878 if (evicted_ranges != 1)
1881 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1882 prange->svms, prange->start, prange->last);
1884 /* First eviction, stop the queues */
1885 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1887 pr_debug("failed to quiesce KFD\n");
1889 pr_debug("schedule to restore svm %p ranges\n", svms);
1890 schedule_delayed_work(&svms->restore_work,
1891 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1896 if (event == MMU_NOTIFY_MIGRATE)
1897 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1899 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1901 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1902 prange->svms, start, last);
1903 list_for_each_entry(pchild, &prange->child_list, child_list) {
1904 mutex_lock_nested(&pchild->lock, 1);
1905 s = max(start, pchild->start);
1906 l = min(last, pchild->last);
1908 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1909 mutex_unlock(&pchild->lock);
1911 s = max(start, prange->start);
1912 l = min(last, prange->last);
1914 svm_range_unmap_from_gpus(prange, s, l, trigger);
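/* svm_range_clone - duplicate a range for the transactional update done by
 * svm_range_add. The clone takes its own reference on the svm_bo and copies
 * the attributes, so the originals stay untouched until the whole transaction
 * succeeds.
 */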
1920 static struct svm_range *svm_range_clone(struct svm_range *old)
1922 struct svm_range *new;
1924 new = svm_range_new(old->svms, old->start, old->last, false);
1929 new->ttm_res = old->ttm_res;
1930 new->offset = old->offset;
1931 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1932 spin_lock(&new->svm_bo->list_lock);
1933 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1934 spin_unlock(&new->svm_bo->list_lock);
1936 new->flags = old->flags;
1937 new->preferred_loc = old->preferred_loc;
1938 new->prefetch_loc = old->prefetch_loc;
1939 new->actual_loc = old->actual_loc;
1940 new->granularity = old->granularity;
1941 new->mapped_to_gpu = old->mapped_to_gpu;
1942 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1943 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
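/* svm_range_set_max_pages - update the global max_svm_range_pages limit when
 * a GPU is added. With 4KiB pages, "VRAM size >> 17" is 1/32 of the partition
 * VRAM expressed in pages; the smallest such value across all partitions,
 * clamped to [2MB, 1GB] and rounded down to a power of two, becomes the limit.
 */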
1948 void svm_range_set_max_pages(struct amdgpu_device *adev)
1951 uint64_t pages, _pages;
1952 uint64_t min_pages = 0;
1955 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
1956 if (adev->kfd.dev->nodes[i]->xcp)
1957 id = adev->kfd.dev->nodes[i]->xcp->id;
1960 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
1961 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
1962 pages = rounddown_pow_of_two(pages);
1963 min_pages = min_not_zero(min_pages, pages);
1967 max_pages = READ_ONCE(max_svm_range_pages);
1968 _pages = min_not_zero(max_pages, min_pages);
1969 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
1973 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
1974 uint64_t max_pages, struct list_head *insert_list,
1975 struct list_head *update_list)
1977 struct svm_range *prange;
1980 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
1981 max_pages, start, last);
1983 while (last >= start) {
1984 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
1986 prange = svm_range_new(svms, start, l, true);
1989 list_add(&prange->list, insert_list);
1990 list_add(&prange->update_list, update_list);
1998 * svm_range_add - add svm range and handle overlap
1999 * @p: the range add to this process svms
 * @start: start address of the range, in pages, page aligned
 * @size: size of the range, in pages, page aligned
2002 * @nattr: number of attributes
2003 * @attrs: array of attributes
2004 * @update_list: output, the ranges need validate and update GPU mapping
2005 * @insert_list: output, the ranges need insert to svms
2006 * @remove_list: output, the ranges are replaced and need remove from svms
2008 * Check if the virtual address range has overlap with any existing ranges,
2009 * split partly overlapping ranges and add new ranges in the gaps. All changes
2010 * should be applied to the range_list and interval tree transactionally. If
2011 * any range split or allocation fails, the entire update fails. Therefore any
 * existing overlapping svm_ranges are cloned and the original svm_ranges are
 * left unchanged.
2015 * If the transaction succeeds, the caller can update and insert clones and
2016 * new ranges, then free the originals.
2018 * Otherwise the caller can free the clones and new ranges, while the old
2019 * svm_ranges remain unchanged.
2021 * Context: Process context, caller must hold svms->lock
2024 * 0 - OK, otherwise error code
2027 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2028 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2029 struct list_head *update_list, struct list_head *insert_list,
2030 struct list_head *remove_list)
2032 unsigned long last = start + size - 1UL;
2033 struct svm_range_list *svms = &p->svms;
2034 struct interval_tree_node *node;
2035 struct svm_range *prange;
2036 struct svm_range *tmp;
2037 struct list_head new_list;
2040 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2042 INIT_LIST_HEAD(update_list);
2043 INIT_LIST_HEAD(insert_list);
2044 INIT_LIST_HEAD(remove_list);
2045 INIT_LIST_HEAD(&new_list);
2047 node = interval_tree_iter_first(&svms->objects, start, last);
2049 struct interval_tree_node *next;
2050 unsigned long next_start;
2052 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2055 prange = container_of(node, struct svm_range, it_node);
2056 next = interval_tree_iter_next(node, start, last);
2057 next_start = min(node->last, last) + 1;
2059 if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
2061 } else if (node->start < start || node->last > last) {
2062 /* node intersects the update range and its attributes
2063 * will change. Clone and split it, apply updates only
2064 * to the overlapping part
2066 struct svm_range *old = prange;
2068 prange = svm_range_clone(old);
2074 list_add(&old->update_list, remove_list);
2075 list_add(&prange->list, insert_list);
2076 list_add(&prange->update_list, update_list);
2078 if (node->start < start) {
2079 pr_debug("change old range start\n");
2080 r = svm_range_split_head(prange, start,
2085 if (node->last > last) {
2086 pr_debug("change old range last\n");
2087 r = svm_range_split_tail(prange, last,
/* The node is contained entirely within start..last, so only its
 * attributes need to be updated
2096 list_add(&prange->update_list, update_list);
2099 /* insert a new node if needed */
2100 if (node->start > start) {
2101 r = svm_range_split_new(svms, start, node->start - 1,
2102 READ_ONCE(max_svm_range_pages),
2103 &new_list, update_list);
2112 /* add a final range at the end if needed */
2114 r = svm_range_split_new(svms, start, last,
2115 READ_ONCE(max_svm_range_pages),
2116 &new_list, update_list);
2120 list_for_each_entry_safe(prange, tmp, insert_list, list)
2121 svm_range_free(prange, false);
2122 list_for_each_entry_safe(prange, tmp, &new_list, list)
2123 svm_range_free(prange, true);
2125 list_splice(&new_list, insert_list);
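/* svm_range_update_notifier_and_interval_tree - after a deferred split or
 * unmap changed prange->start/last, re-insert the range into the interval
 * tree and re-register its MMU interval notifier to match the new bounds.
 */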
2132 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2133 struct svm_range *prange)
2135 unsigned long start;
2138 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2139 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2141 if (prange->start == start && prange->last == last)
2144 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2145 prange->svms, prange, start, last, prange->start,
2148 if (start != 0 && last != 0) {
2149 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2150 svm_range_remove_notifier(prange);
2152 prange->it_node.start = prange->start;
2153 prange->it_node.last = prange->last;
2155 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2156 svm_range_add_notifier_locked(mm, prange);
2160 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2161 struct mm_struct *mm)
2163 switch (prange->work_item.op) {
2165 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2166 svms, prange, prange->start, prange->last);
2168 case SVM_OP_UNMAP_RANGE:
2169 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2170 svms, prange, prange->start, prange->last);
2171 svm_range_unlink(prange);
2172 svm_range_remove_notifier(prange);
2173 svm_range_free(prange, true);
2175 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2176 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2177 svms, prange, prange->start, prange->last);
2178 svm_range_update_notifier_and_interval_tree(mm, prange);
2180 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2181 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2182 svms, prange, prange->start, prange->last);
2183 svm_range_update_notifier_and_interval_tree(mm, prange);
2184 /* TODO: implement deferred validation and mapping */
2186 case SVM_OP_ADD_RANGE:
2187 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2188 prange->start, prange->last);
2189 svm_range_add_to_svms(prange);
2190 svm_range_add_notifier_locked(mm, prange);
2192 case SVM_OP_ADD_RANGE_AND_MAP:
2193 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2194 prange, prange->start, prange->last);
2195 svm_range_add_to_svms(prange);
2196 svm_range_add_notifier_locked(mm, prange);
2197 /* TODO: implement deferred validation and mapping */
2200 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2201 prange->work_item.op);
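/* svm_range_drain_retry_fault - wait until the interrupt handlers have
 * processed all retry page faults that were already in flight, so that stale
 * faults cannot be handled against ranges that are about to be removed.
 */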
2205 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2207 struct kfd_process_device *pdd;
2208 struct kfd_process *p;
2212 p = container_of(svms, struct kfd_process, svms);
2215 drain = atomic_read(&svms->drain_pagefaults);
2219 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2224 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2226 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2227 pdd->dev->adev->irq.retry_cam_enabled ?
2228 &pdd->dev->adev->irq.ih :
2229 &pdd->dev->adev->irq.ih1);
2231 if (pdd->dev->adev->irq.retry_cam_enabled)
2232 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2233 &pdd->dev->adev->irq.ih_soft);
2236 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2238 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
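/* drain_pagefaults may have been incremented again while draining; if the
 * counter changed, the drain is not complete and must be repeated.
 */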
2242 static void svm_range_deferred_list_work(struct work_struct *work)
2244 struct svm_range_list *svms;
2245 struct svm_range *prange;
2246 struct mm_struct *mm;
2248 svms = container_of(work, struct svm_range_list, deferred_list_work);
2249 pr_debug("enter svms 0x%p\n", svms);
2251 spin_lock(&svms->deferred_list_lock);
2252 while (!list_empty(&svms->deferred_range_list)) {
2253 prange = list_first_entry(&svms->deferred_range_list,
2254 struct svm_range, deferred_list);
2255 spin_unlock(&svms->deferred_list_lock);
2257 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2258 prange->start, prange->last, prange->work_item.op);
2260 mm = prange->work_item.mm;
2262 mmap_write_lock(mm);
2264 /* Checking for the need to drain retry faults must be inside
2265 * mmap write lock to serialize with munmap notifiers.
2267 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2268 mmap_write_unlock(mm);
2269 svm_range_drain_retry_fault(svms);
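/* After draining, re-acquire the mmap write lock and re-check, since more
 * faults may have been flagged in the meantime.
 */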
2273 /* Removal from deferred_list must be done inside the mmap write lock, because:
2275 * 1. unmap_from_cpu may change work_item.op and add the range
2276 * to deferred_list again, causing a use-after-free bug.
2277 * 2. svm_range_list_lock_and_flush_work may hold mmap write
2278 * lock and continue because deferred_list is empty, but
2279 * deferred_list work is actually waiting for mmap lock.
2281 spin_lock(&svms->deferred_list_lock);
2282 list_del_init(&prange->deferred_list);
2283 spin_unlock(&svms->deferred_list_lock);
2285 mutex_lock(&svms->lock);
2286 mutex_lock(&prange->migrate_mutex);
2287 while (!list_empty(&prange->child_list)) {
2288 struct svm_range *pchild;
2290 pchild = list_first_entry(&prange->child_list,
2291 struct svm_range, child_list);
2292 pr_debug("child prange 0x%p op %d\n", pchild,
2293 pchild->work_item.op);
2294 list_del_init(&pchild->child_list);
2295 svm_range_handle_list_op(svms, pchild, mm);
2297 mutex_unlock(&prange->migrate_mutex);
2299 svm_range_handle_list_op(svms, prange, mm);
2300 mutex_unlock(&svms->lock);
2301 mmap_write_unlock(mm);
2303 /* Pairs with mmget in svm_range_add_list_work */
2306 spin_lock(&svms->deferred_list_lock);
2308 spin_unlock(&svms->deferred_list_lock);
2309 pr_debug("exit svms 0x%p\n", svms);
2313 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2314 struct mm_struct *mm, enum svm_work_list_ops op)
2316 spin_lock(&svms->deferred_list_lock);
2317 /* if prange is on the deferred list */
2318 if (!list_empty(&prange->deferred_list)) {
2319 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2320 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
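/* An existing SVM_OP_UNMAP_RANGE is final and is never overridden by a later op. */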
2321 if (op != SVM_OP_NULL &&
2322 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2323 prange->work_item.op = op;
2325 prange->work_item.op = op;
2327 /* Pairs with mmput in deferred_list_work */
2329 prange->work_item.mm = mm;
2330 list_add_tail(&prange->deferred_list,
2331 &prange->svms->deferred_range_list);
2332 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2333 prange, prange->start, prange->last, op);
2335 spin_unlock(&svms->deferred_list_lock);
2338 void schedule_deferred_list_work(struct svm_range_list *svms)
2340 spin_lock(&svms->deferred_list_lock);
2341 if (!list_empty(&svms->deferred_range_list))
2342 schedule_work(&svms->deferred_list_work);
2343 spin_unlock(&svms->deferred_list_lock);
2347 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2348 struct svm_range *prange, unsigned long start,
2351 struct svm_range *head;
2352 struct svm_range *tail;
2354 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2355 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2356 prange->start, prange->last);
2359 if (start > prange->last || last < prange->start)
2362 head = tail = prange;
2363 if (start > prange->start)
2364 svm_range_split(prange, prange->start, start - 1, &tail);
2365 if (last < tail->last)
2366 svm_range_split(tail, last + 1, tail->last, &head);
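/* When both splits happen, prange keeps the part before start, head covers the
 * unmapped overlap and tail keeps the part after last; otherwise, whichever of
 * head/tail differs from prange is the piece being unmapped.
 */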
2368 if (head != prange && tail != prange) {
2369 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2370 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2371 } else if (tail != prange) {
2372 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2373 } else if (head != prange) {
2374 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2375 } else if (parent != prange) {
2376 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2381 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2382 unsigned long start, unsigned long last)
2384 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2385 struct svm_range_list *svms;
2386 struct svm_range *pchild;
2387 struct kfd_process *p;
2391 p = kfd_lookup_process_by_mm(mm);
2396 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2397 prange, prange->start, prange->last, start, last);
2399 /* Make sure pending page faults are drained in the deferred worker
2400 * before the range is freed to avoid straggler interrupts on
2401 * unmapped memory causing "phantom faults".
2403 atomic_inc(&svms->drain_pagefaults);
2405 unmap_parent = start <= prange->start && last >= prange->last;
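/* unmap_parent: the CPU unmap covers the whole parent range, so the parent
 * itself is queued for removal rather than just a notifier update.
 */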
2407 list_for_each_entry(pchild, &prange->child_list, child_list) {
2408 mutex_lock_nested(&pchild->lock, 1);
2409 s = max(start, pchild->start);
2410 l = min(last, pchild->last);
2412 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2413 svm_range_unmap_split(mm, prange, pchild, start, last);
2414 mutex_unlock(&pchild->lock);
2416 s = max(start, prange->start);
2417 l = min(last, prange->last);
2419 svm_range_unmap_from_gpus(prange, s, l, trigger);
2420 svm_range_unmap_split(mm, prange, prange, start, last);
2423 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2425 svm_range_add_list_work(svms, prange, mm,
2426 SVM_OP_UPDATE_RANGE_NOTIFIER);
2427 schedule_deferred_list_work(svms);
2429 kfd_unref_process(p);
2433 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2434 * @mni: mmu_interval_notifier struct
2435 * @range: mmu_notifier_range struct
2436 * @cur_seq: value to pass to mmu_interval_set_seq()
2438 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2439 * otherwise it comes from migration or a CPU page invalidation callback.
2441 * For an unmap event, unmap the range from the GPUs, remove the prange from svms
2442 * in a deferred work thread, and split the prange if only part of it is unmapped.
2444 * For an invalidation event, if GPU retry faults are not enabled, evict the queues,
2445 * then schedule svm_range_restore_work to update the GPU mapping and resume the queues.
2446 * If GPU retry faults are enabled, unmap the svm range from the GPU; the retry fault
2447 * will update the GPU mapping to recover.
2449 * Context: mmap lock, notifier_invalidate_start lock are held
2450 * for invalidate event, prange lock is held if this is from migration
2453 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2454 const struct mmu_notifier_range *range,
2455 unsigned long cur_seq)
2457 struct svm_range *prange;
2458 unsigned long start;
2461 if (range->event == MMU_NOTIFY_RELEASE)
2463 if (!mmget_not_zero(mni->mm))
2466 start = mni->interval_tree.start;
2467 last = mni->interval_tree.last;
2468 start = max(start, range->start) >> PAGE_SHIFT;
2469 last = min(last, range->end - 1) >> PAGE_SHIFT;
2470 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2471 start, last, range->start >> PAGE_SHIFT,
2472 (range->end - 1) >> PAGE_SHIFT,
2473 mni->interval_tree.start >> PAGE_SHIFT,
2474 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2476 prange = container_of(mni, struct svm_range, notifier);
2478 svm_range_lock(prange);
2479 mmu_interval_set_seq(mni, cur_seq);
2481 switch (range->event) {
2482 case MMU_NOTIFY_UNMAP:
2483 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2486 svm_range_evict(prange, mni->mm, start, last, range->event);
2490 svm_range_unlock(prange);
2497 * svm_range_from_addr - find svm range from fault address
2498 * @svms: svm range list header
2499 * @addr: address to search range interval tree, in pages
2500 * @parent: parent range if range is on child list
2502 * Context: The caller must hold svms->lock
2504 * Return: the svm_range found or NULL
2507 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2508 struct svm_range **parent)
2510 struct interval_tree_node *node;
2511 struct svm_range *prange;
2512 struct svm_range *pchild;
2514 node = interval_tree_iter_first(&svms->objects, addr, addr);
2518 prange = container_of(node, struct svm_range, it_node);
2519 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2520 addr, prange->start, prange->last, node->start, node->last);
2522 if (addr >= prange->start && addr <= prange->last) {
2527 list_for_each_entry(pchild, &prange->child_list, child_list)
2528 if (addr >= pchild->start && addr <= pchild->last) {
2529 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2530 addr, pchild->start, pchild->last);
2539 /* svm_range_best_restore_location - decide the best fault restore location
2540 * @prange: svm range structure
2541 * @adev: the GPU on which vm fault happened
2543 * This is only called when xnack is on, to decide the best location to restore
2544 * the range mapping after GPU vm fault. Caller uses the best location to do
2545 * migration if actual loc is not best location, then update GPU page table
2546 * mapping to the best location.
2548 * If the preferred loc is accessible by faulting GPU, use preferred loc.
2549 * If the faulting GPU's idx is set in the range's ACCESSIBLE bitmap, best_loc is the faulting GPU.
2550 * If the faulting GPU's idx is set in the range's ACCESSIBLE_IN_PLACE bitmap, then
2551 * if the range's actual loc is CPU, best_loc is CPU;
2552 * if the faulting GPU is in the same XGMI hive as the actual loc GPU, best_loc is the actual loc.
2554 * Otherwise, the faulting GPU has no access and best_loc is -1.
2557 * -1 means the faulting GPU has no access
2558 * 0 for CPU, otherwise a GPU id
2561 svm_range_best_restore_location(struct svm_range *prange,
2562 struct kfd_node *node,
2565 struct kfd_node *bo_node, *preferred_node;
2566 struct kfd_process *p;
2570 p = container_of(prange->svms, struct kfd_process, svms);
2572 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2574 pr_debug("failed to get gpuid from kgd\n");
2578 if (node->adev->gmc.is_app_apu)
2581 if (prange->preferred_loc == gpuid ||
2582 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2583 return prange->preferred_loc;
2584 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2585 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2586 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2587 return prange->preferred_loc;
2591 if (test_bit(*gpuidx, prange->bitmap_access))
2594 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2595 if (!prange->actual_loc)
2598 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2599 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2600 return prange->actual_loc;
2609 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2610 unsigned long *start, unsigned long *last,
2611 bool *is_heap_stack)
2613 struct vm_area_struct *vma;
2614 struct interval_tree_node *node;
2615 unsigned long start_limit, end_limit;
2617 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2619 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2623 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2624 vma->vm_end >= vma->vm_mm->start_brk) ||
2625 (vma->vm_start <= vma->vm_mm->start_stack &&
2626 vma->vm_end >= vma->vm_mm->start_stack);
2628 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2629 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2630 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2631 (unsigned long)ALIGN(addr + 1, 2UL << 8));
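/* 2UL << 8 = 512 pages, i.e. a 2MB window with 4KB pages: the new range is
 * clamped to a 2MB-aligned region around the fault address, further limited
 * by the VMA bounds.
 */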
2632 /* First range that starts after the fault address */
2633 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2635 end_limit = min(end_limit, node->start);
2636 /* Last range that ends before the fault address */
2637 node = container_of(rb_prev(&node->rb),
2638 struct interval_tree_node, rb);
2640 /* Last range must end before addr because
2641 * there was no range after addr
2643 node = container_of(rb_last(&p->svms.objects.rb_root),
2644 struct interval_tree_node, rb);
2647 if (node->last >= addr) {
2648 WARN(1, "Overlap with prev node and page fault addr\n");
2651 start_limit = max(start_limit, node->last + 1);
2654 *start = start_limit;
2655 *last = end_limit - 1;
2657 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2658 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2659 *start, *last, *is_heap_stack);
2665 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2666 uint64_t *bo_s, uint64_t *bo_l)
2668 struct amdgpu_bo_va_mapping *mapping;
2669 struct interval_tree_node *node;
2670 struct amdgpu_bo *bo = NULL;
2671 unsigned long userptr;
2675 for (i = 0; i < p->n_pdds; i++) {
2676 struct amdgpu_vm *vm;
2678 if (!p->pdds[i]->drm_priv)
2681 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2682 r = amdgpu_bo_reserve(vm->root.bo, false);
2686 /* Check userptr by searching entire vm->va interval tree */
2687 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2689 mapping = container_of((struct rb_node *)node,
2690 struct amdgpu_bo_va_mapping, rb);
2691 bo = mapping->bo_va->base.bo;
2693 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2694 start << PAGE_SHIFT,
2697 node = interval_tree_iter_next(node, 0, ~0ULL);
2701 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2704 *bo_s = userptr >> PAGE_SHIFT;
2705 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2707 amdgpu_bo_unreserve(vm->root.bo);
2710 amdgpu_bo_unreserve(vm->root.bo);
2716 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2717 struct kfd_process *p,
2718 struct mm_struct *mm,
2721 struct svm_range *prange = NULL;
2722 unsigned long start, last;
2723 uint32_t gpuid, gpuidx;
2729 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2733 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2734 if (r != -EADDRINUSE)
2735 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2737 if (r == -EADDRINUSE) {
2738 if (addr >= bo_s && addr <= bo_l)
2741 /* The 2MB-aligned range overlaps an existing mapping; fall back to a one-page svm range at the fault address */
2746 prange = svm_range_new(&p->svms, start, last, true);
2748 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2751 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2752 pr_debug("failed to get gpuid from kgd\n");
2753 svm_range_free(prange, true);
2758 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2760 svm_range_add_to_svms(prange);
2761 svm_range_add_notifier_locked(mm, prange);
2766 /* svm_range_skip_recover - decide if prange can be recovered
2767 * @prange: svm range structure
2769 * The GPU vm retry fault handler skips recovering the range in these cases:
2770 * 1. prange is on the deferred list to be removed after unmap; it is a stale fault,
2771 * and the deferred list work will drain the stale fault before freeing the prange.
2772 * 2. prange is on the deferred list to add its interval notifier after a split, or
2773 * 3. prange is a child range split from a parent prange; recover later
2774 * after the interval notifier is added.
2776 * Return: true to skip recover, false to recover
2778 static bool svm_range_skip_recover(struct svm_range *prange)
2780 struct svm_range_list *svms = prange->svms;
2782 spin_lock(&svms->deferred_list_lock);
2783 if (list_empty(&prange->deferred_list) &&
2784 list_empty(&prange->child_list)) {
2785 spin_unlock(&svms->deferred_list_lock);
2788 spin_unlock(&svms->deferred_list_lock);
2790 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2791 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2792 svms, prange, prange->start, prange->last);
2795 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2796 prange->work_item.op == SVM_OP_ADD_RANGE) {
2797 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2798 svms, prange, prange->start, prange->last);
2805 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2808 struct kfd_process_device *pdd;
2810 /* fault is on different page of same range
2811 * or fault is skipped to recover later
2812 * or fault is on invalid virtual address
2814 if (gpuidx == MAX_GPU_INSTANCE) {
2818 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2823 /* fault is recovered
2824 * or fault cannot be recovered because the GPU has no access to the range
2826 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2828 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2832 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2834 unsigned long requested = VM_READ;
2837 requested |= VM_WRITE;
2839 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2841 return (vma->vm_flags & requested) == requested;
2845 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2846 uint32_t vmid, uint32_t node_id,
2847 uint64_t addr, bool write_fault)
2849 struct mm_struct *mm = NULL;
2850 struct svm_range_list *svms;
2851 struct svm_range *prange;
2852 struct kfd_process *p;
2853 ktime_t timestamp = ktime_get_boottime();
2854 struct kfd_node *node;
2856 int32_t gpuidx = MAX_GPU_INSTANCE;
2857 bool write_locked = false;
2858 struct vm_area_struct *vma;
2859 bool migration = false;
2862 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2863 pr_debug("device does not support SVM\n");
2867 p = kfd_lookup_process_by_pasid(pasid);
2869 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2874 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2876 if (atomic_read(&svms->drain_pagefaults)) {
2877 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2882 if (!p->xnack_enabled) {
2883 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2888 /* p->lead_thread is available because kfd_process_wq_release flushes the work
2889 * before releasing the task ref.
2891 mm = get_task_mm(p->lead_thread);
2893 pr_debug("svms 0x%p failed to get mm\n", svms);
2898 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2900 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2907 mutex_lock(&svms->lock);
2908 prange = svm_range_from_addr(svms, addr, NULL);
2910 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2912 if (!write_locked) {
2913 /* Need the write lock to create new range with MMU notifier.
2914 * Also flush pending deferred work to make sure the interval
2915 * tree is up to date before we add a new range
2917 mutex_unlock(&svms->lock);
2918 mmap_read_unlock(mm);
2919 mmap_write_lock(mm);
2920 write_locked = true;
2921 goto retry_write_locked;
2923 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2925 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2927 mmap_write_downgrade(mm);
2929 goto out_unlock_svms;
2933 mmap_write_downgrade(mm);
2935 mutex_lock(&prange->migrate_mutex);
2937 if (svm_range_skip_recover(prange)) {
2938 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
2940 goto out_unlock_range;
2943 /* skip duplicate vm fault on different pages of same range */
2944 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2945 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
2946 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2947 svms, prange->start, prange->last);
2949 goto out_unlock_range;
2952 /* __do_munmap removed the VMA; return success, as we are handling a stale retry fault on a range that has already been unmapped. */
2955 vma = vma_lookup(mm, addr << PAGE_SHIFT);
2957 pr_debug("address 0x%llx VMA is removed\n", addr);
2959 goto out_unlock_range;
2962 if (!svm_fault_allowed(vma, write_fault)) {
2963 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2964 write_fault ? "write" : "read");
2966 goto out_unlock_range;
2969 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
2970 if (best_loc == -1) {
2971 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2972 svms, prange->start, prange->last);
2974 goto out_unlock_range;
2977 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2978 svms, prange->start, prange->last, best_loc,
2979 prange->actual_loc);
2981 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
2982 write_fault, timestamp);
2984 if (prange->actual_loc != best_loc) {
2987 r = svm_migrate_to_vram(prange, best_loc, mm,
2988 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2990 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2992 /* Fallback to system memory if migration to
2995 if (prange->actual_loc)
2996 r = svm_migrate_vram_to_ram(prange, mm,
2997 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
3003 r = svm_migrate_vram_to_ram(prange, mm,
3004 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
3008 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3009 r, svms, prange->start, prange->last);
3010 goto out_unlock_range;
3014 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
3016 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3017 r, svms, prange->start, prange->last);
3019 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3023 mutex_unlock(&prange->migrate_mutex);
3025 mutex_unlock(&svms->lock);
3026 mmap_read_unlock(mm);
3028 svm_range_count_fault(node, p, gpuidx);
3032 kfd_unref_process(p);
3035 pr_debug("recover vm fault later\n");
3036 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3043 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3045 struct svm_range *prange, *pchild;
3046 uint64_t reserved_size = 0;
3050 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3052 mutex_lock(&p->svms.lock);
3054 list_for_each_entry(prange, &p->svms.list, list) {
3055 svm_range_lock(prange);
3056 list_for_each_entry(pchild, &prange->child_list, child_list) {
3057 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3058 if (xnack_enabled) {
3059 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3060 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3062 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3063 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3066 reserved_size += size;
3070 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3071 if (xnack_enabled) {
3072 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3073 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3075 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3076 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3079 reserved_size += size;
3082 svm_range_unlock(prange);
3088 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3089 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3091 /* Change xnack mode must be inside svms lock, to avoid race with
3092 * svm_range_deferred_list_work unreserve memory in parallel.
3094 p->xnack_enabled = xnack_enabled;
3096 mutex_unlock(&p->svms.lock);
3100 void svm_range_list_fini(struct kfd_process *p)
3102 struct svm_range *prange;
3103 struct svm_range *next;
3105 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3107 cancel_delayed_work_sync(&p->svms.restore_work);
3109 /* Ensure list work is finished before process is destroyed */
3110 flush_work(&p->svms.deferred_list_work);
3113 * Ensure no retry fault comes in afterwards, as the page fault handler would
3114 * no longer find the kfd process and could not take the mm lock to recover the fault.
3116 atomic_inc(&p->svms.drain_pagefaults);
3117 svm_range_drain_retry_fault(&p->svms);
3119 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3120 svm_range_unlink(prange);
3121 svm_range_remove_notifier(prange);
3122 svm_range_free(prange, true);
3125 mutex_destroy(&p->svms.lock);
3127 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3130 int svm_range_list_init(struct kfd_process *p)
3132 struct svm_range_list *svms = &p->svms;
3135 svms->objects = RB_ROOT_CACHED;
3136 mutex_init(&svms->lock);
3137 INIT_LIST_HEAD(&svms->list);
3138 atomic_set(&svms->evicted_ranges, 0);
3139 atomic_set(&svms->drain_pagefaults, 0);
3140 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3141 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3142 INIT_LIST_HEAD(&svms->deferred_range_list);
3143 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3144 spin_lock_init(&svms->deferred_list_lock);
3146 for (i = 0; i < p->n_pdds; i++)
3147 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3148 bitmap_set(svms->bitmap_supported, i, 1);
3154 * svm_range_check_vm - check if virtual address range mapped already
3155 * @p: current kfd_process
3156 * @start: range start address, in pages
3157 * @last: range last address, in pages
3158 * @bo_s: mapping start address in pages if address range already mapped
3159 * @bo_l: mapping last address in pages if address range already mapped
3161 * The purpose is to avoid virtual address ranges already allocated by the
3162 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3163 * Every pdd in the kfd_process is checked.
3165 * Context: Process context
3167 * Return 0 - OK, if the range is not mapped.
3168 * Otherwise error code:
3169 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3170 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3171 * a signal. Release all buffer reservations and return to user-space.
3174 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3175 uint64_t *bo_s, uint64_t *bo_l)
3177 struct amdgpu_bo_va_mapping *mapping;
3178 struct interval_tree_node *node;
3182 for (i = 0; i < p->n_pdds; i++) {
3183 struct amdgpu_vm *vm;
3185 if (!p->pdds[i]->drm_priv)
3188 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3189 r = amdgpu_bo_reserve(vm->root.bo, false);
3193 node = interval_tree_iter_first(&vm->va, start, last);
3195 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3197 mapping = container_of((struct rb_node *)node,
3198 struct amdgpu_bo_va_mapping, rb);
3200 *bo_s = mapping->start;
3201 *bo_l = mapping->last;
3203 amdgpu_bo_unreserve(vm->root.bo);
3206 amdgpu_bo_unreserve(vm->root.bo);
3213 * svm_range_is_valid - check if virtual address range is valid
3214 * @p: current kfd_process
3215 * @start: range start address, in pages
3216 * @size: range size, in pages
3218 * A valid virtual address range is fully covered by one or more VMAs, none of which is a device mapping.
3220 * Context: Process context
3223 * 0 - OK, otherwise error code
3226 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3228 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3229 struct vm_area_struct *vma;
3231 unsigned long start_unchg = start;
3233 start <<= PAGE_SHIFT;
3234 end = start + (size << PAGE_SHIFT);
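/* Walk the VMAs covering [start, end): every byte must be backed by a VMA
 * that is not a device (VM_IO/VM_PFNMAP/VM_MIXEDMAP) mapping.
 */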
3236 vma = vma_lookup(p->mm, start);
3237 if (!vma || (vma->vm_flags & device_vma))
3239 start = min(end, vma->vm_end);
3240 } while (start < end);
3242 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3247 * svm_range_best_prefetch_location - decide the best prefetch location
3248 * @prange: svm range structure
3251 * If the range maps to a single GPU, the best prefetch location is prefetch_loc,
3252 * which can be CPU or GPU.
3254 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best prefetch
3255 * location is the prefetch_loc GPU only if the GPUs are connected in the same XGMI
3256 * hive; otherwise the best prefetch location is always CPU, because a GPU cannot
3257 * have a coherent mapping of another GPU's VRAM, even with a large-BAR PCIe connection.
3260 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch location is
3261 * prefetch_loc; access from another GPU will generate a vm fault and trigger migration.
3263 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location is the
3264 * prefetch_loc GPU only if the GPUs are connected in the same XGMI hive; otherwise the
3265 * best prefetch location is always CPU.
3267 * Context: Process context
3270 * 0 for CPU or GPU id
3273 svm_range_best_prefetch_location(struct svm_range *prange)
3275 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3276 uint32_t best_loc = prange->prefetch_loc;
3277 struct kfd_process_device *pdd;
3278 struct kfd_node *bo_node;
3279 struct kfd_process *p;
3282 p = container_of(prange->svms, struct kfd_process, svms);
3284 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3287 bo_node = svm_range_get_node_by_id(prange, best_loc);
3289 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3294 if (bo_node->adev->gmc.is_app_apu) {
3299 if (p->xnack_enabled)
3300 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3302 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3305 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3306 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3308 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3312 if (pdd->dev->adev == bo_node->adev)
3315 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3322 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3323 p->xnack_enabled, &p->svms, prange->start, prange->last,
3329 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3330 * @mm: current process mm_struct
3331 * @prange: svm range structure
3332 * @migrated: output, true if migration is triggered
3334 * If the range's prefetch_loc is a GPU and its actual loc is CPU (0), migrate the range to VRAM.
3336 * If the range's prefetch_loc is CPU (0) and its actual loc is a GPU, migrate the range to system memory.
3339 * If GPU vm fault retry is not enabled, migration interacts with the MMU notifier:
3341 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback svm_range_evict
3342 * stops all queues and schedules the restore work.
3343 * 2. svm_range_restore_work waits for the migration to finish, because
3344 * a. svm_range_validate_vram takes prange->migrate_mutex
3345 * b. svm_range_validate_ram HMM get pages waits for the CPU fault handler to return
3346 * 3. The restore work updates the GPU mappings and resumes all queues.
3348 * Context: Process context
3351 * 0 - OK, otherwise - error code of migration
3354 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3361 best_loc = svm_range_best_prefetch_location(prange);
3363 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3364 best_loc == prange->actual_loc)
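/* A best_loc of 0 selects system memory and is handled by svm_migrate_vram_to_ram;
 * a non-zero best_loc is a GPU id and the range is migrated to that GPU's VRAM.
 */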
3368 r = svm_migrate_vram_to_ram(prange, mm,
3369 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3374 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3380 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3385 if (dma_fence_is_signaled(&fence->base))
3388 if (fence->svm_bo) {
3389 WRITE_ONCE(fence->svm_bo->evicting, 1);
3390 schedule_work(&fence->svm_bo->eviction_work);
3396 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3398 struct svm_range_bo *svm_bo;
3399 struct mm_struct *mm;
3402 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3403 if (!svm_bo_ref_unless_zero(svm_bo))
3404 return; /* svm_bo was freed while eviction was pending */
3406 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3407 mm = svm_bo->eviction_fence->mm;
3409 svm_range_bo_unref(svm_bo);
3414 spin_lock(&svm_bo->list_lock);
3415 while (!list_empty(&svm_bo->range_list) && !r) {
3416 struct svm_range *prange =
3417 list_first_entry(&svm_bo->range_list,
3418 struct svm_range, svm_bo_list);
3421 list_del_init(&prange->svm_bo_list);
3422 spin_unlock(&svm_bo->list_lock);
3424 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3425 prange->start, prange->last);
3427 mutex_lock(&prange->migrate_mutex);
3429 r = svm_migrate_vram_to_ram(prange, mm,
3430 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3431 } while (!r && prange->actual_loc && --retries);
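/* Retry the migration a bounded number of times; if the range still has VRAM
 * pages after the retries, report the failure once instead of looping forever.
 */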
3433 if (!r && prange->actual_loc)
3434 pr_info_once("Migration failed during eviction");
3436 if (!prange->actual_loc) {
3437 mutex_lock(&prange->lock);
3438 prange->svm_bo = NULL;
3439 mutex_unlock(&prange->lock);
3441 mutex_unlock(&prange->migrate_mutex);
3443 spin_lock(&svm_bo->list_lock);
3445 spin_unlock(&svm_bo->list_lock);
3446 mmap_read_unlock(mm);
3449 dma_fence_signal(&svm_bo->eviction_fence->base);
3451 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3452 * has been called in svm_migrate_vram_to_ram
3454 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3455 svm_range_bo_unref(svm_bo);
3459 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3460 uint64_t start, uint64_t size, uint32_t nattr,
3461 struct kfd_ioctl_svm_attribute *attrs)
3463 struct amdkfd_process_info *process_info = p->kgd_process_info;
3464 struct list_head update_list;
3465 struct list_head insert_list;
3466 struct list_head remove_list;
3467 struct svm_range_list *svms;
3468 struct svm_range *prange;
3469 struct svm_range *next;
3470 bool update_mapping = false;
3474 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3475 p->pasid, &p->svms, start, start + size - 1, size);
3477 r = svm_range_check_attr(p, nattr, attrs);
3483 mutex_lock(&process_info->lock);
3485 svm_range_list_lock_and_flush_work(svms, mm);
3487 r = svm_range_is_valid(p, start, size);
3489 pr_debug("invalid range r=%d\n", r);
3490 mmap_write_unlock(mm);
3494 mutex_lock(&svms->lock);
3496 /* Add new range and split existing ranges as needed */
3497 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3498 &insert_list, &remove_list);
3500 mutex_unlock(&svms->lock);
3501 mmap_write_unlock(mm);
3504 /* Apply changes as a transaction */
3505 list_for_each_entry_safe(prange, next, &insert_list, list) {
3506 svm_range_add_to_svms(prange);
3507 svm_range_add_notifier_locked(mm, prange);
3509 list_for_each_entry(prange, &update_list, update_list) {
3510 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3511 /* TODO: unmap ranges from GPU that lost access */
3513 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3514 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3515 prange->svms, prange, prange->start,
3517 svm_range_unlink(prange);
3518 svm_range_remove_notifier(prange);
3519 svm_range_free(prange, false);
3522 mmap_write_downgrade(mm);
3523 /* Trigger migrations and revalidate and map to GPUs as needed. If
3524 * this fails we may be left with partially completed actions. There
3525 * is no clean way of rolling back to the previous state in such a
3526 * case because the rollback wouldn't be guaranteed to work either.
3528 list_for_each_entry(prange, &update_list, update_list) {
3531 mutex_lock(&prange->migrate_mutex);
3533 r = svm_range_trigger_migration(mm, prange, &migrated);
3535 goto out_unlock_range;
3537 if (migrated && (!p->xnack_enabled ||
3538 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3539 prange->mapped_to_gpu) {
3540 pr_debug("restore_work will update mappings of GPUs\n");
3541 mutex_unlock(&prange->migrate_mutex);
3545 if (!migrated && !update_mapping) {
3546 mutex_unlock(&prange->migrate_mutex);
3550 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
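/* flush_tlb is needed only when an already-mapped range is revalidated in place
 * without a migration; a migration already invalidates the old GPU mappings.
 */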
3552 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3553 true, true, flush_tlb);
3555 pr_debug("failed %d to map svm range\n", r);
3558 mutex_unlock(&prange->migrate_mutex);
3563 svm_range_debug_dump(svms);
3565 mutex_unlock(&svms->lock);
3566 mmap_read_unlock(mm);
3568 mutex_unlock(&process_info->lock);
3570 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3571 &p->svms, start, start + size - 1, r);
3577 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3578 uint64_t start, uint64_t size, uint32_t nattr,
3579 struct kfd_ioctl_svm_attribute *attrs)
3581 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3582 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3583 bool get_preferred_loc = false;
3584 bool get_prefetch_loc = false;
3585 bool get_granularity = false;
3586 bool get_accessible = false;
3587 bool get_flags = false;
3588 uint64_t last = start + size - 1UL;
3589 uint8_t granularity = 0xff;
3590 struct interval_tree_node *node;
3591 struct svm_range_list *svms;
3592 struct svm_range *prange;
3593 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3594 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3595 uint32_t flags_and = 0xffffffff;
3596 uint32_t flags_or = 0;
3601 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3602 start + size - 1, nattr);
3604 /* Flush pending deferred work to avoid racing with deferred actions from
3605 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3606 * can still race with get_attr because we don't hold the mmap lock. But that
3607 * would be a race condition in the application anyway, and undefined
3608 * behaviour is acceptable in that case.
3610 flush_work(&p->svms.deferred_list_work);
3613 r = svm_range_is_valid(p, start, size);
3614 mmap_read_unlock(mm);
3616 pr_debug("invalid range r=%d\n", r);
3620 for (i = 0; i < nattr; i++) {
3621 switch (attrs[i].type) {
3622 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3623 get_preferred_loc = true;
3625 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3626 get_prefetch_loc = true;
3628 case KFD_IOCTL_SVM_ATTR_ACCESS:
3629 get_accessible = true;
3631 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3632 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3635 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3636 get_granularity = true;
3638 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3639 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3642 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3649 mutex_lock(&svms->lock);
3651 node = interval_tree_iter_first(&svms->objects, start, last);
3653 pr_debug("range attrs not found return default values\n");
3654 svm_range_set_default_attributes(&location, &prefetch_loc,
3655 &granularity, &flags_and);
3656 flags_or = flags_and;
3657 if (p->xnack_enabled)
3658 bitmap_copy(bitmap_access, svms->bitmap_supported,
3661 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3662 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3665 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3666 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
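/* Aggregate attributes over every range overlapping [start, last]: locations
 * collapse to UNDEFINED on mismatch, access bitmaps are ANDed down to the
 * common subset, flags are accumulated with both AND and OR, and granularity
 * takes the minimum.
 */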
3669 struct interval_tree_node *next;
3671 prange = container_of(node, struct svm_range, it_node);
3672 next = interval_tree_iter_next(node, start, last);
3674 if (get_preferred_loc) {
3675 if (prange->preferred_loc ==
3676 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3677 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3678 location != prange->preferred_loc)) {
3679 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3680 get_preferred_loc = false;
3682 location = prange->preferred_loc;
3685 if (get_prefetch_loc) {
3686 if (prange->prefetch_loc ==
3687 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3688 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3689 prefetch_loc != prange->prefetch_loc)) {
3690 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3691 get_prefetch_loc = false;
3693 prefetch_loc = prange->prefetch_loc;
3696 if (get_accessible) {
3697 bitmap_and(bitmap_access, bitmap_access,
3698 prange->bitmap_access, MAX_GPU_INSTANCE);
3699 bitmap_and(bitmap_aip, bitmap_aip,
3700 prange->bitmap_aip, MAX_GPU_INSTANCE);
3703 flags_and &= prange->flags;
3704 flags_or |= prange->flags;
3707 if (get_granularity && prange->granularity < granularity)
3708 granularity = prange->granularity;
3713 mutex_unlock(&svms->lock);
3715 for (i = 0; i < nattr; i++) {
3716 switch (attrs[i].type) {
3717 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3718 attrs[i].value = location;
3720 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3721 attrs[i].value = prefetch_loc;
3723 case KFD_IOCTL_SVM_ATTR_ACCESS:
3724 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3727 pr_debug("invalid gpuid %x\n", attrs[i].value);
3730 if (test_bit(gpuidx, bitmap_access))
3731 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3732 else if (test_bit(gpuidx, bitmap_aip))
3734 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3736 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3738 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3739 attrs[i].value = flags_and;
3741 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3742 attrs[i].value = ~flags_or;
3744 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3745 attrs[i].value = (uint32_t)granularity;
3753 int kfd_criu_resume_svm(struct kfd_process *p)
3755 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3756 int nattr_common = 4, nattr_accessibility = 1;
3757 struct criu_svm_metadata *criu_svm_md = NULL;
3758 struct svm_range_list *svms = &p->svms;
3759 struct criu_svm_metadata *next = NULL;
3760 uint32_t set_flags = 0xffffffff;
3761 int i, j, num_attrs, ret = 0;
3762 uint64_t set_attr_size;
3763 struct mm_struct *mm;
3765 if (list_empty(&svms->criu_svm_metadata_list)) {
3766 pr_debug("No SVM data from CRIU restore stage 2\n");
3770 mm = get_task_mm(p->lead_thread);
3772 pr_err("failed to get mm for the target process\n");
3776 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
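/* Each checkpointed range stores 4 process-wide attributes plus one
 * accessibility attribute per GPU (pdd).
 */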
3779 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3780 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3781 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3783 for (j = 0; j < num_attrs; j++) {
3784 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3785 i, j, criu_svm_md->data.attrs[j].type,
3786 i, j, criu_svm_md->data.attrs[j].value);
3787 switch (criu_svm_md->data.attrs[j].type) {
3788 /* During Checkpoint operation, the query for
3789 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3790 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
3791 * not used by the range that was checkpointed. Care
3792 * must be taken not to restore with an invalid value,
3793 * otherwise the gpuidx value will be invalid and
3794 * set_attr would eventually fail, so replace those
3795 * with another dummy attribute such as
3796 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3798 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3799 if (criu_svm_md->data.attrs[j].value ==
3800 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3801 criu_svm_md->data.attrs[j].type =
3802 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3803 criu_svm_md->data.attrs[j].value = 0;
3806 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3807 set_flags = criu_svm_md->data.attrs[j].value;
3814 /* CLR_FLAGS is not available via get_attr during checkpoint but
3815 * it needs to be inserted before restoring the ranges so
3816 * allocate extra space for it before calling set_attr
3818 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3820 set_attr_new = krealloc(set_attr, set_attr_size,
3822 if (!set_attr_new) {
3826 set_attr = set_attr_new;
3828 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3829 sizeof(struct kfd_ioctl_svm_attribute));
3830 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3831 set_attr[num_attrs].value = ~set_flags;
3833 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3834 criu_svm_md->data.size, num_attrs + 1,
3837 pr_err("CRIU: failed to set range attributes\n");
3845 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3846 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3847 criu_svm_md->data.start_addr);
3856 int kfd_criu_restore_svm(struct kfd_process *p,
3857 uint8_t __user *user_priv_ptr,
3858 uint64_t *priv_data_offset,
3859 uint64_t max_priv_data_size)
3861 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3862 int nattr_common = 4, nattr_accessibility = 1;
3863 struct criu_svm_metadata *criu_svm_md = NULL;
3864 struct svm_range_list *svms = &p->svms;
3865 uint32_t num_devices;
3868 num_devices = p->n_pdds;
3869 /* Handle one SVM range object at a time. The number of GPUs is assumed to be
3870 * the same on the restore node; this must be checked earlier while
3871 * evaluating the topology.
3874 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3875 (nattr_common + nattr_accessibility * num_devices);
3876 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3878 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3881 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3883 pr_err("failed to allocate memory to store svm metadata\n");
3886 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3891 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3892 svm_priv_data_size);
3897 *priv_data_offset += svm_priv_data_size;
3899 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3909 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3910 uint64_t *svm_priv_data_size)
3912 uint64_t total_size, accessibility_size, common_attr_size;
3913 int nattr_common = 4, nattr_accessibility = 1;
3914 int num_devices = p->n_pdds;
3915 struct svm_range_list *svms;
3916 struct svm_range *prange;
3919 *svm_priv_data_size = 0;
3925 mutex_lock(&svms->lock);
3926 list_for_each_entry(prange, &svms->list, list) {
3927 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3928 prange, prange->start, prange->npages,
3929 prange->start + prange->npages - 1);
3932 mutex_unlock(&svms->lock);
3934 *num_svm_ranges = count;
3935 /* Only the accessibility attributes need to be queried for each gpu
3936 * individually; the remaining ones span the entire process
3937 * regardless of the various gpu nodes. Of the remaining attributes,
3938 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS does not need to be saved.
3940 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
3941 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
3942 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
3943 * KFD_IOCTL_SVM_ATTR_GRANULARITY
3945 * ** ACCESSIBILITY ATTRIBUTES **
3946 * (Considered as one, type is altered during query, value is gpuid)
3947 * KFD_IOCTL_SVM_ATTR_ACCESS
3948 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
3949 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
3951 if (*num_svm_ranges > 0) {
3952 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3954 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
3955 nattr_accessibility * num_devices;
3957 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3958 common_attr_size + accessibility_size;
3960 *svm_priv_data_size = *num_svm_ranges * total_size;
3963 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
3964 *svm_priv_data_size);
3968 int kfd_criu_checkpoint_svm(struct kfd_process *p,
3969 uint8_t __user *user_priv_data,
3970 uint64_t *priv_data_offset)
3972 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
3973 struct kfd_ioctl_svm_attribute *query_attr = NULL;
3974 uint64_t svm_priv_data_size, query_attr_size = 0;
3975 int index, nattr_common = 4, ret = 0;
3976 struct svm_range_list *svms;
3977 int num_devices = p->n_pdds;
3978 struct svm_range *prange;
3979 struct mm_struct *mm;
3985 mm = get_task_mm(p->lead_thread);
3987 pr_err("failed to get mm for the target process\n");
3991 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3992 (nattr_common + num_devices);
3994 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4000 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4001 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4002 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4003 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4005 for (index = 0; index < num_devices; index++) {
4006 struct kfd_process_device *pdd = p->pdds[index];
4008 query_attr[index + nattr_common].type =
4009 KFD_IOCTL_SVM_ATTR_ACCESS;
4010 query_attr[index + nattr_common].value = pdd->user_gpu_id;
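/* The per-GPU slots are seeded as ATTR_ACCESS with the user GPU id as value;
 * svm_range_get_attr rewrites each type to ACCESS, ACCESS_IN_PLACE or
 * NO_ACCESS for that range.
 */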
4013 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4015 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4022 list_for_each_entry(prange, &svms->list, list) {
4024 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4025 svm_priv->start_addr = prange->start;
4026 svm_priv->size = prange->npages;
4027 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4028 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4029 prange, prange->start, prange->npages,
4030 prange->start + prange->npages - 1,
4031 prange->npages * PAGE_SIZE);
4033 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4035 (nattr_common + num_devices),
4038 pr_err("CRIU: failed to obtain range attributes\n");
4042 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4043 svm_priv_data_size)) {
4044 pr_err("Failed to copy svm priv to user\n");
4049 *priv_data_offset += svm_priv_data_size;
4064 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4065 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4067 struct mm_struct *mm = current->mm;
4070 start >>= PAGE_SHIFT;
4071 size >>= PAGE_SHIFT;
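/* The ioctl receives start and size in bytes; they are converted to page
 * units here, and all svm_range_* calls below operate on page numbers.
 */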
4074 case KFD_IOCTL_SVM_OP_SET_ATTR:
4075 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4077 case KFD_IOCTL_SVM_OP_GET_ATTR:
4078 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);