// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched/task.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000

struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);

static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};

/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}

static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
					    prange->start << PAGE_SHIFT,
					    prange->npages << PAGE_SHIFT,
					    &svm_range_mn_ops);
}

/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_move_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}

static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}

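/* DMA-map the CPU pages backing one GPU's view of the range. System memory
 * pages go through the device's DMA layer; device (VRAM) pages are converted
 * to a device-local physical offset and tagged with SVM_RANGE_VRAM_DOMAIN
 * instead of being DMA-mapped.
 */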
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev =
					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.dev->pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}
	return 0;
}

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}

void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;

		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}

void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}

static void svm_range_free(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	svm_range_free_dma_mappings(prange);
	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

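/* A new range starts with no preferred or prefetch location, a migration
 * granularity of 2^9 pages (2MB with 4KB pages), and host-accessible,
 * coherent mappings.
 */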
static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}

static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	p = container_of(svms, struct kfd_process, svms);
	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}

static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
		/* We're not in the eviction worker.
		 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
		dma_fence_signal(&svm_bo->eviction_fence->base);
		cancel_work_sync(&svm_bo->eviction_work);
	}
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

static void svm_range_bo_wq_release(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}

static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
{
	struct amdgpu_device *bo_adev;

	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
		if (bo_adev != adev) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}

	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list))
		;

	return false;
}

static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}

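/* Allocate (or revalidate) the VRAM buffer object backing a range. The BO
 * carries an eviction fence and a list of all ranges that share it; ranges
 * split from a VRAM range keep referencing the same svm_bo.
 */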
int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(adev, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->svms = prange->svms;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return r;
}

void svm_range_vram_node_free(struct svm_range *prange)
{
	svm_range_bo_unref(prange->svm_bo);
	prange->ttm_res = NULL;
}

struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int32_t gpu_idx;

	p = container_of(prange->svms, struct kfd_process, svms);

	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
	if (gpu_idx < 0) {
		pr_debug("failed to get device by id 0x%x\n", gpu_id);
		return NULL;
	}
	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
	if (!pdd) {
		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
		return NULL;
	}

	return pdd->dev->adev;
}

struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
{
	struct kfd_process *p;
	int32_t gpu_idx, gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
	if (r) {
		pr_debug("failed to get device id by adev %p\n", adev);
		return NULL;
	}

	return kfd_process_device_from_gpuidx(p, gpu_idx);
}

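/* Validation callback used with amdgpu_vm_validate_pt_bos in
 * svm_range_reserve_bos: it moves the VM's evicted BOs back into VRAM so the
 * page tables are resident before SVM mappings are updated.
 */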
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}

static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
		      bool *update_mapping)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			*update_mapping = true;
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			*update_mapping = true;
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			*update_mapping = true;
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = attrs[i].value;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}

static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (prange->preferred_loc != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			/* Prefetch should always trigger a migration even
			 * if the value of the attribute didn't change.
			 */
			return false;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				if (test_bit(gpuidx, prange->bitmap_access) ||
				    test_bit(gpuidx, prange->bitmap_aip))
					return false;
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				if (!test_bit(gpuidx, prange->bitmap_access))
					return false;
			} else {
				if (!test_bit(gpuidx, prange->bitmap_aip))
					return false;
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			if ((prange->flags & attrs[i].value) != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			if ((prange->flags & attrs[i].value) != 0)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			if (prange->granularity != attrs[i].value)
				return false;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}

	return true;
}

/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * debug output svm range start, end, prefetch location from svms
 * interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}

static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	new = kvmalloc_array(new_n, size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	d = (new_start - old_start) * size;
	memcpy(new, pold + d, new_n * size);

	old = kvmalloc_array(old_n, size, GFP_KERNEL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}

	d = (new_start == old_start) ? new_n * size : 0;
	memcpy(old, pold + d, old_n * size);

	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}

static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages);
		if (r)
			return r;
	}

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}

/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy system memory dma_addr or vram ttm_res in old range to new
 * range from new_start up to size new->npages, the remaining old range is from
 * start to last
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}

/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases:
 *
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last);
	else
		*new = svm_range_new(svms, old_start, start - 1);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new);
		*new = NULL;
	}

	return r;
}

static int
svm_range_split_tail(struct svm_range *prange,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->list, insert_list);
	return r;
}

static int
svm_range_split_head(struct svm_range *prange,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->list, insert_list);
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align the split range start and size to granularity size, then a
	 * single PTE will be used for the whole range, this reduces the number
	 * of PTEs updated and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}

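/* Select GPU PTE flags for the range based on where the pages live: local
 * VRAM uses cached MTYPEs (CC/RW), remote VRAM reached over XGMI uses
 * uncached/non-coherent MTYPEs with snooping, and system memory mappings get
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED. The GPU_RO and GPU_EXEC range flags
 * drop the writeable bit or add the executable bit, respectively.
 */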
static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
			int domain)
{
	struct amdgpu_device *bo_adev;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

	switch (KFD_GC_VERSION(adev->kfd.dev)) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
	return pte_flags;
}

static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
				      last, init_pte_value, 0, 0, NULL, NULL,
				      fence);
}

static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	if (!prange->mapped_to_gpu) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
			 prange, prange->start, prange->last);
		return 0;
	}

	if (prange->start == start && prange->last == last) {
		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
		prange->mapped_to_gpu = false;
	}

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}

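/* Map one chunk of the range into a single GPU VM. Pages are walked once and
 * batched: consecutive pages that belong to the same memory domain (VRAM vs.
 * system memory) are committed with one amdgpu_vm_update_range call, which
 * keeps the number of page table updates small.
 */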
static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
		     unsigned long offset, unsigned long npages, bool readonly,
		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
		     struct dma_fence **fence, bool flush_tlb)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");

		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
		if (readonly)
			pte_flags &= ~AMDGPU_PTE_WRITEABLE;

		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
			 prange->svms, last_start, prange->start + i,
			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
			 pte_flags);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
					   last_start, prange->start + i,
					   pte_flags,
					   last_start - prange->start,
					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
					   NULL, dma_addr, &vm->last_update);

		for (j = last_start - prange->start; j <= i; j++)
			dma_addr[j] |= last_domain;

		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

out:
	return r;
}

static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
		      unsigned long npages, bool readonly,
		      unsigned long *bitmap, bool wait, bool flush_tlb)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
	else
		bo_adev = NULL;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && pdd->dev->adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
					 prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL,
					 flush_tlb);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}

		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	return r;
}

struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};

static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
					      drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}

static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}

/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
static int svm_range_validate_and_map(struct mm_struct *mm,
				      struct svm_range *prange, int32_t gpuidx,
				      bool intr, bool wait, bool flush_tlb)
{
	struct svm_validate_context ctx;
	unsigned long start, end, addr;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx.process = container_of(prange->svms, struct kfd_process, svms);
	ctx.prange = prange;
	ctx.intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx.bitmap, gpuidx, 1);
	} else if (ctx.process->xnack_enabled) {
		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
							prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					  prange->actual_loc);
				return -EINVAL;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx.bitmap, gpuidx, 1);
		}
	} else {
		bitmap_or(ctx.bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
		if (!prange->mapped_to_gpu)
			return 0;

		bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
	}

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN_ONCE(1, "VRAM BO missing during validation\n");
		return -EINVAL;
	}

	svm_range_reserve_bos(&ctx);

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
						MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;
	for (addr = start; addr < end && !r; ) {
		struct hmm_range *hmm_range;
		struct vm_area_struct *vma;
		unsigned long next;
		unsigned long offset;
		unsigned long npages;
		bool readonly;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			r = -EFAULT;
			goto unreserve_out;
		}
		readonly = !(vma->vm_flags & VM_WRITE);

		next = min(vma->vm_end, end);
		npages = (next - addr) >> PAGE_SHIFT;
		WRITE_ONCE(p->svms.faulting_task, current);
		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
					       addr, npages, &hmm_range,
					       readonly, true, owner);
		WRITE_ONCE(p->svms.faulting_task, NULL);
		if (r) {
			pr_debug("failed %d to get svm range pages\n", r);
			goto unreserve_out;
		}

		offset = (addr - start) >> PAGE_SHIFT;
		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
				      hmm_range->hmm_pfns);
		if (r) {
			pr_debug("failed %d to dma map range\n", r);
			goto unreserve_out;
		}

		svm_range_lock(prange);
		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
			pr_debug("hmm update the range, need validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}
		if (!list_empty(&prange->child_list)) {
			pr_debug("range split by unmap in parallel, validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}

		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
					  ctx.bitmap, wait, flush_tlb);

unlock_out:
		svm_range_unlock(prange);

		addr = next;
	}

	if (addr == end) {
		prange->validated_once = true;
		prange->mapped_to_gpu = true;
	}

unreserve_out:
	svm_range_unreserve_bos(&ctx);

	if (!r)
		prange->validate_timestamp = ktime_to_us(ktime_get());

	return r;
}

/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}

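/* Delayed work that runs after an eviction: it revalidates and remaps every
 * range whose "invalid" counter is set, clears the evicted_ranges counter and
 * resumes the user queues. If any range fails to validate, the work
 * reschedules itself.
 */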
static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	p = container_of(svms, struct kfd_process, svms);
	process_info = p->kgd_process_info;

	/* Keep mm reference when svm_range_validate_and_map ranges */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p process mm gone\n", svms);
		return;
	}

	mutex_lock(&process_info->lock);
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/*
		 * If range is migrating, wait for migration is done.
		 */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       false, true, false);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
	mutex_unlock(&process_info->lock);
	mmput(mm);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	}
}

/**
 * svm_range_evict - evict svm range
 * @prange: svm range structure
 * @mm: current process mm_struct
 * @start: first page of the range being invalidated
 * @last: last page of the range being invalidated
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let CPU evict the buffer and proceed CPU pagetable update.
 *
 * Don't need use lock to sync cpu pagetable invalidation with GPU execution.
 * If invalidation happens while restore work is running, restore work will
 * restart to ensure to get the latest CPU pages mapping to GPU, then start
 * the queues.
 */
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
		unsigned long start, unsigned long last)
{
	struct svm_range_list *svms = prange->svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	int r = 0;

	p = container_of(svms, struct kfd_process, svms);

	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 svms, prange->start, prange->last, start, last);

	if (!p->xnack_enabled) {
		int evicted_ranges;

		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			if (pchild->start <= last && pchild->last >= start) {
				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
					 pchild->start, pchild->last);
				atomic_inc(&pchild->invalid);
			}
			mutex_unlock(&pchild->lock);
		}

		if (prange->start <= last && prange->last >= start)
			atomic_inc(&prange->invalid);

		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
		if (evicted_ranges != 1)
			return r;

		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
			 prange->svms, prange->start, prange->last);

		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;

		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
			 prange->svms, start, last);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			s = max(start, pchild->start);
			l = min(last, pchild->last);
			if (l >= s)
				svm_range_unmap_from_gpus(pchild, s, l);
			mutex_unlock(&pchild->lock);
		}
		s = max(start, prange->start);
		l = min(last, prange->last);
		if (l >= s)
			svm_range_unmap_from_gpus(prange, s, l);
	}

	return r;
}

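/* Clone a range for the transactional update in svm_range_add. The clone
 * shares the svm_bo reference and copies all attributes, so the original
 * range stays untouched until the whole update is known to succeed.
 */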
static struct svm_range *svm_range_clone(struct svm_range *old)
{
	struct svm_range *new;

	new = svm_range_new(old->svms, old->start, old->last);
	if (!new)
		return NULL;

	if (old->svm_bo) {
		new->ttm_res = old->ttm_res;
		new->offset = old->offset;
		new->svm_bo = svm_range_bo_ref(old->svm_bo);
		spin_lock(&new->svm_bo->list_lock);
		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
		spin_unlock(&new->svm_bo->list_lock);
	}
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return new;
}

/**
 * svm_range_add - add svm range and handle overlap
 * @p: the process to add the range to
 * @start: start of the range, in pages, page size aligned
 * @size: size of the range, in pages, page size aligned
 * @nattr: number of attributes
 * @attrs: array of attributes
 * @update_list: output, the ranges need validate and update GPU mapping
 * @insert_list: output, the ranges need insert to svms
 * @remove_list: output, the ranges are replaced and need remove from svms
 *
 * Check if the virtual address range has overlap with any existing ranges,
 * split partly overlapping ranges and add new ranges in the gaps. All changes
 * should be applied to the range_list and interval tree transactionally. If
 * any range split or allocation fails, the entire update fails. Therefore any
 * existing overlapping svm_ranges are cloned and the original svm_ranges left
 * unchanged.
 *
 * If the transaction succeeds, the caller can update and insert clones and
 * new ranges, then free the originals.
 *
 * Otherwise the caller can free the clones and new ranges, while the old
 * svm_ranges remain unchanged.
 *
 * Context: Process context, caller must hold svms->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
	      struct list_head *update_list, struct list_head *insert_list,
	      struct list_head *remove_list)
{
	unsigned long last = start + size - 1UL;
	struct svm_range_list *svms = &p->svms;
	struct interval_tree_node *node;
	struct svm_range *prange;
	struct svm_range *tmp;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);

	INIT_LIST_HEAD(update_list);
	INIT_LIST_HEAD(insert_list);
	INIT_LIST_HEAD(remove_list);

	node = interval_tree_iter_first(&svms->objects, start, last);
	while (node) {
		struct interval_tree_node *next;
		unsigned long next_start;

		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
			 node->last);

		prange = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);
		next_start = min(node->last, last) + 1;

		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
			/* nothing to do */
		} else if (node->start < start || node->last > last) {
			/* node intersects the update range and its attributes
			 * will change. Clone and split it, apply updates only
			 * to the overlapping part
			 */
			struct svm_range *old = prange;

			prange = svm_range_clone(old);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&old->update_list, remove_list);
			list_add(&prange->list, insert_list);
			list_add(&prange->update_list, update_list);

			if (node->start < start) {
				pr_debug("change old range start\n");
				r = svm_range_split_head(prange, start,
							 insert_list);
				if (r)
					goto out;
			}
			if (node->last > last) {
				pr_debug("change old range last\n");
				r = svm_range_split_tail(prange, last,
							 insert_list);
				if (r)
					goto out;
			}
		} else {
			/* The node is contained within start..last,
			 * just update it
			 */
			list_add(&prange->update_list, update_list);
		}

		/* insert a new node if needed */
		if (node->start > start) {
			prange = svm_range_new(svms, start, node->start - 1);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&prange->list, insert_list);
			list_add(&prange->update_list, update_list);
		}

		node = next;
		start = next_start;
	}

	/* add a final range at the end if needed */
	if (start <= last) {
		prange = svm_range_new(svms, start, last);
		if (!prange) {
			r = -ENOMEM;
			goto out;
		}
		list_add(&prange->list, insert_list);
		list_add(&prange->update_list, update_list);
	}

out:
	if (r)
		list_for_each_entry_safe(prange, tmp, insert_list, list)
			svm_range_free(prange);

	return r;
}

static void
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
					    struct svm_range *prange)
{
	unsigned long start;
	unsigned long last;

	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;

	if (prange->start == start && prange->last == last)
		return;

	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 prange->svms, prange, start, last, prange->start,
		 prange->last);

	if (start != 0 && last != 0) {
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
		svm_range_remove_notifier(prange);
	}
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;

	interval_tree_insert(&prange->it_node, &prange->svms->objects);
	svm_range_add_notifier_locked(mm, prange);
}

static void
svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
			 struct mm_struct *mm)
{
	switch (prange->work_item.op) {
	case SVM_OP_NULL:
		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		break;
	case SVM_OP_UNMAP_RANGE:
		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER:
		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	case SVM_OP_ADD_RANGE:
		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
			 prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		break;
	case SVM_OP_ADD_RANGE_AND_MAP:
		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
			 prange, prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	default:
		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
			  prange->work_item.op);
	}
}

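/* Wait until the interrupt ring has processed all retry page faults that were
 * already queued for this process, so that stale faults cannot be handled
 * against ranges that are about to be removed.
 */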
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int drain;
	uint32_t i;

	p = container_of(svms, struct kfd_process, svms);

restart:
	drain = atomic_read(&svms->drain_pagefaults);
	if (!drain)
		return;

	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];
		if (!pdd)
			continue;

		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);

		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
						&pdd->dev->adev->irq.ih1);
		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
	}
	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
		goto restart;
}

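/* Worker that applies deferred list operations (add, unmap, notifier update)
 * for ranges and their children. It takes the mmap write lock and svms->lock
 * for each range, and drains pending retry faults first when requested.
 */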
static void svm_range_deferred_list_work(struct work_struct *work)
{
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = container_of(work, struct svm_range_list, deferred_list_work);
	pr_debug("enter svms 0x%p\n", svms);

	spin_lock(&svms->deferred_list_lock);
	while (!list_empty(&svms->deferred_range_list)) {
		prange = list_first_entry(&svms->deferred_range_list,
					  struct svm_range, deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
			 prange->start, prange->last, prange->work_item.op);

		mm = prange->work_item.mm;
retry:
		mmap_write_lock(mm);

		/* Checking for the need to drain retry faults must be inside
		 * mmap write lock to serialize with munmap notifiers.
		 */
		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
			mmap_write_unlock(mm);
			svm_range_drain_retry_fault(svms);
			goto retry;
		}

		/* Remove from deferred_list must be inside mmap write lock, for
		 * two reasons:
		 * 1. unmap_from_cpu may change work_item.op and add the range
		 *    to deferred_list again, cause use after free bug.
		 * 2. svm_range_list_lock_and_flush_work may hold mmap write
		 *    lock and continue because deferred_list is empty, but
		 *    deferred_list work is actually waiting for mmap lock.
		 */
		spin_lock(&svms->deferred_list_lock);
		list_del_init(&prange->deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		mutex_lock(&svms->lock);
		mutex_lock(&prange->migrate_mutex);
		while (!list_empty(&prange->child_list)) {
			struct svm_range *pchild;

			pchild = list_first_entry(&prange->child_list,
						struct svm_range, child_list);
			pr_debug("child prange 0x%p op %d\n", pchild,
				 pchild->work_item.op);
			list_del_init(&pchild->child_list);
			svm_range_handle_list_op(svms, pchild, mm);
		}
		mutex_unlock(&prange->migrate_mutex);

		svm_range_handle_list_op(svms, prange, mm);
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);

		/* Pairs with mmget in svm_range_add_list_work */
		mmput(mm);

		spin_lock(&svms->deferred_list_lock);
	}
	spin_unlock(&svms->deferred_list_lock);
	pr_debug("exit svms 0x%p\n", svms);
}

void
svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
			struct mm_struct *mm, enum svm_work_list_ops op)
{
	spin_lock(&svms->deferred_list_lock);
	/* if prange is on the deferred list */
	if (!list_empty(&prange->deferred_list)) {
		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
		if (op != SVM_OP_NULL &&
		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
			prange->work_item.op = op;
	} else {
		prange->work_item.op = op;

		/* Pairs with mmput in deferred_list_work */
		mmget(mm);
		prange->work_item.mm = mm;
		list_add_tail(&prange->deferred_list,
			      &prange->svms->deferred_range_list);
		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
			 prange, prange->start, prange->last, op);
	}
	spin_unlock(&svms->deferred_list_lock);
}

void schedule_deferred_list_work(struct svm_range_list *svms)
{
	spin_lock(&svms->deferred_list_lock);
	if (!list_empty(&svms->deferred_range_list))
		schedule_work(&svms->deferred_list_work);
	spin_unlock(&svms->deferred_list_lock);
}

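/* Handle a partial munmap of a range: the unmapped portion is split off and
 * queued as a child with SVM_OP_UNMAP_RANGE, while pieces that remain mapped
 * are kept in place or re-added as SVM_OP_ADD_RANGE children of @parent.
 */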
static void
svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
		      struct svm_range *prange, unsigned long start,
		      unsigned long last)
{
	struct svm_range *head;
	struct svm_range *tail;

	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
			 prange->start, prange->last);
		return;
	}
	if (start > prange->last || last < prange->start)
		return;

	head = tail = prange;
	if (start > prange->start)
		svm_range_split(prange, prange->start, start - 1, &tail);
	if (last < tail->last)
		svm_range_split(tail, last + 1, tail->last, &head);

	if (head != prange && tail != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	} else if (tail != prange) {
		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
	} else if (head != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
	} else if (parent != prange) {
		prange->work_item.op = SVM_OP_UNMAP_RANGE;
	}
}

2229 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2230 unsigned long start, unsigned long last)
2232 struct svm_range_list *svms;
2233 struct svm_range *pchild;
2234 struct kfd_process *p;
2238 p = kfd_lookup_process_by_mm(mm);
2243 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2244 prange, prange->start, prange->last, start, last);
2246 /* Make sure pending page faults are drained in the deferred worker
2247 * before the range is freed to avoid straggler interrupts on
2248 * unmapped memory causing "phantom faults".
2250 atomic_inc(&svms->drain_pagefaults);
2252 unmap_parent = start <= prange->start && last >= prange->last;
2254 list_for_each_entry(pchild, &prange->child_list, child_list) {
2255 mutex_lock_nested(&pchild->lock, 1);
2256 s = max(start, pchild->start);
2257 l = min(last, pchild->last);
2259 svm_range_unmap_from_gpus(pchild, s, l);
2260 svm_range_unmap_split(mm, prange, pchild, start, last);
2261 mutex_unlock(&pchild->lock);
2263 s = max(start, prange->start);
2264 l = min(last, prange->last);
2266 svm_range_unmap_from_gpus(prange, s, l);
2267 svm_range_unmap_split(mm, prange, prange, start, last);
2270 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2272 svm_range_add_list_work(svms, prange, mm,
2273 SVM_OP_UPDATE_RANGE_NOTIFIER);
2274 schedule_deferred_list_work(svms);
2276 kfd_unref_process(p);
2280 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2281 * @mni: mmu_interval_notifier struct
2282 * @range: mmu_notifier_range struct
2283 * @cur_seq: value to pass to mmu_interval_set_seq()
* If the event is MMU_NOTIFY_UNMAP, this callback is handling a CPU unmap of
* the range; otherwise it comes from migration or a CPU page invalidation.
*
* For an unmap event, unmap the range from GPUs, remove the prange from svms
* in the deferred worker, and split the prange if only part of it is unmapped.
*
* For an invalidation event, if GPU retry fault is not enabled, evict the
* queues, then schedule svm_range_restore_work to update the GPU mapping and
* resume the queues. If GPU retry fault is enabled, unmap the svm range from
* the GPU; the retry fault will update the GPU mapping to recover.
2296 * Context: mmap lock, notifier_invalidate_start lock are held
2297 * for invalidate event, prange lock is held if this is from migration
2300 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2301 const struct mmu_notifier_range *range,
2302 unsigned long cur_seq)
2304 struct svm_range *prange;
2305 unsigned long start;
2308 if (range->event == MMU_NOTIFY_RELEASE)
2311 start = mni->interval_tree.start;
2312 last = mni->interval_tree.last;
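/* Clamp to the intersection of the notifier interval and the invalidated
* range, converted to page numbers.
*/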
2313 start = max(start, range->start) >> PAGE_SHIFT;
2314 last = min(last, range->end - 1) >> PAGE_SHIFT;
2315 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2316 start, last, range->start >> PAGE_SHIFT,
2317 (range->end - 1) >> PAGE_SHIFT,
2318 mni->interval_tree.start >> PAGE_SHIFT,
2319 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2321 prange = container_of(mni, struct svm_range, notifier);
2323 svm_range_lock(prange);
2324 mmu_interval_set_seq(mni, cur_seq);
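/* Advance the notifier sequence while holding the prange lock so that
* concurrent users of amdgpu_hmm_range_get_pages notice the invalidation
* and retry.
*/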
2326 switch (range->event) {
2327 case MMU_NOTIFY_UNMAP:
2328 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2331 svm_range_evict(prange, mni->mm, start, last);
2335 svm_range_unlock(prange);
2341 * svm_range_from_addr - find svm range from fault address
2342 * @svms: svm range list header
2343 * @addr: address to search range interval tree, in pages
2344 * @parent: parent range if range is on child list
2346 * Context: The caller must hold svms->lock
2348 * Return: the svm_range found or NULL
2351 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2352 struct svm_range **parent)
2354 struct interval_tree_node *node;
2355 struct svm_range *prange;
2356 struct svm_range *pchild;
2358 node = interval_tree_iter_first(&svms->objects, addr, addr);
2362 prange = container_of(node, struct svm_range, it_node);
2363 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2364 addr, prange->start, prange->last, node->start, node->last);
2366 if (addr >= prange->start && addr <= prange->last) {
2371 list_for_each_entry(pchild, &prange->child_list, child_list)
2372 if (addr >= pchild->start && addr <= pchild->last) {
2373 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2374 addr, pchild->start, pchild->last);
/* svm_range_best_restore_location - decide the best fault restore location
* @prange: svm range structure
* @adev: the GPU on which the vm fault happened
*
* This is only called when xnack is on, to decide the best location to restore
* the range mapping after a GPU vm fault. The caller uses the best location to
* migrate the range if the actual location is not the best location, then
* updates the GPU page table mapping to the best location.
*
* If the preferred location is accessible by the faulting GPU, use the
* preferred location.
* If the faulting GPU is on the range ACCESSIBLE bitmap, best_loc is the
* faulting GPU.
* If the faulting GPU is on the range ACCESSIBLE_IN_PLACE bitmap, then
*   if the range's actual location is CPU, best_loc is CPU;
*   if the faulting GPU is in the same XGMI hive as the actual location GPU,
*   best_loc is the actual location.
* Otherwise the faulting GPU has no access and best_loc is -1.
*
* Return:
* -1 means the faulting GPU has no access
* 0 for CPU or GPU id
2405 svm_range_best_restore_location(struct svm_range *prange,
2406 struct amdgpu_device *adev,
2409 struct amdgpu_device *bo_adev, *preferred_adev;
2410 struct kfd_process *p;
2414 p = container_of(prange->svms, struct kfd_process, svms);
2416 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
2418 pr_debug("failed to get gpuid from kgd\n");
2422 if (prange->preferred_loc == gpuid ||
2423 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2424 return prange->preferred_loc;
2425 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2426 preferred_adev = svm_range_get_adev_by_id(prange,
2427 prange->preferred_loc);
2428 if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2429 return prange->preferred_loc;
2433 if (test_bit(*gpuidx, prange->bitmap_access))
2436 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2437 if (!prange->actual_loc)
2440 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2441 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2442 return prange->actual_loc;
2451 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2452 unsigned long *start, unsigned long *last,
2453 bool *is_heap_stack)
2455 struct vm_area_struct *vma;
2456 struct interval_tree_node *node;
2457 unsigned long start_limit, end_limit;
2459 vma = find_vma(p->mm, addr << PAGE_SHIFT);
2460 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2461 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2465 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2466 vma->vm_end >= vma->vm_mm->start_brk) ||
2467 (vma->vm_start <= vma->vm_mm->start_stack &&
2468 vma->vm_end >= vma->vm_mm->start_stack);
2470 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2471 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2472 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2473 (unsigned long)ALIGN(addr + 1, 2UL << 8));
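/* 2UL << 8 is 512 pages, i.e. 2MB with 4KB pages: grow the new range to
* 2MB granularity, clamped to the containing VMA.
*/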
2474 /* First range that starts after the fault address */
2475 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2477 end_limit = min(end_limit, node->start);
2478 /* Last range that ends before the fault address */
2479 node = container_of(rb_prev(&node->rb),
2480 struct interval_tree_node, rb);
2482 /* Last range must end before addr because
2483 * there was no range after addr
2485 node = container_of(rb_last(&p->svms.objects.rb_root),
2486 struct interval_tree_node, rb);
2489 if (node->last >= addr) {
2490 WARN(1, "Overlap with prev node and page fault addr\n");
2493 start_limit = max(start_limit, node->last + 1);
2496 *start = start_limit;
2497 *last = end_limit - 1;
2499 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2500 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2501 *start, *last, *is_heap_stack);
2507 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2508 uint64_t *bo_s, uint64_t *bo_l)
2510 struct amdgpu_bo_va_mapping *mapping;
2511 struct interval_tree_node *node;
2512 struct amdgpu_bo *bo = NULL;
2513 unsigned long userptr;
2517 for (i = 0; i < p->n_pdds; i++) {
2518 struct amdgpu_vm *vm;
2520 if (!p->pdds[i]->drm_priv)
2523 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2524 r = amdgpu_bo_reserve(vm->root.bo, false);
2528 /* Check userptr by searching entire vm->va interval tree */
2529 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2531 mapping = container_of((struct rb_node *)node,
2532 struct amdgpu_bo_va_mapping, rb);
2533 bo = mapping->bo_va->base.bo;
2535 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2536 start << PAGE_SHIFT,
2539 node = interval_tree_iter_next(node, 0, ~0ULL);
2543 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2546 *bo_s = userptr >> PAGE_SHIFT;
2547 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2549 amdgpu_bo_unreserve(vm->root.bo);
2552 amdgpu_bo_unreserve(vm->root.bo);
2558 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2559 struct kfd_process *p,
2560 struct mm_struct *mm,
2563 struct svm_range *prange = NULL;
2564 unsigned long start, last;
2565 uint32_t gpuid, gpuidx;
2571 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2575 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2576 if (r != -EADDRINUSE)
2577 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2579 if (r == -EADDRINUSE) {
2580 if (addr >= bo_s && addr <= bo_l)
/* Create a one-page svm range if the 2MB-aligned range overlaps an existing mapping */
2588 prange = svm_range_new(&p->svms, start, last);
2590 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2593 if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
2594 pr_debug("failed to get gpuid from kgd\n");
2595 svm_range_free(prange);
2600 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2602 svm_range_add_to_svms(prange);
2603 svm_range_add_notifier_locked(mm, prange);
2608 /* svm_range_skip_recover - decide if prange can be recovered
2609 * @prange: svm range structure
* The GPU vm retry fault handler skips recovering the range in these cases:
* 1. prange is on the deferred list to be removed after unmap; this is a stale
*    fault, and the deferred list work will drain the stale fault before
*    freeing the prange.
* 2. prange is on the deferred list to add an interval notifier after a split.
* 3. prange is a child range split from a parent prange; recover it later,
*    after the interval notifier is added.
*
* Return: true to skip recovery, false to recover
2620 static bool svm_range_skip_recover(struct svm_range *prange)
2622 struct svm_range_list *svms = prange->svms;
2624 spin_lock(&svms->deferred_list_lock);
2625 if (list_empty(&prange->deferred_list) &&
2626 list_empty(&prange->child_list)) {
2627 spin_unlock(&svms->deferred_list_lock);
2630 spin_unlock(&svms->deferred_list_lock);
2632 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2633 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2634 svms, prange, prange->start, prange->last);
2637 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2638 prange->work_item.op == SVM_OP_ADD_RANGE) {
2639 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2640 svms, prange, prange->start, prange->last);
2647 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2650 struct kfd_process_device *pdd;
2652 /* fault is on different page of same range
2653 * or fault is skipped to recover later
2654 * or fault is on invalid virtual address
2656 if (gpuidx == MAX_GPU_INSTANCE) {
2660 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
2665 /* fault is recovered
2666 * or fault cannot recover because GPU no access on the range
2668 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2670 WRITE_ONCE(pdd->faults, pdd->faults + 1);
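/* svm_fault_allowed - check that the VMA permissions allow the faulting
* access: VM_READ, plus VM_WRITE for a write fault.
*/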
2674 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2676 unsigned long requested = VM_READ;
2679 requested |= VM_WRITE;
2681 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2683 return (vma->vm_flags & requested) == requested;
2687 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2688 uint64_t addr, bool write_fault)
2690 struct mm_struct *mm = NULL;
2691 struct svm_range_list *svms;
2692 struct svm_range *prange;
2693 struct kfd_process *p;
2696 int32_t gpuidx = MAX_GPU_INSTANCE;
2697 bool write_locked = false;
2698 struct vm_area_struct *vma;
2701 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2702 pr_debug("device does not support SVM\n");
2706 p = kfd_lookup_process_by_pasid(pasid);
2708 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2713 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2715 if (atomic_read(&svms->drain_pagefaults)) {
2716 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2721 if (!p->xnack_enabled) {
2722 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
/* p->lead_thread is available because kfd_process_wq_release flushes this work
* before releasing the task reference.
*/
2730 mm = get_task_mm(p->lead_thread);
2732 pr_debug("svms 0x%p failed to get mm\n", svms);
2739 mutex_lock(&svms->lock);
2740 prange = svm_range_from_addr(svms, addr, NULL);
2742 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2744 if (!write_locked) {
2745 /* Need the write lock to create new range with MMU notifier.
2746 * Also flush pending deferred work to make sure the interval
2747 * tree is up to date before we add a new range
2749 mutex_unlock(&svms->lock);
2750 mmap_read_unlock(mm);
2751 mmap_write_lock(mm);
2752 write_locked = true;
2753 goto retry_write_locked;
2755 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2757 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2759 mmap_write_downgrade(mm);
2761 goto out_unlock_svms;
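/* The write lock was only needed to create and register the new range;
* downgrade to a read lock for validation and mapping.
*/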
2765 mmap_write_downgrade(mm);
2767 mutex_lock(&prange->migrate_mutex);
2769 if (svm_range_skip_recover(prange)) {
2770 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2772 goto out_unlock_range;
2775 timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2776 /* skip duplicate vm fault on different pages of same range */
2777 if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2778 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2779 svms, prange->start, prange->last);
2781 goto out_unlock_range;
/* __do_munmap removed the VMA, return success as we are handling a stale
* retry fault.
*/
2787 vma = find_vma(mm, addr << PAGE_SHIFT);
2788 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2789 pr_debug("address 0x%llx VMA is removed\n", addr);
2791 goto out_unlock_range;
2794 if (!svm_fault_allowed(vma, write_fault)) {
2795 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2796 write_fault ? "write" : "read");
2798 goto out_unlock_range;
2801 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2802 if (best_loc == -1) {
2803 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2804 svms, prange->start, prange->last);
2806 goto out_unlock_range;
2809 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2810 svms, prange->start, prange->last, best_loc,
2811 prange->actual_loc);
2813 if (prange->actual_loc != best_loc) {
2815 r = svm_migrate_to_vram(prange, best_loc, mm);
2817 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
/* Fall back to system memory if migration to VRAM failed */
2822 if (prange->actual_loc)
2823 r = svm_migrate_vram_to_ram(prange, mm);
2828 r = svm_migrate_vram_to_ram(prange, mm);
2831 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2832 r, svms, prange->start, prange->last);
2833 goto out_unlock_range;
2837 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
2839 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2840 r, svms, prange->start, prange->last);
2843 mutex_unlock(&prange->migrate_mutex);
2845 mutex_unlock(&svms->lock);
2846 mmap_read_unlock(mm);
2848 svm_range_count_fault(adev, p, gpuidx);
2852 kfd_unref_process(p);
2855 pr_debug("recover vm fault later\n");
2856 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2862 void svm_range_list_fini(struct kfd_process *p)
2864 struct svm_range *prange;
2865 struct svm_range *next;
2867 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2869 cancel_delayed_work_sync(&p->svms.restore_work);
2871 /* Ensure list work is finished before process is destroyed */
2872 flush_work(&p->svms.deferred_list_work);
* Ensure no retry fault comes in afterwards, as the page fault handler would
* not find the kfd process or be able to take the mm lock to recover the fault.
2878 atomic_inc(&p->svms.drain_pagefaults);
2879 svm_range_drain_retry_fault(&p->svms);
2881 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2882 svm_range_unlink(prange);
2883 svm_range_remove_notifier(prange);
2884 svm_range_free(prange);
2887 mutex_destroy(&p->svms.lock);
2889 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2892 int svm_range_list_init(struct kfd_process *p)
2894 struct svm_range_list *svms = &p->svms;
2897 svms->objects = RB_ROOT_CACHED;
2898 mutex_init(&svms->lock);
2899 INIT_LIST_HEAD(&svms->list);
2900 atomic_set(&svms->evicted_ranges, 0);
2901 atomic_set(&svms->drain_pagefaults, 0);
2902 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2903 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2904 INIT_LIST_HEAD(&svms->deferred_range_list);
2905 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
2906 spin_lock_init(&svms->deferred_list_lock);
2908 for (i = 0; i < p->n_pdds; i++)
2909 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2910 bitmap_set(svms->bitmap_supported, i, 1);
2916 * svm_range_check_vm - check if virtual address range mapped already
2917 * @p: current kfd_process
2918 * @start: range start address, in pages
2919 * @last: range last address, in pages
2920 * @bo_s: mapping start address in pages if address range already mapped
2921 * @bo_l: mapping last address in pages if address range already mapped
* The purpose is to avoid virtual address ranges that were already allocated
* by the kfd_ioctl_alloc_memory_of_gpu ioctl.
* Every pdd in the kfd_process is checked.
2927 * Context: Process context
2929 * Return 0 - OK, if the range is not mapped.
2930 * Otherwise error code:
2931 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
2932 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
2933 * a signal. Release all buffer reservations and return to user-space.
2936 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
2937 uint64_t *bo_s, uint64_t *bo_l)
2939 struct amdgpu_bo_va_mapping *mapping;
2940 struct interval_tree_node *node;
2944 for (i = 0; i < p->n_pdds; i++) {
2945 struct amdgpu_vm *vm;
2947 if (!p->pdds[i]->drm_priv)
2950 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2951 r = amdgpu_bo_reserve(vm->root.bo, false);
2955 node = interval_tree_iter_first(&vm->va, start, last);
2957 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
2959 mapping = container_of((struct rb_node *)node,
2960 struct amdgpu_bo_va_mapping, rb);
2962 *bo_s = mapping->start;
2963 *bo_l = mapping->last;
2965 amdgpu_bo_unreserve(vm->root.bo);
2968 amdgpu_bo_unreserve(vm->root.bo);
2975 * svm_range_is_valid - check if virtual address range is valid
2976 * @p: current kfd_process
2977 * @start: range start address, in pages
2978 * @size: range size, in pages
2980 * Valid virtual address range means it belongs to one or more VMAs
2982 * Context: Process context
2985 * 0 - OK, otherwise error code
2988 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
2990 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2991 struct vm_area_struct *vma;
2993 unsigned long start_unchg = start;
2995 start <<= PAGE_SHIFT;
2996 end = start + (size << PAGE_SHIFT);
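/* Walk every VMA covering [start, end): the range is invalid if any part of
* it is not backed by a VMA or is backed by a device (IO/PFNMAP/MIXEDMAP)
* mapping.
*/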
2998 vma = find_vma(p->mm, start);
2999 if (!vma || start < vma->vm_start ||
3000 (vma->vm_flags & device_vma))
3002 start = min(end, vma->vm_end);
3003 } while (start < end);
3005 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3010 * svm_range_best_prefetch_location - decide the best prefetch location
3011 * @prange: svm range structure
* For xnack off:
* If the range maps to a single GPU, the best prefetch location is
* prefetch_loc, which can be CPU or GPU.
*
* If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best prefetch
* location is the prefetch_loc GPU only if the GPUs are connected in the same
* XGMI hive; otherwise it is always CPU, because a GPU cannot have a coherent
* mapping of another GPU's VRAM even with a large-BAR PCIe connection.
*
* For xnack on:
* If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
* location is prefetch_loc; access by other GPUs will generate a vm fault and
* trigger migration.
*
* If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
* is the prefetch_loc GPU only if the GPUs are connected in the same XGMI hive;
* otherwise it is always CPU.
3030 * Context: Process context
3033 * 0 for CPU or GPU id
3036 svm_range_best_prefetch_location(struct svm_range *prange)
3038 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3039 uint32_t best_loc = prange->prefetch_loc;
3040 struct kfd_process_device *pdd;
3041 struct amdgpu_device *bo_adev;
3042 struct kfd_process *p;
3045 p = container_of(prange->svms, struct kfd_process, svms);
3047 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3050 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
3052 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
3057 if (p->xnack_enabled)
3058 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3060 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3063 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3064 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3066 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3070 if (pdd->dev->adev == bo_adev)
3073 if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
3080 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3081 p->xnack_enabled, &p->svms, prange->start, prange->last,
3087 /* FIXME: This is a workaround for page locking bug when some pages are
3088 * invalid during migration to VRAM
3090 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
3093 struct hmm_range *hmm_range;
3096 if (prange->validated_once)
3099 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
3100 prange->start << PAGE_SHIFT,
3101 prange->npages, &hmm_range,
3102 false, true, owner);
3104 amdgpu_hmm_range_get_pages_done(hmm_range);
3105 prange->validated_once = true;
3109 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3110 * @mm: current process mm_struct
3111 * @prange: svm range structure
3112 * @migrated: output, true if migration is triggered
* If the range prefetch_loc is a GPU and the actual location is CPU (0),
* migrate the range from RAM to VRAM.
* If the range prefetch_loc is CPU (0) and the actual location is a GPU,
* migrate the range from VRAM to RAM.
*
* If GPU vm fault retry is not enabled, migration interacts with the MMU
* notifier callback:
* 1. migrate_vma_setup invalidates pages; the MMU notifier callback
*    svm_range_evict stops all queues and schedules the restore work.
* 2. svm_range_restore_work waits for the migration to finish via
*    a. svm_range_validate_vram taking prange->migrate_mutex
*    b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler
*       to return.
* 3. The restore work updates the GPU mappings and resumes all queues.
3128 * Context: Process context
3131 * 0 - OK, otherwise - error code of migration
3134 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3141 best_loc = svm_range_best_prefetch_location(prange);
3143 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3144 best_loc == prange->actual_loc)
3148 r = svm_migrate_vram_to_ram(prange, mm);
3153 r = svm_migrate_to_vram(prange, best_loc, mm);
3159 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3164 if (dma_fence_is_signaled(&fence->base))
3167 if (fence->svm_bo) {
3168 WRITE_ONCE(fence->svm_bo->evicting, 1);
3169 schedule_work(&fence->svm_bo->eviction_work);
3175 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3177 struct svm_range_bo *svm_bo;
3178 struct kfd_process *p;
3179 struct mm_struct *mm;
3182 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3183 if (!svm_bo_ref_unless_zero(svm_bo))
3184 return; /* svm_bo was freed while eviction was pending */
3186 /* svm_range_bo_release destroys this worker thread. So during
3187 * the lifetime of this thread, kfd_process and mm will be valid.
3189 p = container_of(svm_bo->svms, struct kfd_process, svms);
3195 spin_lock(&svm_bo->list_lock);
3196 while (!list_empty(&svm_bo->range_list) && !r) {
3197 struct svm_range *prange =
3198 list_first_entry(&svm_bo->range_list,
3199 struct svm_range, svm_bo_list);
3202 list_del_init(&prange->svm_bo_list);
3203 spin_unlock(&svm_bo->list_lock);
3205 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3206 prange->start, prange->last);
3208 mutex_lock(&prange->migrate_mutex);
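/* Migrate the range back to system memory, retrying a limited number of
* times in case not all pages could be migrated in one pass.
*/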
3210 r = svm_migrate_vram_to_ram(prange,
3211 svm_bo->eviction_fence->mm);
3212 } while (!r && prange->actual_loc && --retries);
3214 if (!r && prange->actual_loc)
3215 pr_info_once("Migration failed during eviction");
3217 if (!prange->actual_loc) {
3218 mutex_lock(&prange->lock);
3219 prange->svm_bo = NULL;
3220 mutex_unlock(&prange->lock);
3222 mutex_unlock(&prange->migrate_mutex);
3224 spin_lock(&svm_bo->list_lock);
3226 spin_unlock(&svm_bo->list_lock);
3227 mmap_read_unlock(mm);
3229 dma_fence_signal(&svm_bo->eviction_fence->base);
3231 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3232 * has been called in svm_migrate_vram_to_ram
3234 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3235 svm_range_bo_unref(svm_bo);
3239 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3240 uint64_t start, uint64_t size, uint32_t nattr,
3241 struct kfd_ioctl_svm_attribute *attrs)
3243 struct amdkfd_process_info *process_info = p->kgd_process_info;
3244 struct list_head update_list;
3245 struct list_head insert_list;
3246 struct list_head remove_list;
3247 struct svm_range_list *svms;
3248 struct svm_range *prange;
3249 struct svm_range *next;
3250 bool update_mapping = false;
3254 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3255 p->pasid, &p->svms, start, start + size - 1, size);
3257 r = svm_range_check_attr(p, nattr, attrs);
3263 mutex_lock(&process_info->lock);
3265 svm_range_list_lock_and_flush_work(svms, mm);
3267 r = svm_range_is_valid(p, start, size);
3269 pr_debug("invalid range r=%d\n", r);
3270 mmap_write_unlock(mm);
3274 mutex_lock(&svms->lock);
3276 /* Add new range and split existing ranges as needed */
3277 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3278 &insert_list, &remove_list);
3280 mutex_unlock(&svms->lock);
3281 mmap_write_unlock(mm);
3284 /* Apply changes as a transaction */
3285 list_for_each_entry_safe(prange, next, &insert_list, list) {
3286 svm_range_add_to_svms(prange);
3287 svm_range_add_notifier_locked(mm, prange);
3289 list_for_each_entry(prange, &update_list, update_list) {
3290 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3291 /* TODO: unmap ranges from GPU that lost access */
3293 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3294 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3295 prange->svms, prange, prange->start,
3297 svm_range_unlink(prange);
3298 svm_range_remove_notifier(prange);
3299 svm_range_free(prange);
3302 mmap_write_downgrade(mm);
3303 /* Trigger migrations and revalidate and map to GPUs as needed. If
3304 * this fails we may be left with partially completed actions. There
3305 * is no clean way of rolling back to the previous state in such a
3306 * case because the rollback wouldn't be guaranteed to work either.
3308 list_for_each_entry(prange, &update_list, update_list) {
3311 mutex_lock(&prange->migrate_mutex);
3313 r = svm_range_trigger_migration(mm, prange, &migrated);
3315 goto out_unlock_range;
3317 if (migrated && !p->xnack_enabled) {
3318 pr_debug("restore_work will update mappings of GPUs\n");
3319 mutex_unlock(&prange->migrate_mutex);
3323 if (!migrated && !update_mapping) {
3324 mutex_unlock(&prange->migrate_mutex);
3328 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3330 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3331 true, true, flush_tlb);
3333 pr_debug("failed %d to map svm range\n", r);
3336 mutex_unlock(&prange->migrate_mutex);
3341 svm_range_debug_dump(svms);
3343 mutex_unlock(&svms->lock);
3344 mmap_read_unlock(mm);
3346 mutex_unlock(&process_info->lock);
3348 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3349 &p->svms, start, start + size - 1, r);
3355 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3356 uint64_t start, uint64_t size, uint32_t nattr,
3357 struct kfd_ioctl_svm_attribute *attrs)
3359 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3360 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3361 bool get_preferred_loc = false;
3362 bool get_prefetch_loc = false;
3363 bool get_granularity = false;
3364 bool get_accessible = false;
3365 bool get_flags = false;
3366 uint64_t last = start + size - 1UL;
3367 uint8_t granularity = 0xff;
3368 struct interval_tree_node *node;
3369 struct svm_range_list *svms;
3370 struct svm_range *prange;
3371 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3372 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3373 uint32_t flags_and = 0xffffffff;
3374 uint32_t flags_or = 0;
3379 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3380 start + size - 1, nattr);
3382 /* Flush pending deferred work to avoid racing with deferred actions from
3383 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3384 * can still race with get_attr because we don't hold the mmap lock. But that
3385 * would be a race condition in the application anyway, and undefined
3386 * behaviour is acceptable in that case.
3388 flush_work(&p->svms.deferred_list_work);
3391 r = svm_range_is_valid(p, start, size);
3392 mmap_read_unlock(mm);
3394 pr_debug("invalid range r=%d\n", r);
3398 for (i = 0; i < nattr; i++) {
3399 switch (attrs[i].type) {
3400 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3401 get_preferred_loc = true;
3403 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3404 get_prefetch_loc = true;
3406 case KFD_IOCTL_SVM_ATTR_ACCESS:
3407 get_accessible = true;
3409 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3410 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3413 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3414 get_granularity = true;
3416 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3417 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3420 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3427 mutex_lock(&svms->lock);
3429 node = interval_tree_iter_first(&svms->objects, start, last);
3431 pr_debug("range attrs not found return default values\n");
3432 svm_range_set_default_attributes(&location, &prefetch_loc,
3433 &granularity, &flags_and);
3434 flags_or = flags_and;
3435 if (p->xnack_enabled)
3436 bitmap_copy(bitmap_access, svms->bitmap_supported,
3439 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3440 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3443 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3444 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3447 struct interval_tree_node *next;
3449 prange = container_of(node, struct svm_range, it_node);
3450 next = interval_tree_iter_next(node, start, last);
3452 if (get_preferred_loc) {
3453 if (prange->preferred_loc ==
3454 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3455 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3456 location != prange->preferred_loc)) {
3457 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3458 get_preferred_loc = false;
3460 location = prange->preferred_loc;
3463 if (get_prefetch_loc) {
3464 if (prange->prefetch_loc ==
3465 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3466 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3467 prefetch_loc != prange->prefetch_loc)) {
3468 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3469 get_prefetch_loc = false;
3471 prefetch_loc = prange->prefetch_loc;
3474 if (get_accessible) {
3475 bitmap_and(bitmap_access, bitmap_access,
3476 prange->bitmap_access, MAX_GPU_INSTANCE);
3477 bitmap_and(bitmap_aip, bitmap_aip,
3478 prange->bitmap_aip, MAX_GPU_INSTANCE);
3481 flags_and &= prange->flags;
3482 flags_or |= prange->flags;
3485 if (get_granularity && prange->granularity < granularity)
3486 granularity = prange->granularity;
3491 mutex_unlock(&svms->lock);
3493 for (i = 0; i < nattr; i++) {
3494 switch (attrs[i].type) {
3495 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3496 attrs[i].value = location;
3498 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3499 attrs[i].value = prefetch_loc;
3501 case KFD_IOCTL_SVM_ATTR_ACCESS:
3502 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3505 pr_debug("invalid gpuid %x\n", attrs[i].value);
3508 if (test_bit(gpuidx, bitmap_access))
3509 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3510 else if (test_bit(gpuidx, bitmap_aip))
3512 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3514 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3516 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3517 attrs[i].value = flags_and;
3519 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3520 attrs[i].value = ~flags_or;
3522 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3523 attrs[i].value = (uint32_t)granularity;
3531 int kfd_criu_resume_svm(struct kfd_process *p)
3533 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3534 int nattr_common = 4, nattr_accessibility = 1;
3535 struct criu_svm_metadata *criu_svm_md = NULL;
3536 struct svm_range_list *svms = &p->svms;
3537 struct criu_svm_metadata *next = NULL;
3538 uint32_t set_flags = 0xffffffff;
3539 int i, j, num_attrs, ret = 0;
3540 uint64_t set_attr_size;
3541 struct mm_struct *mm;
3543 if (list_empty(&svms->criu_svm_metadata_list)) {
3544 pr_debug("No SVM data from CRIU restore stage 2\n");
3548 mm = get_task_mm(p->lead_thread);
3550 pr_err("failed to get mm for the target process\n");
3554 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3557 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3558 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3559 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3561 for (j = 0; j < num_attrs; j++) {
3562 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3563 i, j, criu_svm_md->data.attrs[j].type,
3564 i, j, criu_svm_md->data.attrs[j].value);
3565 switch (criu_svm_md->data.attrs[j].type) {
3566 /* During Checkpoint operation, the query for
3567 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3568 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were
3569 * not used by the range which was checkpointed. Care
3570 * must be taken to not restore with an invalid value
3571 * otherwise the gpuidx value will be invalid and
3572 * set_attr would eventually fail so just replace those
3573 * with another dummy attribute such as
3574 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3576 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3577 if (criu_svm_md->data.attrs[j].value ==
3578 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3579 criu_svm_md->data.attrs[j].type =
3580 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3581 criu_svm_md->data.attrs[j].value = 0;
3584 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3585 set_flags = criu_svm_md->data.attrs[j].value;
/* CLR_FLAGS is not available via get_attr during checkpoint, but it needs
* to be inserted before restoring the ranges, so allocate extra space for
* it before calling set_attr.
3596 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3598 set_attr_new = krealloc(set_attr, set_attr_size,
3600 if (!set_attr_new) {
3604 set_attr = set_attr_new;
3606 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3607 sizeof(struct kfd_ioctl_svm_attribute));
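/* Clearing the complement of the saved SET_FLAGS value restores the exact
* flag state of the checkpointed range.
*/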
3608 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3609 set_attr[num_attrs].value = ~set_flags;
3611 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3612 criu_svm_md->data.size, num_attrs + 1,
3615 pr_err("CRIU: failed to set range attributes\n");
3623 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3624 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3625 criu_svm_md->data.start_addr);
3634 int kfd_criu_restore_svm(struct kfd_process *p,
3635 uint8_t __user *user_priv_ptr,
3636 uint64_t *priv_data_offset,
3637 uint64_t max_priv_data_size)
3639 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3640 int nattr_common = 4, nattr_accessibility = 1;
3641 struct criu_svm_metadata *criu_svm_md = NULL;
3642 struct svm_range_list *svms = &p->svms;
3643 uint32_t num_devices;
3646 num_devices = p->n_pdds;
/* Handle one SVM range object at a time. The number of GPUs is assumed to be
* the same on the restore node; this must have been checked while evaluating
* the topology earlier.
3652 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3653 (nattr_common + nattr_accessibility * num_devices);
3654 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3656 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3659 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3661 pr_err("failed to allocate memory to store svm metadata\n");
3664 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3669 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3670 svm_priv_data_size);
3675 *priv_data_offset += svm_priv_data_size;
3677 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3687 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3688 uint64_t *svm_priv_data_size)
3690 uint64_t total_size, accessibility_size, common_attr_size;
3691 int nattr_common = 4, nattr_accessibility = 1;
3692 int num_devices = p->n_pdds;
3693 struct svm_range_list *svms;
3694 struct svm_range *prange;
3697 *svm_priv_data_size = 0;
3703 mutex_lock(&svms->lock);
3704 list_for_each_entry(prange, &svms->list, list) {
3705 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3706 prange, prange->start, prange->npages,
3707 prange->start + prange->npages - 1);
3710 mutex_unlock(&svms->lock);
3712 *num_svm_ranges = count;
/* Only the accessibility attributes need to be queried for each GPU
* individually; the remaining ones apply to the entire process regardless
* of the individual GPU nodes. Of the remaining attributes,
* KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
3718 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
3719 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
3720 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
3721 * KFD_IOCTL_SVM_ATTR_GRANULARITY
* ** ACCESSIBILITY ATTRIBUTES **
3724 * (Considered as one, type is altered during query, value is gpuid)
3725 * KFD_IOCTL_SVM_ATTR_ACCESS
3726 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
3727 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
3729 if (*num_svm_ranges > 0) {
3730 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3732 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
3733 nattr_accessibility * num_devices;
3735 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3736 common_attr_size + accessibility_size;
3738 *svm_priv_data_size = *num_svm_ranges * total_size;
3741 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
3742 *svm_priv_data_size);
3746 int kfd_criu_checkpoint_svm(struct kfd_process *p,
3747 uint8_t __user *user_priv_data,
3748 uint64_t *priv_data_offset)
3750 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
3751 struct kfd_ioctl_svm_attribute *query_attr = NULL;
3752 uint64_t svm_priv_data_size, query_attr_size = 0;
3753 int index, nattr_common = 4, ret = 0;
3754 struct svm_range_list *svms;
3755 int num_devices = p->n_pdds;
3756 struct svm_range *prange;
3757 struct mm_struct *mm;
3763 mm = get_task_mm(p->lead_thread);
3765 pr_err("failed to get mm for the target process\n");
3769 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3770 (nattr_common + num_devices);
3772 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
3778 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
3779 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
3780 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3781 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
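/* Per-device accessibility attributes follow the common ones; get_attr
* rewrites the type to ACCESS, ACCESS_IN_PLACE or NO_ACCESS for each GPU.
*/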
3783 for (index = 0; index < num_devices; index++) {
3784 struct kfd_process_device *pdd = p->pdds[index];
3786 query_attr[index + nattr_common].type =
3787 KFD_IOCTL_SVM_ATTR_ACCESS;
3788 query_attr[index + nattr_common].value = pdd->user_gpu_id;
3791 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
3793 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
3800 list_for_each_entry(prange, &svms->list, list) {
3802 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
3803 svm_priv->start_addr = prange->start;
3804 svm_priv->size = prange->npages;
3805 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
3806 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
3807 prange, prange->start, prange->npages,
3808 prange->start + prange->npages - 1,
3809 prange->npages * PAGE_SIZE);
3811 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
3813 (nattr_common + num_devices),
3816 pr_err("CRIU: failed to obtain range attributes\n");
3820 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
3821 svm_priv_data_size)) {
3822 pr_err("Failed to copy svm priv to user\n");
3827 *priv_data_offset += svm_priv_data_size;
3842 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3843 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3845 struct mm_struct *mm = current->mm;
3848 start >>= PAGE_SHIFT;
3849 size >>= PAGE_SHIFT;
3852 case KFD_IOCTL_SVM_OP_SET_ATTR:
3853 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
3855 case KFD_IOCTL_SVM_OP_GET_ATTR:
3856 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);