1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include "amdgpu_sync.h"
27 #include "amdgpu_object.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_mn.h"
31 #include "amdgpu_xgmi.h"
34 #include "kfd_migrate.h"
36 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
38 /* Long enough to ensure no retry fault comes after svm range is restored and
39 * page table is updated.
40 */
41 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING 2000
43 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
44 static bool
45 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
46 const struct mmu_notifier_range *range,
47 unsigned long cur_seq);
49 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
50 .invalidate = svm_range_cpu_invalidate_pagetables,
51 };
53 /**
54 * svm_range_unlink - unlink svm_range from lists and interval tree
55 * @prange: svm range structure to be removed
57 * Remove the svm_range from the svms and svm_bo lists and the svms
58 * interval tree.
60 * Context: The caller must hold svms->lock
61 */
62 static void svm_range_unlink(struct svm_range *prange)
64 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
65 prange, prange->start, prange->last);
68 spin_lock(&prange->svm_bo->list_lock);
69 list_del(&prange->svm_bo_list);
70 spin_unlock(&prange->svm_bo->list_lock);
73 list_del(&prange->list);
74 if (prange->it_node.start != 0 && prange->it_node.last != 0)
75 interval_tree_remove(&prange->it_node, &prange->svms->objects);
79 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
81 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
82 prange, prange->start, prange->last);
84 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
85 prange->start << PAGE_SHIFT,
86 prange->npages << PAGE_SHIFT,
87 &svm_range_mn_ops);
90 /**
91 * svm_range_add_to_svms - add svm range to svms
92 * @prange: svm range structure to be added
94 * Add the svm range to the svms interval tree and linked list
96 * Context: The caller must hold svms->lock
97 */
98 static void svm_range_add_to_svms(struct svm_range *prange)
100 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
101 prange, prange->start, prange->last);
103 list_add_tail(&prange->list, &prange->svms->list);
104 prange->it_node.start = prange->start;
105 prange->it_node.last = prange->last;
106 interval_tree_insert(&prange->it_node, &prange->svms->objects);
109 static void svm_range_remove_notifier(struct svm_range *prange)
111 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
112 prange->svms, prange,
113 prange->notifier.interval_tree.start >> PAGE_SHIFT,
114 prange->notifier.interval_tree.last >> PAGE_SHIFT);
116 if (prange->notifier.interval_tree.start != 0 &&
117 prange->notifier.interval_tree.last != 0)
118 mmu_interval_notifier_remove(&prange->notifier);
122 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
123 unsigned long offset, unsigned long npages,
124 unsigned long *hmm_pfns, uint32_t gpuidx)
126 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
127 dma_addr_t *addr = prange->dma_addr[gpuidx];
128 struct device *dev = adev->dev;
133 addr = kvmalloc_array(prange->npages, sizeof(*addr),
134 GFP_KERNEL | __GFP_ZERO);
137 prange->dma_addr[gpuidx] = addr;
141 for (i = 0; i < npages; i++) {
142 if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
143 "leaking dma mapping\n"))
144 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
146 page = hmm_pfn_to_page(hmm_pfns[i]);
147 if (is_zone_device_page(page)) {
148 struct amdgpu_device *bo_adev =
149 amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
151 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
152 bo_adev->vm_manager.vram_base_offset -
153 bo_adev->kfd.dev->pgmap.range.start;
154 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
155 pr_debug("vram address detected: 0x%llx\n", addr[i]);
158 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
159 r = dma_mapping_error(dev, addr[i]);
161 pr_debug("failed %d dma_map_page\n", r);
164 pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
165 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
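/* Note on svm_range_dma_map_dev() above: pages that live in VRAM (zone
 * device pages) are not DMA-mapped; their address is computed from the hmm
 * pfn relative to the owning device's vram_base_offset and pgmap range
 * start, and tagged with SVM_RANGE_VRAM_DOMAIN so later code can tell VRAM
 * from system addresses. Ordinary system pages are dma_map_page()'d per GPU
 * with DMA_BIDIRECTIONAL.
 */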
171 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
172 unsigned long offset, unsigned long npages,
173 unsigned long *hmm_pfns)
175 struct kfd_process *p;
179 p = container_of(prange->svms, struct kfd_process, svms);
181 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
182 struct kfd_process_device *pdd;
183 struct amdgpu_device *adev;
185 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
186 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
188 pr_debug("failed to find device idx %d\n", gpuidx);
191 adev = (struct amdgpu_device *)pdd->dev->kgd;
193 r = svm_range_dma_map_dev(adev, prange, offset, npages,
202 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
203 unsigned long offset, unsigned long npages)
205 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
211 for (i = offset; i < offset + npages; i++) {
212 if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
214 pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
215 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
220 void svm_range_free_dma_mappings(struct svm_range *prange)
222 struct kfd_process_device *pdd;
223 dma_addr_t *dma_addr;
225 struct kfd_process *p;
228 p = container_of(prange->svms, struct kfd_process, svms);
230 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
231 dma_addr = prange->dma_addr[gpuidx];
235 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
237 pr_debug("failed to find device idx %d\n", gpuidx);
240 dev = &pdd->dev->pdev->dev;
241 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
243 prange->dma_addr[gpuidx] = NULL;
247 static void svm_range_free(struct svm_range *prange)
249 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
250 prange->start, prange->last);
252 svm_range_vram_node_free(prange);
253 svm_range_free_dma_mappings(prange);
254 mutex_destroy(&prange->lock);
255 mutex_destroy(&prange->migrate_mutex);
260 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
261 uint8_t *granularity, uint32_t *flags)
263 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
264 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
266 *flags =
267 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
270 static struct
271 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
274 uint64_t size = last - start + 1;
275 struct svm_range *prange;
276 struct kfd_process *p;
278 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
281 prange->npages = size;
283 prange->start = start;
285 INIT_LIST_HEAD(&prange->list);
286 INIT_LIST_HEAD(&prange->update_list);
287 INIT_LIST_HEAD(&prange->remove_list);
288 INIT_LIST_HEAD(&prange->insert_list);
289 INIT_LIST_HEAD(&prange->svm_bo_list);
290 INIT_LIST_HEAD(&prange->deferred_list);
291 INIT_LIST_HEAD(&prange->child_list);
292 atomic_set(&prange->invalid, 0);
293 prange->validate_timestamp = 0;
294 mutex_init(&prange->migrate_mutex);
295 mutex_init(&prange->lock);
297 p = container_of(svms, struct kfd_process, svms);
298 if (p->xnack_enabled)
299 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
302 svm_range_set_default_attributes(&prange->preferred_loc,
303 &prange->prefetch_loc,
304 &prange->granularity, &prange->flags);
306 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
311 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
313 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
319 static void svm_range_bo_release(struct kref *kref)
321 struct svm_range_bo *svm_bo;
323 svm_bo = container_of(kref, struct svm_range_bo, kref);
324 spin_lock(&svm_bo->list_lock);
325 while (!list_empty(&svm_bo->range_list)) {
326 struct svm_range *prange =
327 list_first_entry(&svm_bo->range_list,
328 struct svm_range, svm_bo_list);
329 /* list_del_init tells a concurrent svm_range_vram_node_new when
330 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
331 */
332 list_del_init(&prange->svm_bo_list);
333 spin_unlock(&svm_bo->list_lock);
335 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
336 prange->start, prange->last);
337 mutex_lock(&prange->lock);
338 prange->svm_bo = NULL;
339 mutex_unlock(&prange->lock);
341 spin_lock(&svm_bo->list_lock);
343 spin_unlock(&svm_bo->list_lock);
344 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
345 /* We're not in the eviction worker.
346 * Signal the fence and synchronize with any
347 * pending eviction work.
348 */
349 dma_fence_signal(&svm_bo->eviction_fence->base);
350 cancel_work_sync(&svm_bo->eviction_work);
352 dma_fence_put(&svm_bo->eviction_fence->base);
353 amdgpu_bo_unref(&svm_bo->bo);
357 void svm_range_bo_unref(struct svm_range_bo *svm_bo)
362 kref_put(&svm_bo->kref, svm_range_bo_release);
366 svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
368 struct amdgpu_device *bo_adev;
370 mutex_lock(&prange->lock);
371 if (!prange->svm_bo) {
372 mutex_unlock(&prange->lock);
375 if (prange->ttm_res) {
376 /* We still have a reference, all is well */
377 mutex_unlock(&prange->lock);
380 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
381 /*
382 * Migrate from GPU to GPU, remove range from source bo_adev
383 * svm_bo range list, and return false to allocate svm_bo from
384 * destination adev.
385 */
386 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
387 if (bo_adev != adev) {
388 mutex_unlock(&prange->lock);
390 spin_lock(&prange->svm_bo->list_lock);
391 list_del_init(&prange->svm_bo_list);
392 spin_unlock(&prange->svm_bo->list_lock);
394 svm_range_bo_unref(prange->svm_bo);
397 if (READ_ONCE(prange->svm_bo->evicting)) {
399 struct svm_range_bo *svm_bo;
400 /* The BO is getting evicted,
401 * we need to get a new one
402 */
403 mutex_unlock(&prange->lock);
404 svm_bo = prange->svm_bo;
405 f = dma_fence_get(&svm_bo->eviction_fence->base);
406 svm_range_bo_unref(prange->svm_bo);
407 /* wait for the fence to avoid long spin-loop
408 * at list_empty_careful
409 */
410 dma_fence_wait(f, false);
413 /* The BO was still around and we got
414 * a new reference to it
415 */
416 mutex_unlock(&prange->lock);
417 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
418 prange->svms, prange->start, prange->last);
420 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
425 mutex_unlock(&prange->lock);
428 /* We need a new svm_bo. Spin-loop to wait for concurrent
429 * svm_range_bo_release to finish removing this range from
430 * its range list. After this, it is safe to reuse the
431 * svm_bo pointer and svm_bo_list head.
432 */
433 while (!list_empty_careful(&prange->svm_bo_list))
439 static struct svm_range_bo *svm_range_bo_new(void)
441 struct svm_range_bo *svm_bo;
443 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
447 kref_init(&svm_bo->kref);
448 INIT_LIST_HEAD(&svm_bo->range_list);
449 spin_lock_init(&svm_bo->list_lock);
455 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
458 struct amdgpu_bo_param bp;
459 struct svm_range_bo *svm_bo;
460 struct amdgpu_bo_user *ubo;
461 struct amdgpu_bo *bo;
462 struct kfd_process *p;
463 struct mm_struct *mm;
466 p = container_of(prange->svms, struct kfd_process, svms);
467 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
468 prange->start, prange->last);
470 if (svm_range_validate_svm_bo(adev, prange))
473 svm_bo = svm_range_bo_new();
475 pr_debug("failed to alloc svm bo\n");
478 mm = get_task_mm(p->lead_thread);
480 pr_debug("failed to get mm\n");
484 svm_bo->svms = prange->svms;
485 svm_bo->eviction_fence =
486 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
490 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
491 svm_bo->evicting = 0;
492 memset(&bp, 0, sizeof(bp));
493 bp.size = prange->npages * PAGE_SIZE;
494 bp.byte_align = PAGE_SIZE;
495 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
496 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
497 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
498 bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
499 bp.type = ttm_bo_type_device;
502 r = amdgpu_bo_create_user(adev, &bp, &ubo);
504 pr_debug("failed %d to create bo\n", r);
505 goto create_bo_failed;
508 r = amdgpu_bo_reserve(bo, true);
510 pr_debug("failed %d to reserve bo\n", r);
511 goto reserve_bo_failed;
514 r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
516 pr_debug("failed %d to reserve bo\n", r);
517 amdgpu_bo_unreserve(bo);
518 goto reserve_bo_failed;
520 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
522 amdgpu_bo_unreserve(bo);
525 prange->svm_bo = svm_bo;
526 prange->ttm_res = bo->tbo.resource;
529 spin_lock(&svm_bo->list_lock);
530 list_add(&prange->svm_bo_list, &svm_bo->range_list);
531 spin_unlock(&svm_bo->list_lock);
536 amdgpu_bo_unref(&bo);
538 dma_fence_put(&svm_bo->eviction_fence->base);
540 prange->ttm_res = NULL;
545 void svm_range_vram_node_free(struct svm_range *prange)
547 svm_range_bo_unref(prange->svm_bo);
548 prange->ttm_res = NULL;
551 struct amdgpu_device *
552 svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
554 struct kfd_process_device *pdd;
555 struct kfd_process *p;
558 p = container_of(prange->svms, struct kfd_process, svms);
560 gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
562 pr_debug("failed to get device by id 0x%x\n", gpu_id);
565 pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
567 pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
571 return (struct amdgpu_device *)pdd->dev->kgd;
574 struct kfd_process_device *
575 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
577 struct kfd_process *p;
578 int32_t gpu_idx, gpuid;
581 p = container_of(prange->svms, struct kfd_process, svms);
583 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
585 pr_debug("failed to get device id by adev %p\n", adev);
589 return kfd_process_device_from_gpuidx(p, gpu_idx);
592 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
594 struct ttm_operation_ctx ctx = { false, false };
596 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
598 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
602 svm_range_check_attr(struct kfd_process *p,
603 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
607 for (i = 0; i < nattr; i++) {
608 uint32_t val = attrs[i].value;
609 int gpuidx = MAX_GPU_INSTANCE;
611 switch (attrs[i].type) {
612 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
613 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
614 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
615 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
617 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
618 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
619 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
621 case KFD_IOCTL_SVM_ATTR_ACCESS:
622 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
623 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
624 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
626 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
628 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
630 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
633 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
638 pr_debug("no GPU 0x%x found\n", val);
640 } else if (gpuidx < MAX_GPU_INSTANCE &&
641 !test_bit(gpuidx, p->svms.bitmap_supported)) {
642 pr_debug("GPU 0x%x not supported\n", val);
651 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
652 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
657 for (i = 0; i < nattr; i++) {
658 switch (attrs[i].type) {
659 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
660 prange->preferred_loc = attrs[i].value;
662 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
663 prange->prefetch_loc = attrs[i].value;
665 case KFD_IOCTL_SVM_ATTR_ACCESS:
666 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
667 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
668 gpuidx = kfd_process_gpuidx_from_gpuid(p,
670 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
671 bitmap_clear(prange->bitmap_access, gpuidx, 1);
672 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
673 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
674 bitmap_set(prange->bitmap_access, gpuidx, 1);
675 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
677 bitmap_clear(prange->bitmap_access, gpuidx, 1);
678 bitmap_set(prange->bitmap_aip, gpuidx, 1);
681 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
682 prange->flags |= attrs[i].value;
684 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
685 prange->flags &= ~attrs[i].value;
687 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
688 prange->granularity = attrs[i].value;
691 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
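/* Illustrative example (not from the driver source): a caller that wants a
 * range accessible on one GPU and prefetched there could pass attrs such as
 *   { .type = KFD_IOCTL_SVM_ATTR_ACCESS,       .value = gpu_id },
 *   { .type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC, .value = gpu_id },
 * where gpu_id is a hypothetical device id. svm_range_check_attr() validates
 * that the id maps to a supported GPU, and svm_range_apply_attrs() then sets
 * the bitmap_access bit for that GPU and records the prefetch location.
 */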
696 /**
697 * svm_range_debug_dump - print all range information from svms
698 * @svms: svm range list header
700 * Debug output of svm range start, end and prefetch location from the svms
701 * interval tree and linked list
703 * Context: The caller must hold svms->lock
704 */
705 static void svm_range_debug_dump(struct svm_range_list *svms)
707 struct interval_tree_node *node;
708 struct svm_range *prange;
710 pr_debug("dump svms 0x%p list\n", svms);
711 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
713 list_for_each_entry(prange, &svms->list, list) {
714 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
715 prange, prange->start, prange->npages,
716 prange->start + prange->npages - 1,
720 pr_debug("dump svms 0x%p interval tree\n", svms);
721 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
722 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
724 prange = container_of(node, struct svm_range, it_node);
725 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
726 prange, prange->start, prange->npages,
727 prange->start + prange->npages - 1,
729 node = interval_tree_iter_next(node, 0, ~0ULL);
734 svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
736 return (old->prefetch_loc == new->prefetch_loc &&
737 old->flags == new->flags &&
738 old->granularity == new->granularity);
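/* How svm_range_split_array() below divides a per-page array (sketch): the
 * new array receives new_n entries copied from offset (new_start - old_start)
 * of the old array; the old array is then reallocated to old_n entries, taken
 * from offset new_n when the new range is the head (new_start == old_start)
 * or from offset 0 when it is the tail. E.g. splitting entries for pages
 * [0x100..0x107] at 0x105 gives new = entries 5..7 and old = entries 0..4.
 */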
742 svm_range_split_array(void *ppnew, void *ppold, size_t size,
743 uint64_t old_start, uint64_t old_n,
744 uint64_t new_start, uint64_t new_n)
746 unsigned char *new, *old, *pold;
751 pold = *(unsigned char **)ppold;
755 new = kvmalloc_array(new_n, size, GFP_KERNEL);
759 d = (new_start - old_start) * size;
760 memcpy(new, pold + d, new_n * size);
762 old = kvmalloc_array(old_n, size, GFP_KERNEL);
768 d = (new_start == old_start) ? new_n * size : 0;
769 memcpy(old, pold + d, old_n * size);
772 *(void **)ppold = old;
773 *(void **)ppnew = new;
779 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
780 uint64_t start, uint64_t last)
782 uint64_t npages = last - start + 1;
785 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
786 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
787 sizeof(*old->dma_addr[i]), old->start,
788 npages, new->start, new->npages);
797 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
798 uint64_t start, uint64_t last)
800 uint64_t npages = last - start + 1;
802 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
803 new->svms, new, new->start, start, last);
805 if (new->start == old->start) {
806 new->offset = old->offset;
807 old->offset += new->npages;
809 new->offset = old->offset + npages;
812 new->svm_bo = svm_range_bo_ref(old->svm_bo);
813 new->ttm_res = old->ttm_res;
815 spin_lock(&new->svm_bo->list_lock);
816 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
817 spin_unlock(&new->svm_bo->list_lock);
822 /**
823 * svm_range_split_adjust - split range and adjust
825 * @new: the new range
826 * @old: the old range
827 * @start: the old range adjust to start address in pages
828 * @last: the old range adjust to last address in pages
830 * Copy system memory dma_addr or vram ttm_res from the old range to the new
831 * range, from new_start up to size new->npages; the remaining old range is
832 * from start to last
834 * Return:
835 * 0 - OK, -ENOMEM - out of memory
836 */
837 static int
838 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
839 uint64_t start, uint64_t last)
843 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
844 new->svms, new->start, old->start, old->last, start, last);
846 if (new->start < old->start ||
847 new->last > old->last) {
848 WARN_ONCE(1, "invalid new range start or last\n");
852 r = svm_range_split_pages(new, old, start, last);
856 if (old->actual_loc && old->ttm_res) {
857 r = svm_range_split_nodes(new, old, start, last);
862 old->npages = last - start + 1;
865 new->flags = old->flags;
866 new->preferred_loc = old->preferred_loc;
867 new->prefetch_loc = old->prefetch_loc;
868 new->actual_loc = old->actual_loc;
869 new->granularity = old->granularity;
870 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
871 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
876 /**
877 * svm_range_split - split a range into 2 ranges
879 * @prange: the svm range to split
880 * @start: the remaining range start address in pages
881 * @last: the remaining range last address in pages
882 * @new: the result new range generated
885 * case 1: if start == prange->start
886 * prange ==> prange[start, last]
887 * new range [last + 1, prange->last]
889 * case 2: if last == prange->last
890 * prange ==> prange[start, last]
891 * new range [prange->start, start - 1]
893 * Return:
894 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
895 */
896 static int
897 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
898 struct svm_range **new)
900 uint64_t old_start = prange->start;
901 uint64_t old_last = prange->last;
902 struct svm_range_list *svms;
905 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
906 old_start, old_last, start, last);
908 if (old_start != start && old_last != last)
910 if (start < old_start || last > old_last)
914 if (old_start == start)
915 *new = svm_range_new(svms, last + 1, old_last);
917 *new = svm_range_new(svms, old_start, start - 1);
921 r = svm_range_split_adjust(*new, prange, start, last);
923 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
924 r, old_start, old_last, start, last);
925 svm_range_free(*new);
933 svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
934 uint64_t new_last, struct list_head *insert_list)
936 struct svm_range *tail;
937 int r = svm_range_split(prange, prange->start, new_last, &tail);
940 list_add(&tail->insert_list, insert_list);
945 svm_range_split_head(struct svm_range *prange, struct svm_range *new,
946 uint64_t new_start, struct list_head *insert_list)
948 struct svm_range *head;
949 int r = svm_range_split(prange, new_start, prange->last, &head);
952 list_add(&head->insert_list, insert_list);
957 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
958 struct svm_range *pchild, enum svm_work_list_ops op)
960 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
961 pchild, pchild->start, pchild->last, prange, op);
963 pchild->work_item.mm = mm;
964 pchild->work_item.op = op;
965 list_add_tail(&pchild->child_list, &prange->child_list);
968 /**
969 * svm_range_split_by_granularity - collect ranges within granularity boundary
971 * @p: the process with svms list
972 * @mm: the mm structure
973 * @addr: the vm fault address in pages, to split the prange
974 * @parent: parent range if prange is from child list
975 * @prange: prange to split
977 * Trims @prange to be a single aligned block of prange->granularity if
978 * possible. The head and tail are added to the child_list in @parent.
980 * Context: caller must hold mmap_read_lock and prange->lock
982 * Return:
983 * 0 - OK, otherwise error code
984 */
986 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
987 unsigned long addr, struct svm_range *parent,
988 struct svm_range *prange)
990 struct svm_range *head, *tail;
991 unsigned long start, last, size;
994 /* Align the split range start and size to the granularity size, so a single
995 * PTE will be used for the whole range; this reduces the number of PTEs
996 * updated and the L1 TLB space used for translation.
997 */
998 size = 1UL << prange->granularity;
999 start = ALIGN_DOWN(addr, size);
1000 last = ALIGN(addr + 1, size) - 1;
1002 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1003 prange->svms, prange->start, prange->last, start, last, size);
1005 if (start > prange->start) {
1006 r = svm_range_split(prange, start, prange->last, &head);
1009 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1012 if (last < prange->last) {
1013 r = svm_range_split(prange, prange->start, last, &tail);
1016 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1019 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1020 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1021 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1022 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1023 prange, prange->start, prange->last,
1024 SVM_OP_ADD_RANGE_AND_MAP);
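/* Worked example for svm_range_split_by_granularity() above: with granularity
 * 9, size = 1UL << 9 = 512 pages (2 MiB with 4 KiB pages). A fault at page
 * 0x1234 gives start = ALIGN_DOWN(0x1234, 512) = 0x1200 and
 * last = ALIGN(0x1235, 512) - 1 = 0x13ff, so the prange is trimmed to that
 * single aligned block and the cut-off head/tail become children of @parent.
 */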
1030 svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
1033 struct amdgpu_device *bo_adev;
1034 uint32_t flags = prange->flags;
1035 uint32_t mapping_flags = 0;
1037 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1038 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1040 if (domain == SVM_RANGE_VRAM_DOMAIN)
1041 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1043 switch (adev->asic_type) {
1045 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1046 if (bo_adev == adev) {
1047 mapping_flags |= coherent ?
1048 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1050 mapping_flags |= coherent ?
1051 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1052 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1056 mapping_flags |= coherent ?
1057 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1060 case CHIP_ALDEBARAN:
1061 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1062 if (bo_adev == adev) {
1063 mapping_flags |= coherent ?
1064 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1065 if (adev->gmc.xgmi.connected_to_cpu)
1068 mapping_flags |= coherent ?
1069 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1070 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1074 mapping_flags |= coherent ?
1075 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1079 mapping_flags |= coherent ?
1080 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1083 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1085 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1086 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1087 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1088 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1090 pte_flags = AMDGPU_PTE_VALID;
1091 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1092 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1094 pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
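/* Summary of the flag selection above: system-memory mappings get
 * AMDGPU_PTE_SYSTEM and AMDGPU_PTE_SNOOPED; VRAM mapped on its owning GPU
 * uses a cached MTYPE (CC or RW), while VRAM accessed from another GPU falls
 * back to UC/NC (XGMI peers are treated specially). The
 * KFD_IOCTL_SVM_FLAG_GPU_RO and _GPU_EXEC flags clear the writeable bit or
 * add the executable bit respectively.
 */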
1099 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1100 uint64_t start, uint64_t last,
1101 struct dma_fence **fence)
1103 uint64_t init_pte_value = 0;
1105 pr_debug("[0x%llx 0x%llx]\n", start, last);
1107 return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
1108 start, last, init_pte_value, 0,
1109 NULL, NULL, fence, NULL);
1113 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1116 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1117 struct kfd_process_device *pdd;
1118 struct dma_fence *fence = NULL;
1119 struct amdgpu_device *adev;
1120 struct kfd_process *p;
1124 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1126 p = container_of(prange->svms, struct kfd_process, svms);
1128 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1129 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1130 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1132 pr_debug("failed to find device idx %d\n", gpuidx);
1135 adev = (struct amdgpu_device *)pdd->dev->kgd;
1137 r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1138 start, last, &fence);
1143 r = dma_fence_wait(fence, false);
1144 dma_fence_put(fence);
1149 amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1150 p->pasid, TLB_FLUSH_HEAVYWEIGHT);
1157 svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1158 struct svm_range *prange, unsigned long offset,
1159 unsigned long npages, bool readonly, dma_addr_t *dma_addr,
1160 struct amdgpu_device *bo_adev, struct dma_fence **fence)
1162 struct amdgpu_bo_va bo_va;
1163 bool table_freed = false;
1165 unsigned long last_start;
1170 last_start = prange->start + offset;
1172 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1173 last_start, last_start + npages - 1, readonly);
1175 if (prange->svm_bo && prange->ttm_res)
1176 bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
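/* The loop below coalesces runs of consecutive pages that belong to the same
 * domain (VRAM vs. system memory, encoded in the SVM_RANGE_VRAM_DOMAIN bit of
 * dma_addr) and issues one amdgpu_vm_bo_update_mapping() call per run;
 * last_start tracks the first page of the current run.
 */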
1178 for (i = offset; i < offset + npages; i++) {
1179 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1180 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1181 if ((prange->start + i) < prange->last &&
1182 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1185 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1186 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1188 pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
1190 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1192 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1193 prange->svms, last_start, prange->start + i,
1194 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1197 r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1199 prange->start + i, pte_flags,
1200 last_start - prange->start,
1205 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1208 last_start = prange->start + i + 1;
1211 r = amdgpu_vm_update_pdes(adev, vm, false);
1213 pr_debug("failed %d to update directories 0x%lx\n", r,
1219 *fence = dma_fence_get(vm->last_update);
1222 struct kfd_process *p;
1224 p = container_of(prange->svms, struct kfd_process, svms);
1225 amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1226 p->pasid, TLB_FLUSH_LEGACY);
1233 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1234 unsigned long npages, bool readonly,
1235 unsigned long *bitmap, bool wait)
1237 struct kfd_process_device *pdd;
1238 struct amdgpu_device *bo_adev;
1239 struct amdgpu_device *adev;
1240 struct kfd_process *p;
1241 struct dma_fence *fence = NULL;
1245 if (prange->svm_bo && prange->ttm_res)
1246 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1250 p = container_of(prange->svms, struct kfd_process, svms);
1251 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1252 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1253 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1255 pr_debug("failed to find device idx %d\n", gpuidx);
1258 adev = (struct amdgpu_device *)pdd->dev->kgd;
1260 pdd = kfd_bind_process_to_device(pdd->dev, p);
1264 if (bo_adev && adev != bo_adev &&
1265 !amdgpu_xgmi_same_hive(adev, bo_adev)) {
1266 pr_debug("cannot map to device idx %d\n", gpuidx);
1270 r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1271 prange, offset, npages, readonly,
1272 prange->dma_addr[gpuidx],
1273 bo_adev, wait ? &fence : NULL);
1278 r = dma_fence_wait(fence, false);
1279 dma_fence_put(fence);
1282 pr_debug("failed %d to dma fence wait\n", r);
1291 struct svm_validate_context {
1292 struct kfd_process *process;
1293 struct svm_range *prange;
1295 unsigned long bitmap[MAX_GPU_INSTANCE];
1296 struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1];
1297 struct list_head validate_list;
1298 struct ww_acquire_ctx ticket;
1301 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1303 struct kfd_process_device *pdd;
1304 struct amdgpu_device *adev;
1305 struct amdgpu_vm *vm;
1309 INIT_LIST_HEAD(&ctx->validate_list);
1310 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1311 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1313 pr_debug("failed to find device idx %d\n", gpuidx);
1316 adev = (struct amdgpu_device *)pdd->dev->kgd;
1317 vm = drm_priv_to_vm(pdd->drm_priv);
1319 ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1320 ctx->tv[gpuidx].num_shared = 4;
1321 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1323 if (ctx->prange->svm_bo && ctx->prange->ttm_res) {
1324 ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo;
1325 ctx->tv[MAX_GPU_INSTANCE].num_shared = 1;
1326 list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list);
1329 r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1332 pr_debug("failed %d to reserve bo\n", r);
1336 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1337 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1339 pr_debug("failed to find device idx %d\n", gpuidx);
1343 adev = (struct amdgpu_device *)pdd->dev->kgd;
1345 r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
1346 svm_range_bo_validate, NULL);
1348 pr_debug("failed %d validate pt bos\n", r);
1356 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1360 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1362 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1365 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1367 struct kfd_process_device *pdd;
1368 struct amdgpu_device *adev;
1370 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1371 adev = (struct amdgpu_device *)pdd->dev->kgd;
1373 return SVM_ADEV_PGMAP_OWNER(adev);
1376 /*
1377 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1379 * To prevent concurrent destruction or change of range attributes, the
1380 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1381 * because that would block concurrent evictions and lead to deadlocks. To
1382 * serialize concurrent migrations or validations of the same range, the
1383 * prange->migrate_mutex must be held.
1385 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1388 * The following sequence ensures race-free validation and GPU mapping:
1390 * 1. Reserve page table (and SVM BO if range is in VRAM)
1391 * 2. hmm_range_fault to get page addresses (if system memory)
1392 * 3. DMA-map pages (if system memory)
1393 * 4-a. Take notifier lock
1394 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1395 * 4-c. Check that the range was not split or otherwise invalidated
1396 * 4-d. Update GPU page table
1397 * 4-e. Release notifier lock
1398 * 5. Release page table (and SVM BO) reservation
1399 */
1400 static int svm_range_validate_and_map(struct mm_struct *mm,
1401 struct svm_range *prange,
1402 int32_t gpuidx, bool intr, bool wait)
1404 struct svm_validate_context ctx;
1405 unsigned long start, end, addr;
1406 struct kfd_process *p;
1411 ctx.process = container_of(prange->svms, struct kfd_process, svms);
1412 ctx.prange = prange;
1415 if (gpuidx < MAX_GPU_INSTANCE) {
1416 bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1417 bitmap_set(ctx.bitmap, gpuidx, 1);
1418 } else if (ctx.process->xnack_enabled) {
1419 bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1421 /* If prefetching the range to a GPU, or if a GPU retry fault migrates the
1422 * range to a GPU which has the ACCESS attribute to the range, create the
1423 * mapping on that GPU.
1424 */
1425 if (prange->actual_loc) {
1426 gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1427 prange->actual_loc);
1429 WARN_ONCE(1, "failed get device by id 0x%x\n",
1430 prange->actual_loc);
1433 if (test_bit(gpuidx, prange->bitmap_access))
1434 bitmap_set(ctx.bitmap, gpuidx, 1);
1437 bitmap_or(ctx.bitmap, prange->bitmap_access,
1438 prange->bitmap_aip, MAX_GPU_INSTANCE);
1441 if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
1444 if (prange->actual_loc && !prange->ttm_res) {
1445 /* This should never happen. actual_loc gets set by
1446 * svm_migrate_ram_to_vram after allocating a BO.
1447 */
1448 WARN(1, "VRAM BO missing during validation\n");
1452 svm_range_reserve_bos(&ctx);
1454 p = container_of(prange->svms, struct kfd_process, svms);
1455 owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1457 for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1458 if (kfd_svm_page_owner(p, idx) != owner) {
1464 start = prange->start << PAGE_SHIFT;
1465 end = (prange->last + 1) << PAGE_SHIFT;
1466 for (addr = start; addr < end && !r; ) {
1467 struct hmm_range *hmm_range;
1468 struct vm_area_struct *vma;
1470 unsigned long offset;
1471 unsigned long npages;
1474 vma = find_vma(mm, addr);
1475 if (!vma || addr < vma->vm_start) {
1479 readonly = !(vma->vm_flags & VM_WRITE);
1481 next = min(vma->vm_end, end);
1482 npages = (next - addr) >> PAGE_SHIFT;
1483 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
1484 addr, npages, &hmm_range,
1485 readonly, true, owner);
1487 pr_debug("failed %d to get svm range pages\n", r);
1491 offset = (addr - start) >> PAGE_SHIFT;
1492 r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1493 hmm_range->hmm_pfns);
1495 pr_debug("failed %d to dma map range\n", r);
1499 svm_range_lock(prange);
1500 if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1501 pr_debug("hmm update the range, need validate again\n");
1505 if (!list_empty(&prange->child_list)) {
1506 pr_debug("range split by unmap in parallel, validate again\n");
1511 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1515 svm_range_unlock(prange);
1521 prange->validated_once = true;
1524 svm_range_unreserve_bos(&ctx);
1527 prange->validate_timestamp = ktime_to_us(ktime_get());
1532 /**
1533 * svm_range_list_lock_and_flush_work - flush pending deferred work
1535 * @svms: the svm range list
1536 * @mm: the mm structure
1538 * Context: Returns with mmap write lock held, pending deferred work flushed
1539 */
1542 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1543 struct mm_struct *mm)
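/* Flush any pending deferred work first, then take the mmap write lock. If
 * new deferred work was queued before the lock was acquired, drop the lock
 * and retry, so the caller never proceeds with deferred ranges still pending
 * (see the retry_flush_work loop below).
 */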
1546 flush_work(&svms->deferred_list_work);
1547 mmap_write_lock(mm);
1549 if (list_empty(&svms->deferred_range_list))
1551 mmap_write_unlock(mm);
1552 pr_debug("retry flush\n");
1553 goto retry_flush_work;
1556 static void svm_range_restore_work(struct work_struct *work)
1558 struct delayed_work *dwork = to_delayed_work(work);
1559 struct amdkfd_process_info *process_info;
1560 struct svm_range_list *svms;
1561 struct svm_range *prange;
1562 struct kfd_process *p;
1563 struct mm_struct *mm;
1568 svms = container_of(dwork, struct svm_range_list, restore_work);
1569 evicted_ranges = atomic_read(&svms->evicted_ranges);
1570 if (!evicted_ranges)
1573 pr_debug("restore svm ranges\n");
1575 /* kfd_process_notifier_release destroys this worker thread. So during
1576 * the lifetime of this thread, kfd_process and mm will be valid.
1577 */
1578 p = container_of(svms, struct kfd_process, svms);
1579 process_info = p->kgd_process_info;
1584 mutex_lock(&process_info->lock);
1585 svm_range_list_lock_and_flush_work(svms, mm);
1586 mutex_lock(&svms->lock);
1588 evicted_ranges = atomic_read(&svms->evicted_ranges);
1590 list_for_each_entry(prange, &svms->list, list) {
1591 invalid = atomic_read(&prange->invalid);
1595 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1596 prange->svms, prange, prange->start, prange->last,
1597 invalid);
1599 /*
1600 * If the range is migrating, wait for the migration to finish.
1601 */
1602 mutex_lock(&prange->migrate_mutex);
1604 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1607 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1610 mutex_unlock(&prange->migrate_mutex);
1612 goto out_reschedule;
1614 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1615 goto out_reschedule;
1618 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1620 goto out_reschedule;
1624 r = kgd2kfd_resume_mm(mm);
1626 /* No recovery from this failure. Probably the CP is
1627 * hanging. No point trying again.
1628 */
1629 pr_debug("failed %d to resume KFD\n", r);
1632 pr_debug("restore svm ranges successfully\n");
1635 mutex_unlock(&svms->lock);
1636 mmap_write_unlock(mm);
1637 mutex_unlock(&process_info->lock);
1639 /* If validation failed, reschedule another attempt */
1640 if (evicted_ranges) {
1641 pr_debug("reschedule to restore svm range\n");
1642 schedule_delayed_work(&svms->restore_work,
1643 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1647 /**
1648 * svm_range_evict - evict svm range
1650 * Stop all queues of the process to ensure GPU doesn't access the memory, then
1651 * return to let the CPU evict the buffer and proceed with the CPU page table update.
1653 * No lock is needed to sync CPU page table invalidation with GPU execution.
1654 * If invalidation happens while restore work is running, restore work will
1655 * restart to ensure it gets the latest CPU page mapping to the GPU, then start
1656 * the restore work again.
1657 */
1658 static int
1659 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1660 unsigned long start, unsigned long last)
1662 struct svm_range_list *svms = prange->svms;
1663 struct svm_range *pchild;
1664 struct kfd_process *p;
1667 p = container_of(svms, struct kfd_process, svms);
1669 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1670 svms, prange->start, prange->last, start, last);
1672 if (!p->xnack_enabled) {
1675 list_for_each_entry(pchild, &prange->child_list, child_list) {
1676 mutex_lock_nested(&pchild->lock, 1);
1677 if (pchild->start <= last && pchild->last >= start) {
1678 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1679 pchild->start, pchild->last);
1680 atomic_inc(&pchild->invalid);
1682 mutex_unlock(&pchild->lock);
1685 if (prange->start <= last && prange->last >= start)
1686 atomic_inc(&prange->invalid);
1688 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1689 if (evicted_ranges != 1)
1692 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1693 prange->svms, prange->start, prange->last);
1695 /* First eviction, stop the queues */
1696 r = kgd2kfd_quiesce_mm(mm);
1698 pr_debug("failed to quiesce KFD\n");
1700 pr_debug("schedule to restore svm %p ranges\n", svms);
1701 schedule_delayed_work(&svms->restore_work,
1702 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
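/* With XNACK enabled the queues are not stopped: the range is simply unmapped
 * from the GPUs below, and the next GPU access raises a retry fault that
 * re-validates and re-maps the pages.
 */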
1706 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1707 prange->svms, start, last);
1708 list_for_each_entry(pchild, &prange->child_list, child_list) {
1709 mutex_lock_nested(&pchild->lock, 1);
1710 s = max(start, pchild->start);
1711 l = min(last, pchild->last);
1713 svm_range_unmap_from_gpus(pchild, s, l);
1714 mutex_unlock(&pchild->lock);
1716 s = max(start, prange->start);
1717 l = min(last, prange->last);
1719 svm_range_unmap_from_gpus(prange, s, l);
1725 static struct svm_range *svm_range_clone(struct svm_range *old)
1727 struct svm_range *new;
1729 new = svm_range_new(old->svms, old->start, old->last);
1734 new->ttm_res = old->ttm_res;
1735 new->offset = old->offset;
1736 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1737 spin_lock(&new->svm_bo->list_lock);
1738 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1739 spin_unlock(&new->svm_bo->list_lock);
1741 new->flags = old->flags;
1742 new->preferred_loc = old->preferred_loc;
1743 new->prefetch_loc = old->prefetch_loc;
1744 new->actual_loc = old->actual_loc;
1745 new->granularity = old->granularity;
1746 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1747 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1752 /**
1753 * svm_range_handle_overlap - split overlap ranges
1754 * @svms: svm range list header
1755 * @new: range added with these attributes
1756 * @start: range added start address, in pages
1757 * @last: range last address, in pages
1758 * @update_list: output, the ranges whose attributes are updated. For set_attr, this
1759 * will do validation and map to GPUs. For unmap, these will be
1760 * removed and unmapped from GPUs
1761 * @insert_list: output, the ranges will be inserted into svms, attributes are
1762 * not changed. For set_attr, these will be added into svms.
1763 * @remove_list: output, the ranges will be removed from svms
1764 * @left: the remaining range after overlap. For set_attr, this will be added
1765 * as a new range.
1767 * There are 5 overlap cases in total.
1769 * This function handles overlap of an address interval with existing
1770 * struct svm_ranges for applying new attributes. This may require
1771 * splitting existing struct svm_ranges. All changes should be applied to
1772 * the range_list and interval tree transactionally. If any split operation
1773 * fails, the entire update fails. Therefore the existing overlapping
1774 * svm_ranges are cloned and the original svm_ranges are left unchanged. If the
1775 * transaction succeeds, the modified clones are added and the originals
1776 * freed. Otherwise the clones are removed and the old svm_ranges remain.
1778 * Context: The caller must hold svms->lock
1779 */
1780 static int
1781 svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
1782 unsigned long start, unsigned long last,
1783 struct list_head *update_list,
1784 struct list_head *insert_list,
1785 struct list_head *remove_list,
1786 unsigned long *left)
1788 struct interval_tree_node *node;
1789 struct svm_range *prange;
1790 struct svm_range *tmp;
1793 INIT_LIST_HEAD(update_list);
1794 INIT_LIST_HEAD(insert_list);
1795 INIT_LIST_HEAD(remove_list);
1797 node = interval_tree_iter_first(&svms->objects, start, last);
1799 struct interval_tree_node *next;
1800 struct svm_range *old;
1801 unsigned long next_start;
1803 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
1806 old = container_of(node, struct svm_range, it_node);
1807 next = interval_tree_iter_next(node, start, last);
1808 next_start = min(node->last, last) + 1;
1810 if (node->start < start || node->last > last) {
1811 /* node intersects the updated range, clone+split it */
1812 prange = svm_range_clone(old);
1818 list_add(&old->remove_list, remove_list);
1819 list_add(&prange->insert_list, insert_list);
1821 if (node->start < start) {
1822 pr_debug("change old range start\n");
1823 r = svm_range_split_head(prange, new, start,
1828 if (node->last > last) {
1829 pr_debug("change old range last\n");
1830 r = svm_range_split_tail(prange, new, last,
1836 /* The node is contained within start..last,
1837 * attributes must be updated
1838 */
1842 if (!svm_range_is_same_attrs(prange, new))
1843 list_add(&prange->update_list, update_list);
1845 /* insert a new node if needed */
1846 if (node->start > start) {
1847 prange = svm_range_new(prange->svms, start,
1854 list_add(&prange->insert_list, insert_list);
1855 list_add(&prange->update_list, update_list);
1862 if (left && start <= last)
1863 *left = last - start + 1;
1867 list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
1868 svm_range_free(prange);
1874 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
1875 struct svm_range *prange)
1877 unsigned long start;
1880 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
1881 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
1883 if (prange->start == start && prange->last == last)
1886 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1887 prange->svms, prange, start, last, prange->start,
1890 if (start != 0 && last != 0) {
1891 interval_tree_remove(&prange->it_node, &prange->svms->objects);
1892 svm_range_remove_notifier(prange);
1894 prange->it_node.start = prange->start;
1895 prange->it_node.last = prange->last;
1897 interval_tree_insert(&prange->it_node, &prange->svms->objects);
1898 svm_range_add_notifier_locked(mm, prange);
1902 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
1904 struct mm_struct *mm = prange->work_item.mm;
1906 switch (prange->work_item.op) {
1908 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1909 svms, prange, prange->start, prange->last);
1911 case SVM_OP_UNMAP_RANGE:
1912 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1913 svms, prange, prange->start, prange->last);
1914 svm_range_unlink(prange);
1915 svm_range_remove_notifier(prange);
1916 svm_range_free(prange);
1918 case SVM_OP_UPDATE_RANGE_NOTIFIER:
1919 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1920 svms, prange, prange->start, prange->last);
1921 svm_range_update_notifier_and_interval_tree(mm, prange);
1923 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
1924 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1925 svms, prange, prange->start, prange->last);
1926 svm_range_update_notifier_and_interval_tree(mm, prange);
1927 /* TODO: implement deferred validation and mapping */
1929 case SVM_OP_ADD_RANGE:
1930 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
1931 prange->start, prange->last);
1932 svm_range_add_to_svms(prange);
1933 svm_range_add_notifier_locked(mm, prange);
1935 case SVM_OP_ADD_RANGE_AND_MAP:
1936 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
1937 prange, prange->start, prange->last);
1938 svm_range_add_to_svms(prange);
1939 svm_range_add_notifier_locked(mm, prange);
1940 /* TODO: implement deferred validation and mapping */
1943 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
1944 prange->work_item.op);
1948 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
1950 struct kfd_process_device *pdd;
1951 struct amdgpu_device *adev;
1952 struct kfd_process *p;
1955 p = container_of(svms, struct kfd_process, svms);
1957 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
1962 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
1963 adev = (struct amdgpu_device *)pdd->dev->kgd;
1965 amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
1966 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
1970 static void svm_range_deferred_list_work(struct work_struct *work)
1972 struct svm_range_list *svms;
1973 struct svm_range *prange;
1974 struct mm_struct *mm;
1976 svms = container_of(work, struct svm_range_list, deferred_list_work);
1977 pr_debug("enter svms 0x%p\n", svms);
1979 spin_lock(&svms->deferred_list_lock);
1980 while (!list_empty(&svms->deferred_range_list)) {
1981 prange = list_first_entry(&svms->deferred_range_list,
1982 struct svm_range, deferred_list);
1983 spin_unlock(&svms->deferred_list_lock);
1984 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
1985 prange->start, prange->last, prange->work_item.op);
1987 /* Make sure no stale retry fault comes in after the range is freed */
1988 if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
1989 svm_range_drain_retry_fault(prange->svms);
1991 mm = prange->work_item.mm;
1992 mmap_write_lock(mm);
1993 mutex_lock(&svms->lock);
1995 /* Removal from deferred_list must happen while holding the mmap write
1996 * lock; otherwise svm_range_list_lock_and_flush_work may take the mmap
1997 * write lock and continue because deferred_list is empty, and then
1998 * deferred_list handling is blocked behind the mmap write lock.
1999 */
2000 spin_lock(&svms->deferred_list_lock);
2001 list_del_init(&prange->deferred_list);
2002 spin_unlock(&svms->deferred_list_lock);
2004 mutex_lock(&prange->migrate_mutex);
2005 while (!list_empty(&prange->child_list)) {
2006 struct svm_range *pchild;
2008 pchild = list_first_entry(&prange->child_list,
2009 struct svm_range, child_list);
2010 pr_debug("child prange 0x%p op %d\n", pchild,
2011 pchild->work_item.op);
2012 list_del_init(&pchild->child_list);
2013 svm_range_handle_list_op(svms, pchild);
2015 mutex_unlock(&prange->migrate_mutex);
2017 svm_range_handle_list_op(svms, prange);
2018 mutex_unlock(&svms->lock);
2019 mmap_write_unlock(mm);
2021 spin_lock(&svms->deferred_list_lock);
2023 spin_unlock(&svms->deferred_list_lock);
2025 pr_debug("exit svms 0x%p\n", svms);
2029 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2030 struct mm_struct *mm, enum svm_work_list_ops op)
2032 spin_lock(&svms->deferred_list_lock);
2033 /* if prange is on the deferred list */
2034 if (!list_empty(&prange->deferred_list)) {
2035 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2036 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2037 if (op != SVM_OP_NULL &&
2038 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2039 prange->work_item.op = op;
2041 prange->work_item.op = op;
2042 prange->work_item.mm = mm;
2043 list_add_tail(&prange->deferred_list,
2044 &prange->svms->deferred_range_list);
2045 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2046 prange, prange->start, prange->last, op);
2048 spin_unlock(&svms->deferred_list_lock);
2051 void schedule_deferred_list_work(struct svm_range_list *svms)
2053 spin_lock(&svms->deferred_list_lock);
2054 if (!list_empty(&svms->deferred_range_list))
2055 schedule_work(&svms->deferred_list_work);
2056 spin_unlock(&svms->deferred_list_lock);
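/* svm_range_unmap_split() below clips a CPU unmap against one prange: the
 * part of the prange before the unmapped region stays in place, a part after
 * it is split off as an SVM_OP_ADD_RANGE child, and the overlapped part
 * becomes an SVM_OP_UNMAP_RANGE child (or the prange itself is marked
 * SVM_OP_UNMAP_RANGE when it is fully covered), so the deferred worker can
 * unlink and free it.
 */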
2060 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2061 struct svm_range *prange, unsigned long start,
2064 struct svm_range *head;
2065 struct svm_range *tail;
2067 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2068 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2069 prange->start, prange->last);
2072 if (start > prange->last || last < prange->start)
2075 head = tail = prange;
2076 if (start > prange->start)
2077 svm_range_split(prange, prange->start, start - 1, &tail);
2078 if (last < tail->last)
2079 svm_range_split(tail, last + 1, tail->last, &head);
2081 if (head != prange && tail != prange) {
2082 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2083 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2084 } else if (tail != prange) {
2085 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2086 } else if (head != prange) {
2087 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2088 } else if (parent != prange) {
2089 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2094 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2095 unsigned long start, unsigned long last)
2097 struct svm_range_list *svms;
2098 struct svm_range *pchild;
2099 struct kfd_process *p;
2103 p = kfd_lookup_process_by_mm(mm);
2108 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2109 prange, prange->start, prange->last, start, last);
2111 unmap_parent = start <= prange->start && last >= prange->last;
2113 list_for_each_entry(pchild, &prange->child_list, child_list) {
2114 mutex_lock_nested(&pchild->lock, 1);
2115 s = max(start, pchild->start);
2116 l = min(last, pchild->last);
2118 svm_range_unmap_from_gpus(pchild, s, l);
2119 svm_range_unmap_split(mm, prange, pchild, start, last);
2120 mutex_unlock(&pchild->lock);
2122 s = max(start, prange->start);
2123 l = min(last, prange->last);
2125 svm_range_unmap_from_gpus(prange, s, l);
2126 svm_range_unmap_split(mm, prange, prange, start, last);
2129 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2131 svm_range_add_list_work(svms, prange, mm,
2132 SVM_OP_UPDATE_RANGE_NOTIFIER);
2133 schedule_deferred_list_work(svms);
2135 kfd_unref_process(p);
2138 /**
2139 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2141 * If the event is MMU_NOTIFY_UNMAP, this is from a CPU unmapping the range;
2142 * otherwise it is from migration or the CPU page invalidation callback.
2144 * For an unmap event, unmap the range from the GPUs, remove the prange from
2145 * svms in a deferred work thread, and split the prange if only part of it is unmapped.
2147 * For an invalidation event, if GPU retry fault is not enabled, evict the queues,
2148 * then schedule svm_range_restore_work to update the GPU mapping and resume the queues.
2149 * If GPU retry fault is enabled, unmap the svm range from the GPU; the retry fault
2150 * will update the GPU mapping to recover.
2152 * Context: mmap lock and notifier_invalidate_start lock are held
2153 * for the invalidate event, prange lock is held if this is from migration
2154 */
2155 static bool
2156 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2157 const struct mmu_notifier_range *range,
2158 unsigned long cur_seq)
2160 struct svm_range *prange;
2161 unsigned long start;
2164 if (range->event == MMU_NOTIFY_RELEASE)
2167 start = mni->interval_tree.start;
2168 last = mni->interval_tree.last;
2169 start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
2170 last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
2171 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2172 start, last, range->start >> PAGE_SHIFT,
2173 (range->end - 1) >> PAGE_SHIFT,
2174 mni->interval_tree.start >> PAGE_SHIFT,
2175 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2177 prange = container_of(mni, struct svm_range, notifier);
2179 svm_range_lock(prange);
2180 mmu_interval_set_seq(mni, cur_seq);
2182 switch (range->event) {
2183 case MMU_NOTIFY_UNMAP:
2184 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2185 break;
2186 default:
2187 svm_range_evict(prange, mni->mm, start, last);
2188 break;
2189 }
2191 svm_range_unlock(prange);
2193 return true;
2194 }
2197 * svm_range_from_addr - find svm range from fault address
2198 * @svms: svm range list header
2199 * @addr: address to search range interval tree, in pages
2200 * @parent: parent range if range is on child list
2202 * Context: The caller must hold svms->lock
2204 * Return: the svm_range found or NULL
2207 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2208 struct svm_range **parent)
2210 struct interval_tree_node *node;
2211 struct svm_range *prange;
2212 struct svm_range *pchild;
2214 node = interval_tree_iter_first(&svms->objects, addr, addr);
2218 prange = container_of(node, struct svm_range, it_node);
2219 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2220 addr, prange->start, prange->last, node->start, node->last);
2222 if (addr >= prange->start && addr <= prange->last) {
2227 list_for_each_entry(pchild, &prange->child_list, child_list)
2228 if (addr >= pchild->start && addr <= pchild->last) {
2229 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2230 addr, pchild->start, pchild->last);
2239 /* svm_range_best_restore_location - decide the best fault restore location
2240 * @prange: svm range structure
2241 * @adev: the GPU on which vm fault happened
2243 * This is only called when xnack is on, to decide the best location to restore
2244 * the range mapping after GPU vm fault. Caller uses the best location to do
2245 * migration if actual loc is not best location, then update GPU page table
2246 * mapping to the best location.
2248 * If the faulting GPU is the range's preferred location, best_loc is the preferred location.
2249 * If the faulting GPU index is set in the range's ACCESSIBLE bitmap, best_loc is the faulting GPU.
2250 * If the faulting GPU index is set in the range's ACCESSIBLE_IN_PLACE bitmap, then
2251 *   if the range's actual location is the CPU, best_loc is the CPU;
2252 *   if the faulting GPU is on the same XGMI hive as the GPU holding the range's
2253 *   actual location, best_loc is that actual location.
2254 * Otherwise the faulting GPU has no access and best_loc is -1.
2256 * Return:
2257 * -1 - the faulting GPU has no access to the range
2258 * 0 for CPU, otherwise the GPU id
2261 svm_range_best_restore_location(struct svm_range *prange,
2262 struct amdgpu_device *adev,
2265 struct amdgpu_device *bo_adev;
2266 struct kfd_process *p;
2270 p = container_of(prange->svms, struct kfd_process, svms);
2272 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
2274 pr_debug("failed to get gpuid from kgd\n");
2278 if (prange->preferred_loc == gpuid)
2279 return prange->preferred_loc;
2281 if (test_bit(*gpuidx, prange->bitmap_access))
2284 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2285 if (!prange->actual_loc)
2288 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2289 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2290 return prange->actual_loc;
2298 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2299 unsigned long *start, unsigned long *last)
2301 struct vm_area_struct *vma;
2302 struct interval_tree_node *node;
2303 unsigned long start_limit, end_limit;
2305 vma = find_vma(p->mm, addr << PAGE_SHIFT);
2306 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2307 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2310 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2311 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2312 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2313 (unsigned long)ALIGN(addr + 1, 2UL << 8));
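	/* 2UL << 8 is 512 pages, i.e. a 2MB window with 4KB pages: the new
	 * range is limited to the 512-page aligned block containing the fault
	 * address, clamped to the enclosing VMA, and (below) further clamped
	 * so it does not overlap neighbouring registered ranges.
	 */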
2314 /* First range that starts after the fault address */
2315 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2317 end_limit = min(end_limit, node->start);
2318 /* Last range that ends before the fault address */
2319 node = container_of(rb_prev(&node->rb),
2320 struct interval_tree_node, rb);
2322 /* Last range must end before addr because
2323 * there was no range after addr
2325 node = container_of(rb_last(&p->svms.objects.rb_root),
2326 struct interval_tree_node, rb);
2329 if (node->last >= addr) {
2330 WARN(1, "Overlap with prev node and page fault addr\n");
2333 start_limit = max(start_limit, node->last + 1);
2336 *start = start_limit;
2337 *last = end_limit - 1;
2339 pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
2340 vma->vm_start >> PAGE_SHIFT, *start,
2341 vma->vm_end >> PAGE_SHIFT, *last);
2347 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2348 struct kfd_process *p,
2349 struct mm_struct *mm,
2352 struct svm_range *prange = NULL;
2353 unsigned long start, last;
2354 uint32_t gpuid, gpuidx;
2356 if (svm_range_get_range_boundaries(p, addr, &start, &last))
2359 prange = svm_range_new(&p->svms, start, last);
2361 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2364 if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
2365 pr_debug("failed to get gpuid from kgd\n");
2366 svm_range_free(prange);
2370 svm_range_add_to_svms(prange);
2371 svm_range_add_notifier_locked(mm, prange);
2376 /* svm_range_skip_recover - decide if prange can be recovered
2377 * @prange: svm range structure
2379 * The GPU vm retry fault handler skips recovering the range in these cases:
2380 * 1. prange is on the deferred list to be removed after unmap; the fault is
2381 *    stale and the deferred list work drains it before freeing the prange.
2382 * 2. prange is on the deferred list waiting for its interval notifier to be
2383 *    added after a split, or
2384 * 3. prange is a child range split from a parent; recover it later, after the interval notifier is added.
2386 * Return: true to skip recover, false to recover
2388 static bool svm_range_skip_recover(struct svm_range *prange)
2390 struct svm_range_list *svms = prange->svms;
2392 spin_lock(&svms->deferred_list_lock);
2393 if (list_empty(&prange->deferred_list) &&
2394 list_empty(&prange->child_list)) {
2395 spin_unlock(&svms->deferred_list_lock);
2398 spin_unlock(&svms->deferred_list_lock);
2400 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2401 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2402 svms, prange, prange->start, prange->last);
2405 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2406 prange->work_item.op == SVM_OP_ADD_RANGE) {
2407 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2408 svms, prange, prange->start, prange->last);
2415 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2418 struct kfd_process_device *pdd;
2420 /* fault is on different page of same range
2421 * or fault is skipped to recover later
2422 * or fault is on invalid virtual address
2424 if (gpuidx == MAX_GPU_INSTANCE) {
2428 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
2433 /* fault is recovered,
2434 * or fault cannot be recovered because the GPU has no access to the range
2436 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2438 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2442 svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
2444 unsigned long requested = VM_READ;
2445 struct vm_area_struct *vma;
2448 requested |= VM_WRITE;
2450 vma = find_vma(mm, addr << PAGE_SHIFT);
2451 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2452 pr_debug("address 0x%llx VMA is removed\n", addr);
2456 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2458 return (vma->vm_flags & requested) == requested;
2462 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2463 uint64_t addr, bool write_fault)
2465 struct mm_struct *mm = NULL;
2466 struct svm_range_list *svms;
2467 struct svm_range *prange;
2468 struct kfd_process *p;
2469 uint64_t timestamp;
2470 int32_t best_loc;
2471 int32_t gpuidx = MAX_GPU_INSTANCE;
2472 bool write_locked = false;
2473 int r = 0;
2475 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2476 pr_debug("device does not support SVM\n");
2480 p = kfd_lookup_process_by_pasid(pasid);
2482 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2485 if (!p->xnack_enabled) {
2486 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2492 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2494 mm = get_task_mm(p->lead_thread);
2496 pr_debug("svms 0x%p failed to get mm\n", svms);
2503 mutex_lock(&svms->lock);
2504 prange = svm_range_from_addr(svms, addr, NULL);
2506 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2508 if (!write_locked) {
2509 /* Need the write lock to create new range with MMU notifier.
2510 * Also flush pending deferred work to make sure the interval
2511 * tree is up to date before we add a new range
2513 mutex_unlock(&svms->lock);
2514 mmap_read_unlock(mm);
2515 mmap_write_lock(mm);
2516 write_locked = true;
2517 goto retry_write_locked;
2519 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2521 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2523 mmap_write_downgrade(mm);
2525 goto out_unlock_svms;
2529 mmap_write_downgrade(mm);
2531 mutex_lock(&prange->migrate_mutex);
2533 if (svm_range_skip_recover(prange)) {
2534 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2535 goto out_unlock_range;
2538 timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2539 /* skip duplicate vm fault on different pages of same range */
2540 if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2541 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2542 svms, prange->start, prange->last);
2543 goto out_unlock_range;
2546 if (!svm_fault_allowed(mm, addr, write_fault)) {
2547 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2548 write_fault ? "write" : "read");
2550 goto out_unlock_range;
2553 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2554 if (best_loc == -1) {
2555 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2556 svms, prange->start, prange->last);
2558 goto out_unlock_range;
2561 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2562 svms, prange->start, prange->last, best_loc,
2563 prange->actual_loc);
2565 if (prange->actual_loc != best_loc) {
2567 r = svm_migrate_to_vram(prange, best_loc, mm);
2569 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2571 /* Fallback to system memory if migration to
2572 * vram failed
2573 */
2574 if (prange->actual_loc)
2575 r = svm_migrate_vram_to_ram(prange, mm);
2580 r = svm_migrate_vram_to_ram(prange, mm);
2583 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2584 r, svms, prange->start, prange->last);
2585 goto out_unlock_range;
2589 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2591 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2592 r, svms, prange->start, prange->last);
2595 mutex_unlock(&prange->migrate_mutex);
2597 mutex_unlock(&svms->lock);
2598 mmap_read_unlock(mm);
2600 svm_range_count_fault(adev, p, gpuidx);
2604 kfd_unref_process(p);
2607 pr_debug("recover vm fault later\n");
2608 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2614 void svm_range_list_fini(struct kfd_process *p)
2616 struct svm_range *prange;
2617 struct svm_range *next;
2619 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2621 /* Ensure list work is finished before process is destroyed */
2622 flush_work(&p->svms.deferred_list_work);
2624 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2625 svm_range_unlink(prange);
2626 svm_range_remove_notifier(prange);
2627 svm_range_free(prange);
2630 mutex_destroy(&p->svms.lock);
2632 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2635 int svm_range_list_init(struct kfd_process *p)
2637 struct svm_range_list *svms = &p->svms;
2640 svms->objects = RB_ROOT_CACHED;
2641 mutex_init(&svms->lock);
2642 INIT_LIST_HEAD(&svms->list);
2643 atomic_set(&svms->evicted_ranges, 0);
2644 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2645 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2646 INIT_LIST_HEAD(&svms->deferred_range_list);
2647 spin_lock_init(&svms->deferred_list_lock);
2649 for (i = 0; i < p->n_pdds; i++)
2650 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2651 bitmap_set(svms->bitmap_supported, i, 1);
2657 * svm_range_is_valid - check if virtual address range is valid
2658 * @mm: current process mm_struct
2659 * @start: range start address, in pages
2660 * @size: range size, in pages
2662 * A valid virtual address range is one fully covered by CPU VMAs, none of which is a device VMA (VM_IO, VM_PFNMAP or VM_MIXEDMAP)
2664 * Context: Process context
2667 * true - valid svm range
2668 * false - invalid svm range
2671 svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
2673 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2674 struct vm_area_struct *vma;
2677 start <<= PAGE_SHIFT;
2678 end = start + (size << PAGE_SHIFT);
2680 do {
2681 vma = find_vma(mm, start);
2682 if (!vma || start < vma->vm_start ||
2683 (vma->vm_flags & device_vma))
2684 return false;
2685 start = min(end, vma->vm_end);
2686 } while (start < end);
2688 return true;
2689 }
2692 * svm_range_add - add svm range and handle overlap
2693 * @p: the process whose svms the range is added to
2694 * @start: range start address, in pages
2695 * @size: range size, in pages
2696 * @nattr: number of attributes
2697 * @attrs: array of attributes
2698 * @update_list: output, the ranges that need to be validated and have their GPU mappings updated
2699 * @insert_list: output, the ranges that need to be inserted into svms
2700 * @remove_list: output, the ranges that are replaced and need to be removed from svms
2702 * Check if the virtual address range overlaps with registered ranges; if so,
2703 * split the overlapped ranges and copy/adjust the page addresses and VRAM
2704 * nodes in the old and new ranges.
2706 * Context: Process context, caller must hold svms->lock
2709 * 0 - OK, otherwise error code
2712 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2713 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2714 struct list_head *update_list, struct list_head *insert_list,
2715 struct list_head *remove_list)
2717 uint64_t last = start + size - 1UL;
2718 struct svm_range_list *svms;
2719 struct svm_range new = {0};
2720 struct svm_range *prange;
2721 unsigned long left = 0;
2724 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
2726 svm_range_apply_attrs(p, &new, nattr, attrs);
2730 r = svm_range_handle_overlap(svms, &new, start, last, update_list,
2731 insert_list, remove_list, &left);
2732 if (r)
2733 return r;
2735 if (left) {
2736 prange = svm_range_new(svms, last - left + 1, last);
2737 list_add(&prange->insert_list, insert_list);
2738 list_add(&prange->update_list, update_list);
2739 }
2741 return 0;
2742 }
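/* Illustrative note: as used above, "left" is the number of trailing pages of
 * [start, last] still uncovered after svm_range_handle_overlap(), so a new
 * prange spanning [last - left + 1, last] is created and queued for both
 * insertion and validation.  E.g. with [start, last] = [0x1000, 0x1fff] and
 * existing ranges covering up to 0x17ff, left is 0x800 and the new prange
 * covers [0x1800, 0x1fff].
 */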
2745 * svm_range_best_prefetch_location - decide the best prefetch location
2746 * @prange: svm range structure
2748 * For xnack off:
2749 * If the range maps to a single GPU, the best prefetch location is
2750 * prefetch_loc, which can be the CPU or a GPU.
2752 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best
2753 * prefetch location is the prefetch_loc GPU only if those GPUs are on the
2754 * same XGMI hive; otherwise it is always the CPU, because a GPU cannot have a
2755 * coherent mapping of another GPU's VRAM even over a large-BAR PCIe connection.
2757 * For xnack on:
2758 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
2759 * location is prefetch_loc; access from other GPUs will generate a vm fault
2760 * and trigger migration.
2761 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
2762 * is the prefetch_loc GPU only if they are on the same XGMI hive; otherwise it
2763 * is always the CPU.
2765 * Context: Process context
2768 * 0 for CPU or GPU id
2771 svm_range_best_prefetch_location(struct svm_range *prange)
2773 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
2774 uint32_t best_loc = prange->prefetch_loc;
2775 struct kfd_process_device *pdd;
2776 struct amdgpu_device *bo_adev;
2777 struct amdgpu_device *adev;
2778 struct kfd_process *p;
2781 p = container_of(prange->svms, struct kfd_process, svms);
2783 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
2786 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
2788 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
2793 if (p->xnack_enabled)
2794 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
2795 else
2796 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
2797 MAX_GPU_INSTANCE);
2799 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
2800 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2801 if (!pdd) {
2802 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
2803 continue;
2804 }
2805 adev = (struct amdgpu_device *)pdd->dev->kgd;
2807 if (adev == bo_adev)
2808 continue;
2810 if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
2811 best_loc = 0;
2812 break;
2813 }
2817 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
2818 p->xnack_enabled, &p->svms, prange->start, prange->last,
2824 /* FIXME: This is a workaround for page locking bug when some pages are
2825 * invalid during migration to VRAM
2827 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
2830 struct hmm_range *hmm_range;
2833 if (prange->validated_once)
2836 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
2837 prange->start << PAGE_SHIFT,
2838 prange->npages, &hmm_range,
2839 false, true, owner);
2841 amdgpu_hmm_range_get_pages_done(hmm_range);
2842 prange->validated_once = true;
2846 /* svm_range_trigger_migration - start page migration if prefetch loc changed
2847 * @mm: current process mm_struct
2848 * @prange: svm range structure
2849 * @migrated: output, true if migration is triggered
2851 * If the range's prefetch_loc is a GPU and its actual loc is cpu 0, migrate
2852 * the range from ram to vram.
2853 * If the range's prefetch_loc is cpu 0 and its actual loc is a GPU, migrate
2854 * the range from vram to ram.
2856 * If GPU vm fault retry is not enabled, migration interacts with the MMU
2857 * notifier callback:
2858 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
2859 *    svm_range_evict stops all queues and schedules the restore work
2860 * 2. svm_range_restore_work waits for migration to finish via
2861 *    a. svm_range_validate_vram taking prange->migrate_mutex
2862 *    b. svm_range_validate_ram HMM get-pages waiting for the CPU fault handler to return
2863 * 3. the restore work updates the GPU mappings and resumes all queues.
2865 * Context: Process context
2868 * 0 - OK, otherwise - error code of migration
2871 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
2878 best_loc = svm_range_best_prefetch_location(prange);
2880 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
2881 best_loc == prange->actual_loc)
2885 r = svm_migrate_vram_to_ram(prange, mm);
2890 r = svm_migrate_to_vram(prange, best_loc, mm);
2896 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
2901 if (dma_fence_is_signaled(&fence->base))
2904 if (fence->svm_bo) {
2905 WRITE_ONCE(fence->svm_bo->evicting, 1);
2906 schedule_work(&fence->svm_bo->eviction_work);
2912 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
2914 struct svm_range_bo *svm_bo;
2915 struct kfd_process *p;
2916 struct mm_struct *mm;
2918 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
2919 if (!svm_bo_ref_unless_zero(svm_bo))
2920 return; /* svm_bo was freed while eviction was pending */
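	/* From here on the worker's job, as implemented below, is: migrate
	 * every range still backed by this svm_bo back to system memory,
	 * detach those ranges from the svm_bo, and only then signal the
	 * eviction fence so the eviction that scheduled this work can make
	 * progress.
	 */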
2922 /* svm_range_bo_release destroys this worker thread. So during
2923 * the lifetime of this thread, kfd_process and mm will be valid.
2925 p = container_of(svm_bo->svms, struct kfd_process, svms);
2931 spin_lock(&svm_bo->list_lock);
2932 while (!list_empty(&svm_bo->range_list)) {
2933 struct svm_range *prange =
2934 list_first_entry(&svm_bo->range_list,
2935 struct svm_range, svm_bo_list);
2936 list_del_init(&prange->svm_bo_list);
2937 spin_unlock(&svm_bo->list_lock);
2939 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
2940 prange->start, prange->last);
2942 mutex_lock(&prange->migrate_mutex);
2943 svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
2945 mutex_lock(&prange->lock);
2946 prange->svm_bo = NULL;
2947 mutex_unlock(&prange->lock);
2949 mutex_unlock(&prange->migrate_mutex);
2951 spin_lock(&svm_bo->list_lock);
2953 spin_unlock(&svm_bo->list_lock);
2954 mmap_read_unlock(mm);
2956 dma_fence_signal(&svm_bo->eviction_fence->base);
2957 /* This is the last reference to svm_bo, after svm_range_vram_node_free
2958 * has been called in svm_migrate_vram_to_ram
2960 WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
2961 svm_range_bo_unref(svm_bo);
2965 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
2966 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
2968 struct amdkfd_process_info *process_info = p->kgd_process_info;
2969 struct mm_struct *mm = current->mm;
2970 struct list_head update_list;
2971 struct list_head insert_list;
2972 struct list_head remove_list;
2973 struct svm_range_list *svms;
2974 struct svm_range *prange;
2975 struct svm_range *next;
2978 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
2979 p->pasid, &p->svms, start, start + size - 1, size);
2981 r = svm_range_check_attr(p, nattr, attrs);
2987 mutex_lock(&process_info->lock);
2989 svm_range_list_lock_and_flush_work(svms, mm);
2991 if (!svm_range_is_valid(mm, start, size)) {
2992 pr_debug("invalid range\n");
2994 mmap_write_unlock(mm);
2998 mutex_lock(&svms->lock);
3000 /* Add new range and split existing ranges as needed */
3001 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3002 &insert_list, &remove_list);
3004 mutex_unlock(&svms->lock);
3005 mmap_write_unlock(mm);
3008 /* Apply changes as a transaction */
3009 list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
3010 svm_range_add_to_svms(prange);
3011 svm_range_add_notifier_locked(mm, prange);
3013 list_for_each_entry(prange, &update_list, update_list) {
3014 svm_range_apply_attrs(p, prange, nattr, attrs);
3015 /* TODO: unmap ranges from GPU that lost access */
3017 list_for_each_entry_safe(prange, next, &remove_list,
3019 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3020 prange->svms, prange, prange->start,
3022 svm_range_unlink(prange);
3023 svm_range_remove_notifier(prange);
3024 svm_range_free(prange);
3027 mmap_write_downgrade(mm);
3028 /* Trigger migrations and revalidate and map to GPUs as needed. If
3029 * this fails we may be left with partially completed actions. There
3030 * is no clean way of rolling back to the previous state in such a
3031 * case because the rollback wouldn't be guaranteed to work either.
3033 list_for_each_entry(prange, &update_list, update_list) {
3036 mutex_lock(&prange->migrate_mutex);
3038 r = svm_range_trigger_migration(mm, prange, &migrated);
3040 goto out_unlock_range;
3042 if (migrated && !p->xnack_enabled) {
3043 pr_debug("restore_work will update mappings of GPUs\n");
3044 mutex_unlock(&prange->migrate_mutex);
3048 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3051 pr_debug("failed %d to map svm range\n", r);
3054 mutex_unlock(&prange->migrate_mutex);
3059 svm_range_debug_dump(svms);
3061 mutex_unlock(&svms->lock);
3062 mmap_read_unlock(mm);
3064 mutex_unlock(&process_info->lock);
3066 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3067 &p->svms, start, start + size - 1, r);
3073 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3074 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3076 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3077 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3078 bool get_preferred_loc = false;
3079 bool get_prefetch_loc = false;
3080 bool get_granularity = false;
3081 bool get_accessible = false;
3082 bool get_flags = false;
3083 uint64_t last = start + size - 1UL;
3084 struct mm_struct *mm = current->mm;
3085 uint8_t granularity = 0xff;
3086 struct interval_tree_node *node;
3087 struct svm_range_list *svms;
3088 struct svm_range *prange;
3089 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3090 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3091 uint32_t flags_and = 0xffffffff;
3092 uint32_t flags_or = 0;
3096 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3097 start + size - 1, nattr);
3099 /* Flush pending deferred work to avoid racing with deferred actions from
3100 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3101 * can still race with get_attr because we don't hold the mmap lock. But that
3102 * would be a race condition in the application anyway, and undefined
3103 * behaviour is acceptable in that case.
3105 flush_work(&p->svms.deferred_list_work);
3108 if (!svm_range_is_valid(mm, start, size)) {
3109 pr_debug("invalid range\n");
3110 mmap_read_unlock(mm);
3113 mmap_read_unlock(mm);
3115 for (i = 0; i < nattr; i++) {
3116 switch (attrs[i].type) {
3117 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3118 get_preferred_loc = true;
3120 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3121 get_prefetch_loc = true;
3123 case KFD_IOCTL_SVM_ATTR_ACCESS:
3124 get_accessible = true;
3126 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3127 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3130 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3131 get_granularity = true;
3133 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3134 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3137 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3144 mutex_lock(&svms->lock);
3146 node = interval_tree_iter_first(&svms->objects, start, last);
3148 pr_debug("range attrs not found return default values\n");
3149 svm_range_set_default_attributes(&location, &prefetch_loc,
3150 &granularity, &flags_and);
3151 flags_or = flags_and;
3152 if (p->xnack_enabled)
3153 bitmap_copy(bitmap_access, svms->bitmap_supported,
3156 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3157 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3160 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3161 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
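	/* The loop below reports the attributes common to every range that
	 * intersects [start, last]: preferred and prefetch locations are only
	 * returned if identical across all ranges (otherwise UNDEFINED), the
	 * access/in-place bitmaps are AND-ed together, flags_and accumulates
	 * the flags set in every range while ~flags_or gives the flags clear
	 * in every range, and granularity is the minimum over the ranges.
	 */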
3164 struct interval_tree_node *next;
3166 prange = container_of(node, struct svm_range, it_node);
3167 next = interval_tree_iter_next(node, start, last);
3169 if (get_preferred_loc) {
3170 if (prange->preferred_loc ==
3171 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3172 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3173 location != prange->preferred_loc)) {
3174 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3175 get_preferred_loc = false;
3177 location = prange->preferred_loc;
3180 if (get_prefetch_loc) {
3181 if (prange->prefetch_loc ==
3182 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3183 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3184 prefetch_loc != prange->prefetch_loc)) {
3185 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3186 get_prefetch_loc = false;
3188 prefetch_loc = prange->prefetch_loc;
3191 if (get_accessible) {
3192 bitmap_and(bitmap_access, bitmap_access,
3193 prange->bitmap_access, MAX_GPU_INSTANCE);
3194 bitmap_and(bitmap_aip, bitmap_aip,
3195 prange->bitmap_aip, MAX_GPU_INSTANCE);
3198 flags_and &= prange->flags;
3199 flags_or |= prange->flags;
3202 if (get_granularity && prange->granularity < granularity)
3203 granularity = prange->granularity;
3208 mutex_unlock(&svms->lock);
3210 for (i = 0; i < nattr; i++) {
3211 switch (attrs[i].type) {
3212 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3213 attrs[i].value = location;
3215 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3216 attrs[i].value = prefetch_loc;
3218 case KFD_IOCTL_SVM_ATTR_ACCESS:
3219 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3222 pr_debug("invalid gpuid %x\n", attrs[i].value);
3225 if (test_bit(gpuidx, bitmap_access))
3226 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3227 else if (test_bit(gpuidx, bitmap_aip))
3229 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3231 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3233 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3234 attrs[i].value = flags_and;
3236 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3237 attrs[i].value = ~flags_or;
3239 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3240 attrs[i].value = (uint32_t)granularity;
3249 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3250 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3254 start >>= PAGE_SHIFT;
3255 size >>= PAGE_SHIFT;
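	/* start and size arrive in bytes from the ioctl caller and are
	 * converted to page units here; the per-op handlers below operate
	 * entirely in pages.  (This is the entry point reached from the KFD
	 * SVM ioctl handler in kfd_chardev.c.)
	 */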
3258 case KFD_IOCTL_SVM_OP_SET_ATTR:
3259 r = svm_range_set_attr(p, start, size, nattrs, attrs);
3261 case KFD_IOCTL_SVM_OP_GET_ATTR:
3262 r = svm_range_get_attr(p, start, size, nattrs, attrs);