// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)

/* A giant svm range is split into smaller ranges based on this value. It is
 * the minimum of 1/32 of the VRAM size over all dGPUs/APUs, clamped between
 * 2MB and 1GB, and aligned to 2MB.
 */
static uint64_t max_svm_range_pages;
struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};
/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}
static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
				     prange->start << PAGE_SHIFT,
				     prange->npages << PAGE_SHIFT,
				     &svm_range_mn_ops);
}
/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_move_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}
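/* Note on the dma_addr encoding (inferred from svm_range_dma_map_dev
 * below): the per-GPU prange->dma_addr[] array does double duty. For
 * system memory pages it holds real DMA addresses from dma_map_page();
 * for pages resident in VRAM it holds the offset into the owning
 * device's VRAM aperture, tagged with the SVM_RANGE_VRAM_DOMAIN bit.
 * That is why the helper above rejects tagged entries: they are not
 * DMA mappings that could be passed to dma_unmap_page().
 */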
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev =
				amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.dev->pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}

	return 0;
}
static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;
		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}
void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->adev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}
static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
{
	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	svm_range_free_dma_mappings(prange);

	if (update_mem_usage && !p->xnack_enabled) {
		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
	}
	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last, bool update_mem_usage)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;

	p = container_of(svms, struct kfd_process, svms);
	if (!p->xnack_enabled && update_mem_usage &&
	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
					    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
		kfree(prange);
		return NULL;
	}
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
		/* We're not in the eviction worker.
		 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
		dma_fence_signal(&svm_bo->eviction_fence->base);
		cancel_work_sync(&svm_bo->eviction_work);
	}
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

static void svm_range_bo_wq_release(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}
static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
{
	struct amdgpu_device *bo_adev;

	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
		if (bo_adev != adev) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}

	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list))
		;

	return false;
}
static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}
int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(adev, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	if (clear) {
		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
		if (r) {
			pr_debug("failed %d to sync bo\n", r);
			amdgpu_bo_unreserve(bo);
			goto reserve_bo_failed;
		}
	}

	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return -ENOMEM;
}
void svm_range_vram_node_free(struct svm_range *prange)
{
	svm_range_bo_unref(prange->svm_bo);
	prange->ttm_res = NULL;
}

struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int32_t gpu_idx;

	p = container_of(prange->svms, struct kfd_process, svms);

	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
	if (gpu_idx < 0) {
		pr_debug("failed to get device by id 0x%x\n", gpu_id);
		return NULL;
	}
	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
	if (!pdd) {
		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
		return NULL;
	}

	return pdd->dev->adev;
}
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
{
	struct kfd_process *p;
	int32_t gpu_idx, gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
	if (r) {
		pr_debug("failed to get device id by adev %p\n", adev);
		return NULL;
	}

	return kfd_process_device_from_gpuidx(p, gpu_idx);
}

static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}
static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
		      bool *update_mapping)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			*update_mapping = true;
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			*update_mapping = true;
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			*update_mapping = true;
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = attrs[i].value;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}
static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (prange->preferred_loc != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			/* Prefetch should always trigger a migration even
			 * if the value of the attribute didn't change.
			 */
			return false;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				if (test_bit(gpuidx, prange->bitmap_access) ||
				    test_bit(gpuidx, prange->bitmap_aip))
					return false;
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				if (!test_bit(gpuidx, prange->bitmap_access))
					return false;
			} else {
				if (!test_bit(gpuidx, prange->bitmap_aip))
					return false;
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			if ((prange->flags & attrs[i].value) != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			if ((prange->flags & attrs[i].value) != 0)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			if (prange->granularity != attrs[i].value)
				return false;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}

	return true;
}
/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * Debug output of svm range start, end and actual location, from both
 * the svms interval tree and the linked list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	new = kvmalloc_array(new_n, size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	d = (new_start - old_start) * size;
	memcpy(new, pold + d, new_n * size);

	old = kvmalloc_array(old_n, size, GFP_KERNEL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}

	d = (new_start == old_start) ? new_n * size : 0;
	memcpy(old, pold + d, old_n * size);

	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}
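/* Worked example (illustrative, not from the original source): splitting
 * a 0x100-entry array so the new range becomes the tail. With
 * old_start = 0x0, old_n = 0xc0, new_start = 0xc0 and new_n = 0x40, the
 * new array copies entries [0xc0 0xff] (d = 0xc0 * size) and the old
 * array keeps entries [0x0 0xbf] (d = 0, because new_start != old_start).
 * When the new range is the head instead (new_start == old_start), the
 * old copy starts at new_n * size, so the old array keeps the tail.
 */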
static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages);
		if (r)
			return r;
	}

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}
/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: the new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy the system memory dma_addr or VRAM ttm_res of the old range to the
 * new range, from new_start up to new->npages entries; the remaining old
 * range runs from start to last
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}
/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last, false);
	else
		*new = svm_range_new(svms, old_start, start - 1, false);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new, false);
		*new = NULL;
	}

	return r;
}
static int
svm_range_split_tail(struct svm_range *prange,
		     uint64_t new_last, struct list_head *insert_list)
{
	struct svm_range *tail;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r)
		list_add(&tail->list, insert_list);

	return r;
}

static int
svm_range_split_head(struct svm_range *prange,
		     uint64_t new_start, struct list_head *insert_list)
{
	struct svm_range *head;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r)
		list_add(&head->list, insert_list);

	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}
/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
			       unsigned long addr, struct svm_range *parent,
			       struct svm_range *prange)
{
	struct svm_range *head, *tail;
	unsigned long start, last, size;
	int r;

	/* Align the split range start and size to the granularity size, then
	 * a single PTE will be used for the whole range. This reduces the
	 * number of PTE updates and the L1 TLB space used for translation.
	 */
	size = 1UL << prange->granularity;
	start = ALIGN_DOWN(addr, size);
	last = ALIGN(addr + 1, size) - 1;

	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
		 prange->svms, prange->start, prange->last, start, last, size);

	if (start > prange->start) {
		r = svm_range_split(prange, start, prange->last, &head);
		if (r)
			return r;
		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
	}

	if (last < prange->last) {
		r = svm_range_split(prange, prange->start, last, &tail);
		if (r)
			return r;
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	}

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
			 prange, prange->start, prange->last,
			 SVM_OP_ADD_RANGE_AND_MAP);
	}
	return 0;
}
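/* Worked example (illustrative): with the default granularity of 9 the
 * block size is 1UL << 9 = 0x200 pages (2MB with 4KB pages). A fault at
 * page 0x12345 gives start = ALIGN_DOWN(0x12345, 0x200) = 0x12200 and
 * last = ALIGN(0x12346, 0x200) - 1 = 0x123ff, so the faulting prange is
 * trimmed to the single 2MB-aligned block that contains the fault.
 */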
static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
			int domain)
{
	struct amdgpu_device *bo_adev;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);

	switch (KFD_GC_VERSION(adev->kfd.dev)) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
	return pte_flags;
}
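/* Example of the resulting flags (illustrative): for a system-memory
 * mapping on any ASIC, snoop stays true and the domain is not VRAM, so
 * the PTE ends up with AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 * AMDGPU_PTE_SNOOPED plus the MTYPE/read/write/exec bits translated from
 * mapping_flags by amdgpu_gem_va_map_flags(). Local VRAM on GC 9.4.1 and
 * 9.4.2 instead uses MTYPE_CC (coherent) or MTYPE_RW and omits
 * AMDGPU_PTE_SYSTEM.
 */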
static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
				      last, init_pte_value, 0, 0, NULL, NULL,
				      fence);
}
static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last, uint32_t trigger)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	if (!prange->mapped_to_gpu) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
			 prange, prange->start, prange->last);
		return 0;
	}

	if (prange->start == start && prange->last == last) {
		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
		prange->mapped_to_gpu = false;
	}

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}

		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
					     start, last, trigger);

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}
static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
		     unsigned long offset, unsigned long npages, bool readonly,
		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
		     struct dma_fence **fence, bool flush_tlb)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");

		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
		if (readonly)
			pte_flags &= ~AMDGPU_PTE_WRITEABLE;

		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
			 prange->svms, last_start, prange->start + i,
			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
			 pte_flags);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
					   last_start, prange->start + i,
					   pte_flags,
					   (last_start - prange->start) << PAGE_SHIFT,
					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
					   NULL, dma_addr, &vm->last_update);

		for (j = last_start - prange->start; j <= i; j++)
			dma_addr[j] |= last_domain;

		if (r) {
			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
			goto out;
		}
		last_start = prange->start + i + 1;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		pr_debug("failed %d to update directories 0x%lx\n", r,
			 prange->start);
		goto out;
	}

	if (fence)
		*fence = dma_fence_get(vm->last_update);

out:
	return r;
}
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
		      unsigned long npages, bool readonly,
		      unsigned long *bitmap, bool wait, bool flush_tlb)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct kfd_process *p;
	struct dma_fence *fence = NULL;
	uint32_t gpuidx;
	int r = 0;

	if (prange->svm_bo && prange->ttm_res)
		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
	else
		bo_adev = NULL;

	p = container_of(prange->svms, struct kfd_process, svms);
	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		pdd = kfd_bind_process_to_device(pdd->dev, p);
		if (IS_ERR(pdd))
			return -EINVAL;

		if (bo_adev && pdd->dev->adev != bo_adev &&
		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			pr_debug("cannot map to device idx %d\n", gpuidx);
			continue;
		}

		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
					 prange->dma_addr[gpuidx],
					 bo_adev, wait ? &fence : NULL,
					 flush_tlb);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r) {
				pr_debug("failed %d to dma fence wait\n", r);
				break;
			}
		}

		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	return r;
}
struct svm_validate_context {
	struct kfd_process *process;
	struct svm_range *prange;
	bool intr;
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
	struct list_head validate_list;
	struct ww_acquire_ctx ticket;
};
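/* Typical lifecycle (sketch based on the code below): fill in process,
 * prange, intr and the GPU bitmap, call svm_range_reserve_bos() to
 * reserve the page-table root BOs of every GPU in the bitmap and
 * validate their page-table BOs, do the mapping work, then call
 * svm_range_unreserve_bos() to back off the ttm_eu reservation.
 */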
static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_vm *vm;
	uint32_t gpuidx;
	int r;

	INIT_LIST_HEAD(&ctx->validate_list);
	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
				   ctx->intr, NULL);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		return r;
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
					      drm_priv_to_vm(pdd->drm_priv),
					      svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
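/* Note (rationale inferred from the HMM API, not stated in this file):
 * the owner cookie is passed down to hmm_range_fault() as the device
 * private owner via amdgpu_hmm_range_get_pages(). Device-private pages
 * whose pgmap owner matches can be returned as-is; anything else has to
 * be migrated back to system memory first. That is why validation below
 * falls back to owner = NULL when the GPUs in the bitmap disagree.
 */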
/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
static int svm_range_validate_and_map(struct mm_struct *mm,
				      struct svm_range *prange, int32_t gpuidx,
				      bool intr, bool wait, bool flush_tlb)
{
	struct svm_validate_context ctx;
	unsigned long start, end, addr;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx.process = container_of(prange->svms, struct kfd_process, svms);
	ctx.prange = prange;
	ctx.intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx.bitmap, gpuidx, 1);
	} else if (ctx.process->xnack_enabled) {
		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
							prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					 prange->actual_loc);
				return -EINVAL;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx.bitmap, gpuidx, 1);
		}
	} else {
		bitmap_or(ctx.bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
		if (!prange->mapped_to_gpu)
			return 0;

		bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
	}

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN_ONCE(1, "VRAM BO missing during validation\n");
		return -EINVAL;
	}

	svm_range_reserve_bos(&ctx);

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
						MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;
	for (addr = start; addr < end && !r; ) {
		struct hmm_range *hmm_range;
		struct vm_area_struct *vma;
		unsigned long next;
		unsigned long offset;
		unsigned long npages;
		bool readonly;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			r = -EFAULT;
			goto unreserve_out;
		}
		readonly = !(vma->vm_flags & VM_WRITE);

		next = min(vma->vm_end, end);
		npages = (next - addr) >> PAGE_SHIFT;
		WRITE_ONCE(p->svms.faulting_task, current);
		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
					       readonly, owner, NULL,
					       &hmm_range);
		WRITE_ONCE(p->svms.faulting_task, NULL);
		if (r) {
			pr_debug("failed %d to get svm range pages\n", r);
			goto unreserve_out;
		}

		offset = (addr - start) >> PAGE_SHIFT;
		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
				      hmm_range->hmm_pfns);
		if (r) {
			pr_debug("failed %d to dma map range\n", r);
			goto unreserve_out;
		}

		svm_range_lock(prange);
		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
			pr_debug("hmm update the range, need validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}
		if (!list_empty(&prange->child_list)) {
			pr_debug("range split by unmap in parallel, validate again\n");
			r = -EAGAIN;
			goto unlock_out;
		}

		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
					  ctx.bitmap, wait, flush_tlb);

unlock_out:
		svm_range_unlock(prange);

		addr = next;
	}

	if (addr == end) {
		prange->validated_once = true;
		prange->mapped_to_gpu = true;
	}

unreserve_out:
	svm_range_unreserve_bos(&ctx);

	if (!r)
		prange->validate_timestamp = ktime_get_boottime();

	return r;
}
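/* Note on the -EAGAIN contract (inferred from the checks above): if the
 * mmu notifier invalidated the pages after hmm_range_fault, or the range
 * was split by a concurrent unmap, validation must be retried by the
 * caller with a fresh hmm_range_fault. The GPU mapping is only committed
 * while prange is locked and the notifier sequence is unchanged.
 */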
/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}
static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	p = container_of(svms, struct kfd_process, svms);
	process_info = p->kgd_process_info;

	/* Keep mm reference when svm_range_validate_and_map ranges */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p process mm gone\n", svms);
		return;
	}

	mutex_lock(&process_info->lock);
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/*
		 * If the range is migrating, wait for the migration to finish.
		 */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       false, true, false);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);
	}
	mmput(mm);
}
/**
 * svm_range_evict - evict svm range
 * @prange: svm range structure
 * @mm: current process mm_struct
 * @start: first page of the range being invalidated, in pages
 * @last: last page of the range being invalidated, in pages
 * @event: mmu notifier event type
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let CPU evict the buffer and proceed CPU pagetable update.
 *
 * No lock is needed to sync CPU pagetable invalidation with GPU execution.
 * If an invalidation happens while the restore work is running, the restore
 * work restarts to ensure it gets the latest CPU page mappings to the GPU,
 * and the eviction then starts again.
 */
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
		unsigned long start, unsigned long last,
		enum mmu_notifier_event event)
{
	struct svm_range_list *svms = prange->svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	int r = 0;

	p = container_of(svms, struct kfd_process, svms);

	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		 svms, prange->start, prange->last, start, last);

	if (!p->xnack_enabled ||
	    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
		int evicted_ranges;
		bool mapped = prange->mapped_to_gpu;

		list_for_each_entry(pchild, &prange->child_list, child_list) {
			if (!pchild->mapped_to_gpu)
				continue;
			mapped = true;
			mutex_lock_nested(&pchild->lock, 1);
			if (pchild->start <= last && pchild->last >= start) {
				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
					 pchild->start, pchild->last);
				atomic_inc(&pchild->invalid);
			}
			mutex_unlock(&pchild->lock);
		}

		if (!mapped)
			return r;

		if (prange->start <= last && prange->last >= start)
			atomic_inc(&prange->invalid);

		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
		if (evicted_ranges != 1)
			return r;

		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
			 prange->svms, prange->start, prange->last);

		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
		if (r)
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;
		uint32_t trigger;

		if (event == MMU_NOTIFY_MIGRATE)
			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
		else
			trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;

		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
			 prange->svms, start, last);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			mutex_lock_nested(&pchild->lock, 1);
			s = max(start, pchild->start);
			l = min(last, pchild->last);
			if (l >= s)
				svm_range_unmap_from_gpus(pchild, s, l, trigger);
			mutex_unlock(&pchild->lock);
		}
		s = max(start, prange->start);
		l = min(last, prange->last);
		if (l >= s)
			svm_range_unmap_from_gpus(prange, s, l, trigger);
	}

	return r;
}
static struct svm_range *svm_range_clone(struct svm_range *old)
{
	struct svm_range *new;

	new = svm_range_new(old->svms, old->start, old->last, false);
	if (!new)
		return NULL;

	if (old->svm_bo) {
		new->ttm_res = old->ttm_res;
		new->offset = old->offset;
		new->svm_bo = svm_range_bo_ref(old->svm_bo);
		spin_lock(&new->svm_bo->list_lock);
		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
		spin_unlock(&new->svm_bo->list_lock);
	}
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return new;
}
void svm_range_set_max_pages(struct amdgpu_device *adev)
{
	uint64_t max_pages;
	uint64_t pages, _pages;

	/* 1/32 VRAM size in pages */
	pages = adev->gmc.real_vram_size >> 17;
	pages = clamp(pages, 1ULL << 9, 1ULL << 18);
	pages = rounddown_pow_of_two(pages);
	do {
		max_pages = READ_ONCE(max_svm_range_pages);
		_pages = min_not_zero(max_pages, pages);
	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
}
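/* Worked example (illustrative): for a 16GB dGPU, real_vram_size >> 17
 * is 1/32 of VRAM in 4KB pages: 2^34 >> 17 = 2^17 pages = 512MB. That is
 * already inside the clamp window of [2^9, 2^18] pages (2MB..1GB) and a
 * power of two, so max_svm_range_pages becomes 0x20000 unless a smaller
 * GPU has already lowered it (min_not_zero keeps the smallest non-zero
 * value across all devices).
 */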
static int
svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
		    uint64_t max_pages, struct list_head *insert_list,
		    struct list_head *update_list)
{
	struct svm_range *prange;
	uint64_t l;

	pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
		 max_pages, start, last);

	while (last >= start) {
		l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);

		prange = svm_range_new(svms, start, l, true);
		if (!prange)
			return -ENOMEM;
		list_add(&prange->list, insert_list);
		list_add(&prange->update_list, update_list);

		start = l + 1;
	}
	return 0;
}
/**
 * svm_range_add - add svm range and handle overlap
 * @p: the process to add this range to
 * @start: start of the range, in pages, page aligned
 * @size: size of the range, in pages, page aligned
 * @nattr: number of attributes
 * @attrs: array of attributes
 * @update_list: output, the ranges need validate and update GPU mapping
 * @insert_list: output, the ranges need insert to svms
 * @remove_list: output, the ranges are replaced and need remove from svms
 *
 * Check if the virtual address range has overlap with any existing ranges,
 * split partly overlapping ranges and add new ranges in the gaps. All changes
 * should be applied to the range_list and interval tree transactionally. If
 * any range split or allocation fails, the entire update fails. Therefore any
 * existing overlapping svm_ranges are cloned and the original svm_ranges left
 * unchanged.
 *
 * If the transaction succeeds, the caller can update and insert clones and
 * new ranges, then free the originals.
 *
 * Otherwise the caller can free the clones and new ranges, while the old
 * svm_ranges remain unchanged.
 *
 * Context: Process context, caller must hold svms->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
	      struct list_head *update_list, struct list_head *insert_list,
	      struct list_head *remove_list)
{
	unsigned long last = start + size - 1UL;
	struct svm_range_list *svms = &p->svms;
	struct interval_tree_node *node;
	struct svm_range *prange;
	struct svm_range *tmp;
	struct list_head new_list;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);

	INIT_LIST_HEAD(update_list);
	INIT_LIST_HEAD(insert_list);
	INIT_LIST_HEAD(remove_list);
	INIT_LIST_HEAD(&new_list);

	node = interval_tree_iter_first(&svms->objects, start, last);
	while (node) {
		struct interval_tree_node *next;
		unsigned long next_start;

		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
			 node->last);

		prange = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);
		next_start = min(node->last, last) + 1;

		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
			/* nothing to do */
		} else if (node->start < start || node->last > last) {
			/* node intersects the update range and its attributes
			 * will change. Clone and split it, apply updates only
			 * to the overlapping part
			 */
			struct svm_range *old = prange;

			prange = svm_range_clone(old);
			if (!prange) {
				r = -ENOMEM;
				goto out;
			}

			list_add(&old->update_list, remove_list);
			list_add(&prange->list, insert_list);
			list_add(&prange->update_list, update_list);

			if (node->start < start) {
				pr_debug("change old range start\n");
				r = svm_range_split_head(prange, start,
							 insert_list);
				if (r)
					goto out;
			}
			if (node->last > last) {
				pr_debug("change old range last\n");
				r = svm_range_split_tail(prange, last,
							 insert_list);
				if (r)
					goto out;
			}
		} else {
			/* The node is contained within start..last,
			 * just update it
			 */
			list_add(&prange->update_list, update_list);
		}

		/* insert a new node if needed */
		if (node->start > start) {
			r = svm_range_split_new(svms, start, node->start - 1,
						READ_ONCE(max_svm_range_pages),
						&new_list, update_list);
			if (r)
				goto out;
		}

		node = next;
		start = next_start;
	}

	/* add a final range at the end if needed */
	if (start <= last)
		r = svm_range_split_new(svms, start, last,
					READ_ONCE(max_svm_range_pages),
					&new_list, update_list);

out:
	if (r) {
		list_for_each_entry_safe(prange, tmp, insert_list, list)
			svm_range_free(prange, false);
		list_for_each_entry_safe(prange, tmp, &new_list, list)
			svm_range_free(prange, true);
	} else {
		list_splice(&new_list, insert_list);
	}

	return r;
}
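/* Worked example (illustrative): updating [0x200 0x3ff] while an existing
 * range [0x100 0x2ff] has different attributes clones [0x100 0x2ff], puts
 * the original on remove_list, and splits the clone head at 0x200: the
 * head clone [0x100 0x1ff] stays on insert_list with old attributes, the
 * overlapping clone [0x200 0x2ff] goes on insert_list and update_list,
 * and a new range [0x300 0x3ff] is created on new_list. Nothing touches
 * svms->objects until the whole transaction has succeeded.
 */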
static void
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
					    struct svm_range *prange)
{
	unsigned long start;
	unsigned long last;

	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;

	if (prange->start == start && prange->last == last)
		return;

	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
		  prange->svms, prange, start, last, prange->start,
		  prange->last);

	if (start != 0 && last != 0) {
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
		svm_range_remove_notifier(prange);
	}
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;

	interval_tree_insert(&prange->it_node, &prange->svms->objects);
	svm_range_add_notifier_locked(mm, prange);
}
static void
svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
			 struct mm_struct *mm)
{
	switch (prange->work_item.op) {
	case SVM_OP_NULL:
		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		break;
	case SVM_OP_UNMAP_RANGE:
		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange, true);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER:
		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		break;
	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 svms, prange, prange->start, prange->last);
		svm_range_update_notifier_and_interval_tree(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	case SVM_OP_ADD_RANGE:
		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
			 prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		break;
	case SVM_OP_ADD_RANGE_AND_MAP:
		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
			 prange, prange->start, prange->last);
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
		/* TODO: implement deferred validation and mapping */
		break;
	default:
		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
			 prange->work_item.op);
	}
}
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	int drain;
	uint32_t i;

	p = container_of(svms, struct kfd_process, svms);

restart:
	drain = atomic_read(&svms->drain_pagefaults);
	if (!drain)
		return;

	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];
		if (!pdd)
			continue;

		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);

		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
			pdd->dev->adev->irq.retry_cam_enabled ?
				&pdd->dev->adev->irq.ih :
				&pdd->dev->adev->irq.ih1);

		if (pdd->dev->adev->irq.retry_cam_enabled)
			amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
				&pdd->dev->adev->irq.ih_soft);

		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
	}
	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
		goto restart;
}

static void svm_range_deferred_list_work(struct work_struct *work)
{
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = container_of(work, struct svm_range_list, deferred_list_work);
	pr_debug("enter svms 0x%p\n", svms);

	spin_lock(&svms->deferred_list_lock);
	while (!list_empty(&svms->deferred_range_list)) {
		prange = list_first_entry(&svms->deferred_range_list,
					  struct svm_range, deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
			 prange->start, prange->last, prange->work_item.op);

		mm = prange->work_item.mm;
retry:
		mmap_write_lock(mm);

		/* Checking for the need to drain retry faults must be inside
		 * mmap write lock to serialize with munmap notifier callbacks.
		 */
		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
			mmap_write_unlock(mm);
			svm_range_drain_retry_fault(svms);
			goto retry;
		}

		/* Remove from deferred_list must be inside mmap write lock, for
		 * two race cases:
		 * 1. unmap_from_cpu may change work_item.op and add the range
		 *    to deferred_list again, causing a use-after-free bug.
		 * 2. svm_range_list_lock_and_flush_work may hold mmap write
		 *    lock and continue because deferred_list is empty, but
		 *    deferred_list work is actually waiting for mmap lock.
		 */
		spin_lock(&svms->deferred_list_lock);
		list_del_init(&prange->deferred_list);
		spin_unlock(&svms->deferred_list_lock);

		mutex_lock(&svms->lock);
		mutex_lock(&prange->migrate_mutex);
		while (!list_empty(&prange->child_list)) {
			struct svm_range *pchild;

			pchild = list_first_entry(&prange->child_list,
						  struct svm_range, child_list);
			pr_debug("child prange 0x%p op %d\n", pchild,
				 pchild->work_item.op);
			list_del_init(&pchild->child_list);
			svm_range_handle_list_op(svms, pchild, mm);
		}
		mutex_unlock(&prange->migrate_mutex);

		svm_range_handle_list_op(svms, prange, mm);
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);

		/* Pairs with mmget in svm_range_add_list_work */
		mmput(mm);

		spin_lock(&svms->deferred_list_lock);
	}
	spin_unlock(&svms->deferred_list_lock);
	pr_debug("exit svms 0x%p\n", svms);
}
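
/*
 * Lock order in the worker above is mmap write lock, then svms->lock,
 * then prange->migrate_mutex; every other path in this file that takes
 * more than one of these locks follows the same order to stay deadlock
 * free.
 */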

void
svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
			struct mm_struct *mm, enum svm_work_list_ops op)
{
	spin_lock(&svms->deferred_list_lock);
	/* if prange is on the deferred list */
	if (!list_empty(&prange->deferred_list)) {
		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
		WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
		if (op != SVM_OP_NULL &&
		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
			prange->work_item.op = op;
	} else {
		prange->work_item.op = op;

		/* Pairs with mmput in deferred_list_work */
		mmget(mm);
		prange->work_item.mm = mm;
		list_add_tail(&prange->deferred_list,
			      &prange->svms->deferred_range_list);
		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
			 prange, prange->start, prange->last, op);
	}
	spin_unlock(&svms->deferred_list_lock);
}
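
/*
 * Example of the precedence encoded above: if a range is already queued
 * with SVM_OP_UNMAP_RANGE and a later notifier tries to queue
 * SVM_OP_UPDATE_RANGE_NOTIFIER for the same prange, the op stays
 * SVM_OP_UNMAP_RANGE; unmapping wins because the prange is about to be
 * freed by the deferred worker.
 */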

void schedule_deferred_list_work(struct svm_range_list *svms)
{
	spin_lock(&svms->deferred_list_lock);
	if (!list_empty(&svms->deferred_range_list))
		schedule_work(&svms->deferred_list_work);
	spin_unlock(&svms->deferred_list_lock);
}

static void
svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
		      struct svm_range *prange, unsigned long start,
		      unsigned long last)
{
	struct svm_range *head;
	struct svm_range *tail;

	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
			 prange->start, prange->last);
		return;
	}
	if (start > prange->last || last < prange->start)
		return;

	head = tail = prange;
	if (start > prange->start)
		svm_range_split(prange, prange->start, start - 1, &tail);
	if (last < tail->last)
		svm_range_split(tail, last + 1, tail->last, &head);

	if (head != prange && tail != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
	} else if (tail != prange) {
		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
	} else if (head != prange) {
		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
	} else if (parent != prange) {
		prange->work_item.op = SVM_OP_UNMAP_RANGE;
	}
}
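
/*
 * Worked example for the split above, in page numbers: prange is
 * [0x1000 0x1fff] and the CPU unmaps [0x1400 0x17ff].  The first
 * svm_range_split() keeps [0x1000 0x13ff] in prange and moves
 * [0x1400 0x1fff] into tail; the second keeps [0x1800 0x1fff] in tail
 * and moves the unmapped middle [0x1400 0x17ff] into head.  head then
 * gets SVM_OP_UNMAP_RANGE and tail gets SVM_OP_ADD_RANGE as children
 * of parent.
 */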

static void
svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
			 unsigned long start, unsigned long last)
{
	uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
	struct svm_range_list *svms;
	struct svm_range *pchild;
	struct kfd_process *p;
	unsigned long s, l;
	bool unmap_parent;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return;
	svms = &p->svms;

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
		 prange, prange->start, prange->last, start, last);

	/* Make sure pending page faults are drained in the deferred worker
	 * before the range is freed to avoid straggler interrupts on
	 * unmapped memory causing "phantom faults".
	 */
	atomic_inc(&svms->drain_pagefaults);

	unmap_parent = start <= prange->start && last >= prange->last;

	list_for_each_entry(pchild, &prange->child_list, child_list) {
		mutex_lock_nested(&pchild->lock, 1);
		s = max(start, pchild->start);
		l = min(last, pchild->last);
		if (l >= s)
			svm_range_unmap_from_gpus(pchild, s, l, trigger);
		svm_range_unmap_split(mm, prange, pchild, start, last);
		mutex_unlock(&pchild->lock);
	}
	s = max(start, prange->start);
	l = min(last, prange->last);
	if (l >= s)
		svm_range_unmap_from_gpus(prange, s, l, trigger);
	svm_range_unmap_split(mm, prange, prange, start, last);

	if (unmap_parent)
		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
	else
		svm_range_add_list_work(svms, prange, mm,
					SVM_OP_UPDATE_RANGE_NOTIFIER);
	schedule_deferred_list_work(svms);

	kfd_unref_process(p);
}
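
/*
 * Note the pairing: the atomic_inc of drain_pagefaults above is consumed
 * by svm_range_drain_retry_fault() from the deferred worker, which only
 * resets the counter via cmpxchg once the IH rings have been walked, so
 * a fault that was already queued for this range cannot be handled after
 * the range is freed.
 */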

/**
 * svm_range_cpu_invalidate_pagetables - interval notifier callback
 * @mni: mmu_interval_notifier struct
 * @range: mmu_notifier_range struct
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * If the event is MMU_NOTIFY_UNMAP, this is a CPU unmap; otherwise it comes
 * from migration or a CPU page invalidation callback.
 *
 * For an unmap event, unmap the range from GPUs, remove prange from svms in
 * a delayed work thread, and split prange if only part of it is unmapped.
 *
 * For an invalidation event, if GPU retry fault is not enabled, evict the
 * queues, then schedule svm_range_restore_work to update the GPU mapping and
 * resume the queues. If GPU retry fault is enabled, unmap the svm range from
 * the GPU; the retry fault will update the GPU mapping to recover.
 *
 * Context: mmap lock, notifier_invalidate_start lock are held
 *          for invalidate event, prange lock is held if this is from migration
 */
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq)
{
	struct svm_range *prange;
	unsigned long start;
	unsigned long last;

	if (range->event == MMU_NOTIFY_RELEASE)
		return true;
	if (!mmget_not_zero(mni->mm))
		return true;

	start = mni->interval_tree.start;
	last = mni->interval_tree.last;
	start = max(start, range->start) >> PAGE_SHIFT;
	last = min(last, range->end - 1) >> PAGE_SHIFT;
	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
		 start, last, range->start >> PAGE_SHIFT,
		 (range->end - 1) >> PAGE_SHIFT,
		 mni->interval_tree.start >> PAGE_SHIFT,
		 mni->interval_tree.last >> PAGE_SHIFT, range->event);

	prange = container_of(mni, struct svm_range, notifier);

	svm_range_lock(prange);
	mmu_interval_set_seq(mni, cur_seq);

	switch (range->event) {
	case MMU_NOTIFY_UNMAP:
		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
		break;
	default:
		svm_range_evict(prange, mni->mm, start, last, range->event);
		break;
	}

	svm_range_unlock(prange);
	mmput(mni->mm);

	return true;
}
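
/*
 * MMU_NOTIFY_RELEASE is ignored above because the process is exiting and
 * svm_range_list_fini() does the final teardown.  The mmget_not_zero()
 * guard covers the same window: if the mm is already gone there is
 * nothing left to unmap or evict, so returning true without touching
 * prange is safe.
 */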

/**
 * svm_range_from_addr - find svm range from fault address
 * @svms: svm range list header
 * @addr: address to search range interval tree, in pages
 * @parent: parent range if range is on child list
 *
 * Context: The caller must hold svms->lock
 *
 * Return: the svm_range found or NULL
 */
struct svm_range *
svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
		    struct svm_range **parent)
{
	struct interval_tree_node *node;
	struct svm_range *prange;
	struct svm_range *pchild;

	node = interval_tree_iter_first(&svms->objects, addr, addr);
	if (!node)
		return NULL;

	prange = container_of(node, struct svm_range, it_node);
	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
		 addr, prange->start, prange->last, node->start, node->last);

	if (addr >= prange->start && addr <= prange->last) {
		if (parent)
			*parent = prange;
		return prange;
	}
	list_for_each_entry(pchild, &prange->child_list, child_list)
		if (addr >= pchild->start && addr <= pchild->last) {
			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
				 addr, pchild->start, pchild->last);
			if (parent)
				*parent = prange;
			return pchild;
		}

	return NULL;
}
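
/*
 * Typical use, as in svm_range_restore_pages() below: look the faulting
 * page up while holding svms->lock, e.g.
 *
 *	mutex_lock(&svms->lock);
 *	prange = svm_range_from_addr(svms, addr, NULL);
 *
 * Passing a non-NULL parent pointer additionally reports the parent range
 * when the hit is a child that was split off but not yet re-inserted into
 * the interval tree.
 */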

/* svm_range_best_restore_location - decide the best fault restore location
 * @prange: svm range structure
 * @adev: the GPU on which vm fault happened
 *
 * This is only called when xnack is on, to decide the best location to restore
 * the range mapping after GPU vm fault. The caller uses the best location to do
 * migration if the actual loc is not the best location, then updates the GPU
 * page table mapping to the best location.
 *
 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
 * If the vm fault gpu idx is on the range ACCESSIBLE bitmap, best_loc is the
 * vm fault gpu.
 * If the vm fault gpu idx is on the range ACCESSIBLE_IN_PLACE bitmap, then
 *    if the range actual loc is cpu, best_loc is cpu;
 *    if the vm fault gpu is on the same xgmi hive as the range actual loc gpu,
 *    best_loc is the range actual loc.
 * Otherwise the GPU has no access, and best_loc is -1.
 *
 * Return:
 * -1 means the vm fault GPU has no access
 * 0 for CPU or GPU id
 */
static int32_t
svm_range_best_restore_location(struct svm_range *prange,
				struct amdgpu_device *adev,
				int32_t *gpuidx)
{
	struct amdgpu_device *bo_adev, *preferred_adev;
	struct kfd_process *p;
	uint32_t gpuid;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
	if (r < 0) {
		pr_debug("failed to get gpuid from kgd\n");
		return -1;
	}

	if (prange->preferred_loc == gpuid ||
	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
		return prange->preferred_loc;
	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
		preferred_adev = svm_range_get_adev_by_id(prange,
							prange->preferred_loc);
		if (amdgpu_xgmi_same_hive(adev, preferred_adev))
			return prange->preferred_loc;
		/* fall through */
	}

	if (test_bit(*gpuidx, prange->bitmap_access))
		return gpuid;

	if (test_bit(*gpuidx, prange->bitmap_aip)) {
		if (!prange->actual_loc)
			return 0;

		bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
		if (amdgpu_xgmi_same_hive(adev, bo_adev))
			return prange->actual_loc;
		else
			return 0;
	}

	return -1;
}
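
/*
 * Example of the decision above: a fault from GPU A on a range whose
 * preferred_loc is GPU B returns B only if A and B share an XGMI hive;
 * if A is merely in bitmap_aip and the pages currently live in VRAM of a
 * GPU outside A's hive, the function returns 0 and the pages are
 * migrated to system memory instead.
 */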

static int
svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
			       unsigned long *start, unsigned long *last,
			       bool *is_heap_stack)
{
	struct vm_area_struct *vma;
	struct interval_tree_node *node;
	unsigned long start_limit, end_limit;

	vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
	if (!vma) {
		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
		return -EFAULT;
	}

	*is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
			  vma->vm_end >= vma->vm_mm->start_brk) ||
			 (vma->vm_start <= vma->vm_mm->start_stack &&
			  vma->vm_end >= vma->vm_mm->start_stack);

	start_limit = max(vma->vm_start >> PAGE_SHIFT,
		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
	end_limit = min(vma->vm_end >> PAGE_SHIFT,
		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
	/* First range that starts after the fault address */
	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
	if (node) {
		end_limit = min(end_limit, node->start);
		/* Last range that ends before the fault address */
		node = container_of(rb_prev(&node->rb),
				    struct interval_tree_node, rb);
	} else {
		/* Last range must end before addr because
		 * there was no range after addr
		 */
		node = container_of(rb_last(&p->svms.objects.rb_root),
				    struct interval_tree_node, rb);
	}
	if (node) {
		if (node->last >= addr) {
			WARN(1, "Overlap with prev node and page fault addr\n");
			return -EFAULT;
		}
		start_limit = max(start_limit, node->last + 1);
	}

	*start = start_limit;
	*last = end_limit - 1;

	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
		 *start, *last, *is_heap_stack);

	return 0;
}
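
/*
 * The 2UL << 8 above is 512 pages, i.e. a 2MB granule with 4KB pages.
 * For a fault at page 0x12345 the candidate window is
 * ALIGN_DOWN(0x12345, 512) = 0x12200 up to ALIGN(0x12346, 512) - 1 =
 * 0x123ff, then clamped to the VMA and to the neighbouring registered
 * ranges found in the interval tree.
 */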

static int
svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
			   uint64_t *bo_s, uint64_t *bo_l)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct interval_tree_node *node;
	struct amdgpu_bo *bo = NULL;
	unsigned long userptr;
	uint32_t i;
	int r;

	for (i = 0; i < p->n_pdds; i++) {
		struct amdgpu_vm *vm;

		if (!p->pdds[i]->drm_priv)
			continue;

		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		/* Check userptr by searching entire vm->va interval tree */
		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
		while (node) {
			mapping = container_of((struct rb_node *)node,
					       struct amdgpu_bo_va_mapping, rb);
			bo = mapping->bo_va->base.bo;

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
							  start << PAGE_SHIFT,
							  last << PAGE_SHIFT,
							  &userptr)) {
				node = interval_tree_iter_next(node, 0, ~0ULL);
				continue;
			}

			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
				 start, last);
			if (bo_s && bo_l) {
				*bo_s = userptr >> PAGE_SHIFT;
				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
			}
			amdgpu_bo_unreserve(vm->root.bo);
			return -EADDRINUSE;
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}

	return 0;
}

static struct
svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
						struct kfd_process *p,
						struct mm_struct *mm,
						int64_t addr)
{
	struct svm_range *prange = NULL;
	unsigned long start, last;
	uint32_t gpuid, gpuidx;
	bool is_heap_stack;
	uint64_t bo_s = 0;
	uint64_t bo_l = 0;
	int r;

	if (svm_range_get_range_boundaries(p, addr, &start, &last,
					   &is_heap_stack))
		return NULL;

	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
	if (r != -EADDRINUSE)
		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);

	if (r == -EADDRINUSE) {
		if (addr >= bo_s && addr <= bo_l)
			return NULL;

		/* Create one page svm range if 2MB range overlapping */
		start = addr;
		last = addr;
	}

	prange = svm_range_new(&p->svms, start, last, true);
	if (!prange) {
		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
		return NULL;
	}
	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
		pr_debug("failed to get gpuid from kgd\n");
		svm_range_free(prange, true);
		return NULL;
	}

	if (is_heap_stack)
		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;

	svm_range_add_to_svms(prange);
	svm_range_add_notifier_locked(mm, prange);

	return prange;
}

/* svm_range_skip_recover - decide if prange can be recovered
 * @prange: svm range structure
 *
 * The GPU VM retry fault handler skips recovering the range in these cases:
 * 1. prange is on the deferred list to be removed after unmap; it is a stale
 *    fault, and deferred list work will drain it before freeing the prange.
 * 2. prange is on the deferred list to add an interval notifier after split.
 * 3. prange is a child range split from a parent prange; recover later after
 *    the interval notifier is added.
 *
 * Return: true to skip recover, false to recover
 */
static bool svm_range_skip_recover(struct svm_range *prange)
{
	struct svm_range_list *svms = prange->svms;

	spin_lock(&svms->deferred_list_lock);
	if (list_empty(&prange->deferred_list) &&
	    list_empty(&prange->child_list)) {
		spin_unlock(&svms->deferred_list_lock);
		return false;
	}
	spin_unlock(&svms->deferred_list_lock);

	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
			 svms, prange, prange->start, prange->last);
		return true;
	}
	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
	    prange->work_item.op == SVM_OP_ADD_RANGE) {
		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
			 svms, prange, prange->start, prange->last);
		return true;
	}

	return false;
}

static void
svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
		      int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	/* fault is on a different page of the same range
	 * or fault is skipped to recover later
	 * or fault is on an invalid virtual address
	 */
	if (gpuidx == MAX_GPU_INSTANCE) {
		uint32_t gpuid;
		int r;

		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
		if (r < 0)
			return;
	}

	/* fault is recovered
	 * or fault cannot recover because GPU has no access to the range
	 */
	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
	if (pdd)
		WRITE_ONCE(pdd->faults, pdd->faults + 1);
}

static bool
svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
	unsigned long requested = VM_READ;

	if (write_fault)
		requested |= VM_WRITE;

	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
		 vma->vm_flags);
	return (vma->vm_flags & requested) == requested;
}
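
/*
 * For example, a write retry fault on a PROT_READ VMA yields
 * requested = VM_READ | VM_WRITE, the check above fails, and the caller
 * returns -EPERM instead of looping on an unrecoverable fault.
 */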

int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
			uint64_t addr, bool write_fault)
{
	struct mm_struct *mm = NULL;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	ktime_t timestamp = ktime_get_boottime();
	int32_t best_loc;
	int32_t gpuidx = MAX_GPU_INSTANCE;
	bool write_locked = false;
	struct vm_area_struct *vma;
	bool migration = false;
	int r = 0;

	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
		pr_debug("device does not support SVM\n");
		return -EFAULT;
	}

	p = kfd_lookup_process_by_pasid(pasid);
	if (!p) {
		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
		return 0;
	}
	svms = &p->svms;

	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);

	if (atomic_read(&svms->drain_pagefaults)) {
		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
		r = 0;
		goto out;
	}

	if (!p->xnack_enabled) {
		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
		r = -EFAULT;
		goto out;
	}

	/* p->lead_thread is available as kfd_process_wq_release flushes the
	 * work before releasing the task ref.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p failed to get mm\n", svms);
		r = 0;
		goto out;
	}

	mmap_read_lock(mm);
retry_write_locked:
	mutex_lock(&svms->lock);
	prange = svm_range_from_addr(svms, addr, NULL);
	if (!prange) {
		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
			 svms, addr);
		if (!write_locked) {
			/* Need the write lock to create a new range with MMU
			 * notifier. Also flush pending deferred work to make
			 * sure the interval tree is up to date before we add
			 * a new range.
			 */
			mutex_unlock(&svms->lock);
			mmap_read_unlock(mm);
			mmap_write_lock(mm);
			write_locked = true;
			goto retry_write_locked;
		}
		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
		if (!prange) {
			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
				 svms, addr);
			mmap_write_downgrade(mm);
			r = -EFAULT;
			goto out_unlock_svms;
		}
	}
	if (write_locked)
		mmap_write_downgrade(mm);

	mutex_lock(&prange->migrate_mutex);

	if (svm_range_skip_recover(prange)) {
		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
		r = 0;
		goto out_unlock_range;
	}

	/* skip duplicate vm fault on different pages of same range */
	if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
				AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
			 svms, prange->start, prange->last);
		r = 0;
		goto out_unlock_range;
	}

	/* __do_munmap removed the VMA; return success as we are handling a
	 * stale retry fault.
	 */
	vma = vma_lookup(mm, addr << PAGE_SHIFT);
	if (!vma) {
		pr_debug("address 0x%llx VMA is removed\n", addr);
		r = 0;
		goto out_unlock_range;
	}

	if (!svm_fault_allowed(vma, write_fault)) {
		pr_debug("fault addr 0x%llx no %s permission\n", addr,
			 write_fault ? "write" : "read");
		r = -EPERM;
		goto out_unlock_range;
	}

	best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
	if (best_loc == -1) {
		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
			 svms, prange->start, prange->last);
		r = -EACCES;
		goto out_unlock_range;
	}

	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
		 svms, prange->start, prange->last, best_loc,
		 prange->actual_loc);

	kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
				       write_fault, timestamp);

	if (prange->actual_loc != best_loc) {
		migration = true;
		if (best_loc) {
			r = svm_migrate_to_vram(prange, best_loc, mm,
					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
			if (r) {
				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
					 r, addr);
				/* Fallback to system memory if migration to
				 * VRAM failed
				 */
				if (prange->actual_loc)
					r = svm_migrate_vram_to_ram(prange, mm,
					   KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
					   NULL);
				else
					r = 0;
			}
		} else {
			r = svm_migrate_vram_to_ram(prange, mm,
					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
					NULL);
		}
		if (r) {
			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
				 r, svms, prange->start, prange->last);
			goto out_unlock_range;
		}
	}

	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
	if (r)
		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
			 r, svms, prange->start, prange->last);

	kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
				     migration);

out_unlock_range:
	mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&svms->lock);
	mmap_read_unlock(mm);

	svm_range_count_fault(adev, p, gpuidx);

	mmput(mm);
out:
	kfd_unref_process(p);

	if (r == -EAGAIN) {
		pr_debug("recover vm fault later\n");
		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
		r = 0;
	}
	return r;
}
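
/*
 * Recovery summary for the function above, assuming xnack is enabled:
 * find (or create) the range covering the faulting page, pick the best
 * restore location, migrate the pages there if they live somewhere else,
 * then validate and map just the faulting GPU.  Exits taken after the
 * range lookup funnel through svm_range_count_fault(), so per-device
 * fault accounting stays correct even for duplicate or stale faults.
 */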

int
svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
{
	struct svm_range *prange, *pchild;
	uint64_t reserved_size = 0;
	uint64_t size;
	int r = 0;

	pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);

	mutex_lock(&p->svms.lock);

	list_for_each_entry(prange, &p->svms.list, list) {
		svm_range_lock(prange);
		list_for_each_entry(pchild, &prange->child_list, child_list) {
			size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
			if (xnack_enabled) {
				amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
			} else {
				r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
				if (r)
					goto out_unlock;
				reserved_size += size;
			}
		}

		size = (prange->last - prange->start + 1) << PAGE_SHIFT;
		if (xnack_enabled) {
			amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
				KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
		} else {
			r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
				KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
			if (r)
				goto out_unlock;
			reserved_size += size;
		}
out_unlock:
		svm_range_unlock(prange);
		if (r)
			break;
	}

	if (r)
		amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
	else
		/* Changing the xnack mode must be inside the svms lock, to
		 * avoid racing with svm_range_deferred_list_work unreserving
		 * memory in parallel.
		 */
		p->xnack_enabled = xnack_enabled;

	mutex_unlock(&p->svms.lock);

	return r;
}

void svm_range_list_fini(struct kfd_process *p)
{
	struct svm_range *prange;
	struct svm_range *next;

	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);

	cancel_delayed_work_sync(&p->svms.restore_work);

	/* Ensure list work is finished before process is destroyed */
	flush_work(&p->svms.deferred_list_work);

	/*
	 * Ensure no retry fault comes in afterwards, as the page fault handler
	 * will not find the kfd process and take the mm lock to recover fault.
	 */
	atomic_inc(&p->svms.drain_pagefaults);
	svm_range_drain_retry_fault(&p->svms);

	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange, true);
	}

	mutex_destroy(&p->svms.lock);

	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
}

int svm_range_list_init(struct kfd_process *p)
{
	struct svm_range_list *svms = &p->svms;
	int i;

	svms->objects = RB_ROOT_CACHED;
	mutex_init(&svms->lock);
	INIT_LIST_HEAD(&svms->list);
	atomic_set(&svms->evicted_ranges, 0);
	atomic_set(&svms->drain_pagefaults, 0);
	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
	INIT_LIST_HEAD(&svms->deferred_range_list);
	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
	spin_lock_init(&svms->deferred_list_lock);

	for (i = 0; i < p->n_pdds; i++)
		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
			bitmap_set(svms->bitmap_supported, i, 1);

	return 0;
}

/**
 * svm_range_check_vm - check if virtual address range mapped already
 * @p: current kfd_process
 * @start: range start address, in pages
 * @last: range last address, in pages
 * @bo_s: mapping start address in pages if address range already mapped
 * @bo_l: mapping last address in pages if address range already mapped
 *
 * The purpose is to avoid virtual address ranges already allocated by
 * kfd_ioctl_alloc_memory_of_gpu ioctl.
 * It looks at each pdd in the kfd_process.
 *
 * Context: Process context
 *
 * Return 0 - OK, if the range is not mapped.
 * Otherwise error code:
 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct interval_tree_node *node;
	uint32_t i;
	int r;

	for (i = 0; i < p->n_pdds; i++) {
		struct amdgpu_vm *vm;

		if (!p->pdds[i]->drm_priv)
			continue;

		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		node = interval_tree_iter_first(&vm->va, start, last);
		if (node) {
			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
				 start, last);
			mapping = container_of((struct rb_node *)node,
					       struct amdgpu_bo_va_mapping, rb);
			if (bo_s && bo_l) {
				*bo_s = mapping->start;
				*bo_l = mapping->last;
			}
			amdgpu_bo_unreserve(vm->root.bo);
			return -EADDRINUSE;
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}

	return 0;
}

/**
 * svm_range_is_valid - check if virtual address range is valid
 * @p: current kfd_process
 * @start: range start address, in pages
 * @size: range size, in pages
 *
 * Valid virtual address range means it belongs to one or more VMAs
 *
 * Context: Process context
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	struct vm_area_struct *vma;
	unsigned long end;
	unsigned long start_unchg = start;

	start <<= PAGE_SHIFT;
	end = start + (size << PAGE_SHIFT);
	do {
		vma = vma_lookup(p->mm, start);
		if (!vma || (vma->vm_flags & device_vma))
			return -EFAULT;
		start = min(end, vma->vm_end);
	} while (start < end);

	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
				  NULL);
}

/**
 * svm_range_best_prefetch_location - decide the best prefetch location
 * @prange: svm range structure
 *
 * For xnack off:
 * If the range maps to a single GPU, the best prefetch location is
 * prefetch_loc, which can be CPU or GPU.
 *
 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if the mGPU
 * connection is on the same XGMI hive is the best prefetch location the
 * prefetch_loc GPU; otherwise the best prefetch location is always CPU,
 * because a GPU cannot have a coherent mapping of another GPU's VRAM even
 * with a large-BAR PCIe connection.
 *
 * For xnack on:
 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
 * prefetch_loc; other GPU access will generate a vm fault and trigger
 * migration.
 *
 * If the range is ACCESS_IN_PLACE by mGPUs, only if the mGPU connection is on
 * the same XGMI hive is the best prefetch location the prefetch_loc GPU;
 * otherwise the best prefetch location is always CPU.
 *
 * Context: Process context
 *
 * Return:
 * 0 for CPU or GPU id
 */
static uint32_t
svm_range_best_prefetch_location(struct svm_range *prange)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	uint32_t best_loc = prange->prefetch_loc;
	struct kfd_process_device *pdd;
	struct amdgpu_device *bo_adev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
		goto out;

	bo_adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!bo_adev) {
		WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
		best_loc = 0;
		goto out;
	}

	if (p->xnack_enabled)
		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
	else
		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
			  MAX_GPU_INSTANCE);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
			continue;
		}

		if (pdd->dev->adev == bo_adev)
			continue;

		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
			best_loc = 0;
			break;
		}
	}

out:
	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
		 p->xnack_enabled, &p->svms, prange->start, prange->last,
		 best_loc);

	return best_loc;
}
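
/*
 * Example: with xnack off, a range accessible by GPU0 and GPU1 with
 * prefetch_loc = GPU0 keeps best_loc = GPU0 only if GPU1 is in the same
 * XGMI hive as GPU0; over plain PCIe the loop above demotes best_loc to
 * 0 (CPU), since GPU1 cannot coherently map GPU0's VRAM.
 */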

/* svm_range_trigger_migration - start page migration if prefetch loc changed
 * @mm: current process mm_struct
 * @prange: svm range structure
 * @migrated: output, true if migration is triggered
 *
 * If the range prefetch_loc is GPU and the actual loc is cpu 0, migrate the
 * range from ram to vram.
 * If the range prefetch_loc is cpu 0 and the actual loc is GPU, migrate the
 * range from vram to ram.
 *
 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 * notifier callback:
 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
 *    svm_range_evict stops all queues and schedules restore work
 * 2. svm_range_restore_work waits for migration to be done by
 *    a. svm_range_validate_vram taking prange->migrate_mutex
 *    b. svm_range_validate_ram HMM get pages waiting for the CPU fault
 *       handler to return
 * 3. restore work updates the GPU mappings and resumes all queues.
 *
 * Context: Process context
 *
 * Return:
 * 0 - OK, otherwise - error code of migration
 */
static int
svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
			    bool *migrated)
{
	uint32_t best_loc;
	int r = 0;

	*migrated = false;
	best_loc = svm_range_best_prefetch_location(prange);

	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
	    best_loc == prange->actual_loc)
		return 0;

	if (!best_loc) {
		r = svm_migrate_vram_to_ram(prange, mm,
					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
		*migrated = !r;
		return r;
	}

	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
	*migrated = !r;

	return r;
}
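
/*
 * Callers such as svm_range_set_attr() below invoke this under
 * prange->migrate_mutex and use *migrated to decide whether the restore
 * worker will redo the GPU mappings or whether the caller must call
 * svm_range_validate_and_map() itself.
 */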

int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(&fence->base))
		return 0;

	if (fence->svm_bo) {
		WRITE_ONCE(fence->svm_bo->evicting, 1);
		schedule_work(&fence->svm_bo->eviction_work);
	}

	return 0;
}

static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
	if (!svm_bo_ref_unless_zero(svm_bo))
		return; /* svm_bo was freed while eviction was pending */

	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
		mm = svm_bo->eviction_fence->mm;
	} else {
		svm_range_bo_unref(svm_bo);
		return;
	}

	mmap_read_lock(mm);
	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list) && !r) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						 struct svm_range, svm_bo_list);
		int retries = 3;

		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);

		mutex_lock(&prange->migrate_mutex);
		do {
			r = svm_migrate_vram_to_ram(prange, mm,
					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
		} while (!r && prange->actual_loc && --retries);

		if (!r && prange->actual_loc)
			pr_info_once("Migration failed during eviction");

		if (!prange->actual_loc) {
			mutex_lock(&prange->lock);
			prange->svm_bo = NULL;
			mutex_unlock(&prange->lock);
		}
		mutex_unlock(&prange->migrate_mutex);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	mmap_read_unlock(mm);
	mmput(mm);

	dma_fence_signal(&svm_bo->eviction_fence->base);

	/* This is the last reference to svm_bo, after svm_range_vram_node_free
	 * has been called in svm_migrate_vram_to_ram
	 */
	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
	svm_range_bo_unref(svm_bo);
}

static int
svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
		   uint64_t start, uint64_t size, uint32_t nattr,
		   struct kfd_ioctl_svm_attribute *attrs)
{
	struct amdkfd_process_info *process_info = p->kgd_process_info;
	struct list_head update_list;
	struct list_head insert_list;
	struct list_head remove_list;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct svm_range *next;
	bool update_mapping = false;
	bool flush_tlb;
	int r = 0;

	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
		 p->pasid, &p->svms, start, start + size - 1, size);

	r = svm_range_check_attr(p, nattr, attrs);
	if (r)
		return r;

	svms = &p->svms;

	mutex_lock(&process_info->lock);

	svm_range_list_lock_and_flush_work(svms, mm);

	r = svm_range_is_valid(p, start, size);
	if (r) {
		pr_debug("invalid range r=%d\n", r);
		mmap_write_unlock(mm);
		goto out;
	}

	mutex_lock(&svms->lock);

	/* Add new range and split existing ranges as needed */
	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
			  &insert_list, &remove_list);
	if (r) {
		mutex_unlock(&svms->lock);
		mmap_write_unlock(mm);
		goto out;
	}
	/* Apply changes as a transaction */
	list_for_each_entry_safe(prange, next, &insert_list, list) {
		svm_range_add_to_svms(prange);
		svm_range_add_notifier_locked(mm, prange);
	}
	list_for_each_entry(prange, &update_list, update_list) {
		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
		/* TODO: unmap ranges from GPU that lost access */
	}
	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
			 prange->svms, prange, prange->start,
			 prange->last);
		svm_range_unlink(prange);
		svm_range_remove_notifier(prange);
		svm_range_free(prange, false);
	}

	mmap_write_downgrade(mm);
	/* Trigger migrations and revalidate and map to GPUs as needed. If
	 * this fails we may be left with partially completed actions. There
	 * is no clean way of rolling back to the previous state in such a
	 * case because the rollback wouldn't be guaranteed to work either.
	 */
	list_for_each_entry(prange, &update_list, update_list) {
		bool migrated;

		mutex_lock(&prange->migrate_mutex);

		r = svm_range_trigger_migration(mm, prange, &migrated);
		if (r)
			goto out_unlock_range;

		if (migrated && (!p->xnack_enabled ||
		    (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
		    prange->mapped_to_gpu) {
			pr_debug("restore_work will update mappings of GPUs\n");
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}

		if (!migrated && !update_mapping) {
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}

		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;

		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
					       true, true, flush_tlb);
		if (r)
			pr_debug("failed %d to map svm range\n", r);

out_unlock_range:
		mutex_unlock(&prange->migrate_mutex);
		if (r)
			break;
	}

	svm_range_debug_dump(svms);

	mutex_unlock(&svms->lock);
	mmap_read_unlock(mm);
out:
	mutex_unlock(&process_info->lock);

	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
		 &p->svms, start, start + size - 1, r);

	return r;
}
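
/*
 * Userspace reaches the function above through AMDKFD_IOC_SVM with
 * op = KFD_IOCTL_SVM_OP_SET_ATTR; a rough sketch of such a call (field
 * names per the uapi header, abbreviated):
 *
 *	struct kfd_ioctl_svm_args *args = ...; // nattr attrs appended
 *	args->start_addr = addr;
 *	args->size = size;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *
 * start and size arrive in bytes and are converted to pages in
 * svm_ioctl() at the bottom of this file before this function runs.
 */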

static int
svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
		   uint64_t start, uint64_t size, uint32_t nattr,
		   struct kfd_ioctl_svm_attribute *attrs)
{
	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
	bool get_preferred_loc = false;
	bool get_prefetch_loc = false;
	bool get_granularity = false;
	bool get_accessible = false;
	bool get_flags = false;
	uint64_t last = start + size - 1UL;
	uint8_t granularity = 0xff;
	struct interval_tree_node *node;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	uint32_t flags_and = 0xffffffff;
	uint32_t flags_or = 0;
	int gpuidx;
	uint32_t i;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
		 start + size - 1, nattr);

	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap). Concurrent memory
	 * map changes can still race with get_attr because we don't hold the
	 * mmap lock. But that would be a race condition in the application
	 * anyway, and undefined behaviour is acceptable in that case.
	 */
	flush_work(&p->svms.deferred_list_work);

	mmap_read_lock(mm);
	r = svm_range_is_valid(p, start, size);
	mmap_read_unlock(mm);
	if (r) {
		pr_debug("invalid range r=%d\n", r);
		return r;
	}

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			get_preferred_loc = true;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			get_prefetch_loc = true;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
			get_accessible = true;
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			get_flags = true;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			get_granularity = true;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			fallthrough;
		default:
			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}
	}

	svms = &p->svms;

	mutex_lock(&svms->lock);

	node = interval_tree_iter_first(&svms->objects, start, last);
	if (!node) {
		pr_debug("range attrs not found return default values\n");
		svm_range_set_default_attributes(&location, &prefetch_loc,
						 &granularity, &flags_and);
		flags_or = flags_and;
		if (p->xnack_enabled)
			bitmap_copy(bitmap_access, svms->bitmap_supported,
				    MAX_GPU_INSTANCE);
		else
			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
		goto fill_values;
	}
	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);

	while (node) {
		struct interval_tree_node *next;

		prange = container_of(node, struct svm_range, it_node);
		next = interval_tree_iter_next(node, start, last);

		if (get_preferred_loc) {
			if (prange->preferred_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     location != prange->preferred_loc)) {
				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_preferred_loc = false;
			} else {
				location = prange->preferred_loc;
			}
		}
		if (get_prefetch_loc) {
			if (prange->prefetch_loc ==
					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			     prefetch_loc != prange->prefetch_loc)) {
				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
				get_prefetch_loc = false;
			} else {
				prefetch_loc = prange->prefetch_loc;
			}
		}
		if (get_accessible) {
			bitmap_and(bitmap_access, bitmap_access,
				   prange->bitmap_access, MAX_GPU_INSTANCE);
			bitmap_and(bitmap_aip, bitmap_aip,
				   prange->bitmap_aip, MAX_GPU_INSTANCE);
		}
		if (get_flags) {
			flags_and &= prange->flags;
			flags_or |= prange->flags;
		}

		if (get_granularity && prange->granularity < granularity)
			granularity = prange->granularity;

		node = next;
	}
fill_values:
	mutex_unlock(&svms->lock);

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			attrs[i].value = location;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			attrs[i].value = prefetch_loc;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (gpuidx < 0) {
				pr_debug("invalid gpuid %x\n", attrs[i].value);
				return -EINVAL;
			}
			if (test_bit(gpuidx, bitmap_access))
				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
			else if (test_bit(gpuidx, bitmap_aip))
				attrs[i].type =
					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
			else
				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			attrs[i].value = flags_and;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			attrs[i].value = ~flags_or;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			attrs[i].value = (uint32_t)granularity;
			break;
		}
	}

	return 0;
}

int kfd_criu_resume_svm(struct kfd_process *p)
{
	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	struct criu_svm_metadata *next = NULL;
	uint32_t set_flags = 0xffffffff;
	int i, j, num_attrs, ret = 0;
	uint64_t set_attr_size;
	struct mm_struct *mm;

	if (list_empty(&svms->criu_svm_metadata_list)) {
		pr_debug("No SVM data from CRIU restore stage 2\n");
		return ret;
	}

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);

	i = 0;
	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);

		for (j = 0; j < num_attrs; j++) {
			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
				 i, j, criu_svm_md->data.attrs[j].type,
				 i, j, criu_svm_md->data.attrs[j].value);
			switch (criu_svm_md->data.attrs[j].type) {
			/* During a Checkpoint operation, the query for
			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
			 * not used by the range which was checkpointed. Care
			 * must be taken not to restore with an invalid value,
			 * otherwise the gpuidx value will be invalid and
			 * set_attr would eventually fail, so replace those
			 * with another dummy attribute such as
			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
			 */
			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
				if (criu_svm_md->data.attrs[j].value ==
				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
					criu_svm_md->data.attrs[j].type =
						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
					criu_svm_md->data.attrs[j].value = 0;
				}
				break;
			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
				set_flags = criu_svm_md->data.attrs[j].value;
				break;
			default:
				break;
			}
		}

		/* CLR_FLAGS is not available via get_attr during checkpoint
		 * but it needs to be inserted before restoring the ranges, so
		 * allocate extra space for it before calling set_attr
		 */
		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
						(num_attrs + 1);
		set_attr_new = krealloc(set_attr, set_attr_size,
					GFP_KERNEL);
		if (!set_attr_new) {
			ret = -ENOMEM;
			goto exit;
		}
		set_attr = set_attr_new;

		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
					sizeof(struct kfd_ioctl_svm_attribute));
		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
		set_attr[num_attrs].value = ~set_flags;

		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
					 criu_svm_md->data.size, num_attrs + 1,
					 set_attr);
		if (ret) {
			pr_err("CRIU: failed to set range attributes\n");
			goto exit;
		}

		i++;
	}
exit:
	kfree(set_attr);
	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
			 criu_svm_md->data.start_addr);
		kfree(criu_svm_md);
	}

	mmput(mm);
	return ret;
}

int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size)
{
	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	uint32_t num_devices;
	int ret = 0;

	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; that check must be
	 * done earlier, while evaluating the topology.
	 */

	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
		(nattr_common + nattr_accessibility * num_devices);
	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;

	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
								svm_attrs_size;

	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
	if (!criu_svm_md) {
		pr_err("failed to allocate memory to store svm metadata\n");
		return -ENOMEM;
	}
	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
			     svm_priv_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += svm_priv_data_size;

	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);

	return 0;

exit:
	kfree(criu_svm_md);
	return ret;
}

int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size)
{
	uint64_t total_size, accessibility_size, common_attr_size;
	int nattr_common = 4, nattr_accessibility = 1;
	int num_devices = p->n_pdds;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t count = 0;

	*svm_priv_data_size = 0;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mutex_lock(&svms->lock);
	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1);
		count++;
	}
	mutex_unlock(&svms->lock);

	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for all the
	 * gpus individually; the remaining ones span the entire process
	 * regardless of the various gpu nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
	 *
	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
	 * (Considered as one, type is altered during query, value is gpuid)
	 * KFD_IOCTL_SVM_ATTR_ACCESS
	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
	 */
	if (*num_svm_ranges > 0) {
		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_common;
		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_accessibility * num_devices;

		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			common_attr_size + accessibility_size;

		*svm_priv_data_size = *num_svm_ranges * total_size;
	}

	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
		 *svm_priv_data_size);
	return 0;
}
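
/*
 * Size arithmetic example for the above: on a 2-GPU system each range
 * needs 4 common attributes plus 1 accessibility attribute per GPU, so
 * total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
 * 6 * sizeof(struct kfd_ioctl_svm_attribute), multiplied by the number
 * of ranges.
 */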

int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_data_offset)
{
	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
	struct kfd_ioctl_svm_attribute *query_attr = NULL;
	uint64_t svm_priv_data_size, query_attr_size = 0;
	int index, nattr_common = 4, ret = 0;
	struct svm_range_list *svms;
	int num_devices = p->n_pdds;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
				(nattr_common + num_devices);

	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
	if (!query_attr) {
		ret = -ENOMEM;
		goto exit;
	}

	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;

	for (index = 0; index < num_devices; index++) {
		struct kfd_process_device *pdd = p->pdds[index];

		query_attr[index + nattr_common].type =
			KFD_IOCTL_SVM_ATTR_ACCESS;
		query_attr[index + nattr_common].value = pdd->user_gpu_id;
	}

	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;

	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
	if (!svm_priv) {
		ret = -ENOMEM;
		goto exit_query;
	}

	list_for_each_entry(prange, &svms->list, list) {
		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
		svm_priv->start_addr = prange->start;
		svm_priv->size = prange->npages;
		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->npages * PAGE_SIZE);

		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
					 svm_priv->size,
					 (nattr_common + num_devices),
					 svm_priv->attrs);
		if (ret) {
			pr_err("CRIU: failed to obtain range attributes\n");
			goto exit_priv;
		}

		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
				 svm_priv_data_size)) {
			pr_err("Failed to copy svm priv to user\n");
			ret = -EFAULT;
			goto exit_priv;
		}

		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
	kfree(svm_priv);
exit_query:
	kfree(query_attr);
exit:
	mmput(mm);
	return ret;
}

static int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
	struct mm_struct *mm = current->mm;
	int r;

	start >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
		break;
	case KFD_IOCTL_SVM_OP_GET_ATTR:
		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);