/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"

#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

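/*
 * Illustrative sketch, not part of the driver flow: an ordered workqueue
 * executes at most one work item at a time, in queueing order, so
 *
 *	queue_delayed_work(kfd_restore_wq, &p1->restore_work, 0);
 *	queue_delayed_work(kfd_restore_wq, &p2->restore_work, 0);
 *
 * finishes restoring p1 completely before p2 starts, which avoids the
 * BO-validation live-lock described above.
 */
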
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;

	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency. To read the SDMA stats, we need to
	 * do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from
	 *    qpd->queues_list, under dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without
	 *    dqm_lock. Save the SDMA count for each node and also add the
	 *    count to the running total.
	 *    It's possible that a few SDMA queue nodes get deleted from
	 *    qpd->queues_list during this step.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes
	 *    got deleted. If a node was deleted, its SDMA count has already
	 *    been captured in the past activity counter, so subtract the
	 *    count saved in step 2 for this node from the total.
	 */
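	/*
	 * Worked example with made-up numbers: queues A and B report 100
	 * and 50 in step 2 (total = 150). If B is destroyed before step 3,
	 * its 50 has already been folded into pdd->sdma_past_activity_counter
	 * by the queue-destroy path, so step 3 subtracts the 50 saved for B
	 * to avoid double counting, leaving 100 plus the past activity.
	 */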
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second pass over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translates acquired wave count into number of compute
 * units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The
 * attribute handle encapsulates the GPU device it is associated with,
 * thereby allowing collection of waves in flight, etc.
 *
 * @buffer: Handle of user-provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
				       &max_waves_per_cu);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

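/*
 * Worked example with made-up numbers: with wave_cnt = 90 and
 * max_waves_per_cu = 40, the rounding-up division above yields
 * (90 + 39) / 40 = 3 occupied compute units.
 */
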
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter) /
				SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

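/*
 * Example of reading these attributes from user space; the exact sysfs
 * path is an assumption based on the "kfd" device and the "proc" kobject
 * created in kfd_procfs_init():
 *
 *	$ cat /sys/class/kfd/kfd/proc/<pid>/pasid
 *	$ cat /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid>
 *
 * Reading sdma_<gpuid> blocks in flush_work() above until the worker has
 * finished gathering the counters.
 */
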
static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM:
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

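/*
 * Illustrative pairing at module init/exit (caller names outside this
 * file are assumptions):
 *
 *	if (kfd_process_create_wq())
 *		return -ENOMEM;
 *	...
 *	kfd_process_destroy_wq();
 */
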
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM memory for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->drm_priv, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the BO removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

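/*
 * Typical caller flow (a sketch modeled on the /dev/kfd open path;
 * names outside this file are assumptions):
 *
 *	p = kfd_create_process(filep);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	...
 *	kfd_unref_process(p);	// drops the reference taken above
 */
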
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
				pdd->dev->kgd, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend.
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	int i;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);
	cancel_delayed_work_sync(&p->svms.restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

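/*
 * Lifetime sketch tying the three callbacks together: create_process()
 * registers the notifier with mmu_notifier_get(); at process exit the
 * kernel calls kfd_process_notifier_release(), which tears down queues
 * and does the final mmu_notifier_put(); the put ends up in
 * kfd_process_free_notifier(), dropping the last kfd_process reference.
 */
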
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (dev->device_info->asic_family < CHIP_VEGA10)
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported &&
		    dev->device_info->asic_family == CHIP_ALDEBARAN)
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (dev->device_info->asic_family >= CHIP_NAVI10)
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

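	/*
	 * Worked example with made-up values: if the reserved range is
	 * 0x0b0-0x0df and KFD_QUEUE_DOORBELL_MIRROR_OFFSET is 0x200, the
	 * loop below marks doorbells 0x0b0-0x0df and 0x2b0-0x2df as
	 * unavailable for user process queues.
	 */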
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
		pr_err("Failed to alloc doorbell for pdd\n");
		goto err_free_pdd;
	}

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	atomic64_set(&pdd->evict_duration_counter, 0);
	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
		dev->kgd, drm_file, p->pasid,
		&p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	pdd->drm_priv = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created,
	 * until pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime_usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

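/*
 * Sketch of the handle life cycle (illustrative only; the caller must
 * hold the process lock as noted above):
 *
 *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *	...
 *	mem = kfd_process_device_translate_handle(pdd, handle);
 *	kfd_process_device_remove_obj_handle(pdd, handle);
 */
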
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
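/*
 * Sketch of the idea (values illustrative): if a suspend and an MMU
 * notifier both evict the same process, the per-device eviction count
 * goes 0 -> 1 -> 2; the queues only run again after both callers have
 * issued the matching restore and the count drops back to 0.
 */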
int kfd_process_evict_queues(struct kfd_process *p)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->dev->id)
			return i;

	return -EINVAL;
}

int
kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev,
			   uint32_t *gpuid, uint32_t *gpuidx)
{
	struct kgd_dev *kgd = (struct kgd_dev *)adev;
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
			*gpuid = p->pdds[i]->dev->id;
			*gpuidx = i;
			return 0;
		}

	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again. But
	 * restore has a few more steps to finish. So let's wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct kfd_dev *dev = pdd->dev;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
						  pdd->process->pasid, type);
	}
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif