// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = kfd_open,
	.release = kfd_release,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;
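
/*
 * Look up the per-device data (PDD) for @gpu_id and return it with the
 * process mutex held; callers release it again with kfd_unlock_pdd().
 */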
static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;

	mutex_unlock(&p->mutex);
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}
void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
	kfd_device = NULL;
}
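
/*
 * Open /dev/kfd: reject 32-bit callers, create (or look up) the KFD
 * process for the calling task and stash it in file->private_data.
 * Returns -EAGAIN while KFD is locked (e.g. during GPU reset).
 */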
static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(filep);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_is_locked()) {
		dev_dbg(kfd_device, "kfd is locked!\n"
				"process %d unreferenced", process->pasid);
		kfd_unref_process(process);
		return -EAGAIN;
	}

	/* filep now owns the reference returned by kfd_create_process */
	filep->private_data = process;

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}
static int kfd_release(struct inode *inode, struct file *filep)
{
	struct kfd_process *process = filep->private_data;

	if (process)
		kfd_unref_process(process);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}
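
/*
 * Validate the user-supplied create-queue arguments and translate them
 * into a struct queue_properties: percentage/priority bounds, pointer
 * accessibility (access_ok), power-of-two ring size, queue type/format.
 */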
186 static int set_queue_properties_from_user(struct queue_properties *q_properties,
187 struct kfd_ioctl_create_queue_args *args)
189 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
190 pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
194 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
195 pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
199 if ((args->ring_base_address) &&
200 (!access_ok((const void __user *) args->ring_base_address,
201 sizeof(uint64_t)))) {
202 pr_err("Can't access ring base address\n");
206 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
207 pr_err("Ring size must be a power of 2 or 0\n");
211 if (!access_ok((const void __user *) args->read_pointer_address,
213 pr_err("Can't access read pointer\n");
217 if (!access_ok((const void __user *) args->write_pointer_address,
219 pr_err("Can't access write pointer\n");
223 if (args->eop_buffer_address &&
224 !access_ok((const void __user *) args->eop_buffer_address,
226 pr_debug("Can't access eop buffer");
230 if (args->ctx_save_restore_address &&
231 !access_ok((const void __user *) args->ctx_save_restore_address,
233 pr_debug("Can't access ctx save restore buffer");
237 q_properties->is_interop = false;
238 q_properties->is_gws = false;
239 q_properties->queue_percent = args->queue_percentage;
240 q_properties->priority = args->queue_priority;
241 q_properties->queue_address = args->ring_base_address;
242 q_properties->queue_size = args->ring_size;
243 q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
244 q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
245 q_properties->eop_ring_buffer_address = args->eop_buffer_address;
246 q_properties->eop_ring_buffer_size = args->eop_buffer_size;
247 q_properties->ctx_save_restore_area_address =
248 args->ctx_save_restore_address;
249 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
250 q_properties->ctl_stack_size = args->ctl_stack_size;
251 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
252 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
253 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
254 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
255 q_properties->type = KFD_QUEUE_TYPE_SDMA;
256 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
257 q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
261 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
262 q_properties->format = KFD_QUEUE_FORMAT_AQL;
264 q_properties->format = KFD_QUEUE_FORMAT_PM4;
266 pr_debug("Queue Percentage: %d, %d\n",
267 q_properties->queue_percent, args->queue_percentage);
269 pr_debug("Queue Priority: %d, %d\n",
270 q_properties->priority, args->queue_priority);
272 pr_debug("Queue Address: 0x%llX, 0x%llX\n",
273 q_properties->queue_address, args->ring_base_address);
275 pr_debug("Queue Size: 0x%llX, %u\n",
276 q_properties->queue_size, args->ring_size);
278 pr_debug("Queue r/w Pointers: %px, %px\n",
279 q_properties->read_ptr,
280 q_properties->write_ptr);
282 pr_debug("Queue Format: %d\n", q_properties->format);
284 pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}
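
/*
 * AMDKFD_IOC_CREATE_QUEUE: validate the arguments, bind the process to
 * the target GPU and create a user-mode queue through the process queue
 * manager.  When MES is enabled (API version >= 2, GFX11 and later) the
 * write-pointer BO is additionally mapped to GART so the scheduler can
 * poll it for oversubscribed queues.
 */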
292 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
295 struct kfd_ioctl_create_queue_args *args = data;
298 unsigned int queue_id;
299 struct kfd_process_device *pdd;
300 struct queue_properties q_properties;
301 uint32_t doorbell_offset_in_process = 0;
302 struct amdgpu_bo *wptr_bo = NULL;
304 memset(&q_properties, 0, sizeof(struct queue_properties));
306 pr_debug("Creating queue ioctl\n");
308 err = set_queue_properties_from_user(&q_properties, args);
312 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
314 mutex_lock(&p->mutex);
316 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
318 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
324 pdd = kfd_bind_process_to_device(dev, p);
327 goto err_bind_process;
	/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
	 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
	 */
333 if (dev->shared_resources.enable_mes &&
334 ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
335 >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
336 struct amdgpu_bo_va_mapping *wptr_mapping;
337 struct amdgpu_vm *wptr_vm;
339 wptr_vm = drm_priv_to_vm(pdd->drm_priv);
340 err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
342 goto err_wptr_map_gart;
344 wptr_mapping = amdgpu_vm_bo_lookup_mapping(
345 wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
346 amdgpu_bo_unreserve(wptr_vm->root.bo);
348 pr_err("Failed to lookup wptr bo\n");
350 goto err_wptr_map_gart;
353 wptr_bo = wptr_mapping->bo_va->base.bo;
354 if (wptr_bo->tbo.base.size > PAGE_SIZE) {
355 pr_err("Requested GART mapping for wptr bo larger than one page\n");
357 goto err_wptr_map_gart;
360 err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
362 pr_err("Failed to map wptr bo to GART\n");
363 goto err_wptr_map_gart;
367 pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
371 err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
372 NULL, NULL, NULL, &doorbell_offset_in_process);
374 goto err_create_queue;
376 args->queue_id = queue_id;
379 /* Return gpu_id as doorbell offset for mmap usage */
380 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
381 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	if (KFD_IS_SOC15(dev))
		/* On SOC15 ASICs, include the doorbell offset within the
		 * process doorbell frame, which is 2 pages.
		 */
		args->doorbell_offset |= doorbell_offset_in_process;
388 mutex_unlock(&p->mutex);
390 pr_debug("Queue id %d was created successfully\n", args->queue_id);
392 pr_debug("Ring buffer address == 0x%016llX\n",
393 args->ring_base_address);
395 pr_debug("Read ptr address == 0x%016llX\n",
396 args->read_pointer_address);
398 pr_debug("Write ptr address == 0x%016llX\n",
399 args->write_pointer_address);
405 amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
409 mutex_unlock(&p->mutex);
413 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
417 struct kfd_ioctl_destroy_queue_args *args = data;
419 pr_debug("Destroying queue id %d for pasid 0x%x\n",
423 mutex_lock(&p->mutex);
425 retval = pqm_destroy_queue(&p->pqm, args->queue_id);
427 mutex_unlock(&p->mutex);
431 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
435 struct kfd_ioctl_update_queue_args *args = data;
436 struct queue_properties properties;
438 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
439 pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
443 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
444 pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
448 if ((args->ring_base_address) &&
449 (!access_ok((const void __user *) args->ring_base_address,
450 sizeof(uint64_t)))) {
451 pr_err("Can't access ring base address\n");
455 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
456 pr_err("Ring size must be a power of 2 or 0\n");
460 properties.queue_address = args->ring_base_address;
461 properties.queue_size = args->ring_size;
462 properties.queue_percent = args->queue_percentage;
463 properties.priority = args->queue_priority;
465 pr_debug("Updating queue id %d for pasid 0x%x\n",
466 args->queue_id, p->pasid);
	mutex_lock(&p->mutex);

	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}
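
/*
 * AMDKFD_IOC_SET_CU_MASK: copy a per-queue compute-unit mask from user
 * space (a multiple of 32 bits, capped at 1024 bits) and apply it to the
 * queue's MQD via pqm_update_mqd().
 */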
477 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
481 const int max_num_cus = 1024;
482 struct kfd_ioctl_set_cu_mask_args *args = data;
483 struct mqd_update_info minfo = {0};
484 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
485 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
487 if ((args->num_cu_mask % 32) != 0) {
488 pr_debug("num_cu_mask 0x%x must be a multiple of 32",
493 minfo.cu_mask.count = args->num_cu_mask;
494 if (minfo.cu_mask.count == 0) {
495 pr_debug("CU mask cannot be 0");
	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits. We can then just drop any CU mask bits
	 * past max_num_cus bits and just use the first max_num_cus bits.
	 */
503 if (minfo.cu_mask.count > max_num_cus) {
504 pr_debug("CU mask cannot be greater than 1024 bits");
505 minfo.cu_mask.count = max_num_cus;
506 cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
509 minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
510 if (!minfo.cu_mask.ptr)
513 retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
515 pr_debug("Could not copy CU mask from userspace");
520 minfo.update_flag = UPDATE_FLAG_CU_MASK;
522 mutex_lock(&p->mutex);
524 retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
526 mutex_unlock(&p->mutex);
529 kfree(minfo.cu_mask.ptr);
533 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
534 struct kfd_process *p, void *data)
536 struct kfd_ioctl_get_queue_wave_state_args *args = data;
539 mutex_lock(&p->mutex);
541 r = pqm_get_wave_state(&p->pqm, args->queue_id,
542 (void __user *)args->ctl_stack_address,
543 &args->ctl_stack_used_size,
544 &args->save_area_used_size);
546 mutex_unlock(&p->mutex);
551 static int kfd_ioctl_set_memory_policy(struct file *filep,
552 struct kfd_process *p, void *data)
554 struct kfd_ioctl_set_memory_policy_args *args = data;
556 struct kfd_process_device *pdd;
557 enum cache_policy default_policy, alternate_policy;
559 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
560 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
564 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
565 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
569 mutex_lock(&p->mutex);
570 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
572 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
577 pdd = kfd_bind_process_to_device(pdd->dev, p);
583 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
584 ? cache_policy_coherent : cache_policy_noncoherent;
587 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
588 ? cache_policy_coherent : cache_policy_noncoherent;
590 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
594 (void __user *)args->alternate_aperture_base,
595 args->alternate_aperture_size))
600 mutex_unlock(&p->mutex);
605 static int kfd_ioctl_set_trap_handler(struct file *filep,
606 struct kfd_process *p, void *data)
608 struct kfd_ioctl_set_trap_handler_args *args = data;
610 struct kfd_process_device *pdd;
612 mutex_lock(&p->mutex);
614 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
620 pdd = kfd_bind_process_to_device(pdd->dev, p);
626 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
630 mutex_unlock(&p->mutex);
static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_address_watch(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}
660 static int kfd_ioctl_get_clock_counters(struct file *filep,
661 struct kfd_process *p, void *data)
663 struct kfd_ioctl_get_clock_counters_args *args = data;
664 struct kfd_process_device *pdd;
666 mutex_lock(&p->mutex);
667 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
668 mutex_unlock(&p->mutex);
670 /* Reading GPU clock counter from KGD */
671 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
673 /* Node without GPU resource */
674 args->gpu_clock_counter = 0;
676 /* No access to rdtsc. Using raw monotonic time */
677 args->cpu_clock_counter = ktime_get_raw_ns();
678 args->system_clock_counter = ktime_get_boottime_ns();
680 /* Since the counter is in nano-seconds we use 1GHz frequency */
681 args->system_clock_freq = 1000000000;
687 static int kfd_ioctl_get_process_apertures(struct file *filp,
688 struct kfd_process *p, void *data)
690 struct kfd_ioctl_get_process_apertures_args *args = data;
691 struct kfd_process_device_apertures *pAperture;
694 dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
696 args->num_of_nodes = 0;
698 mutex_lock(&p->mutex);
699 /* Run over all pdd of the process */
700 for (i = 0; i < p->n_pdds; i++) {
701 struct kfd_process_device *pdd = p->pdds[i];
704 &args->process_apertures[args->num_of_nodes];
705 pAperture->gpu_id = pdd->dev->id;
706 pAperture->lds_base = pdd->lds_base;
707 pAperture->lds_limit = pdd->lds_limit;
708 pAperture->gpuvm_base = pdd->gpuvm_base;
709 pAperture->gpuvm_limit = pdd->gpuvm_limit;
710 pAperture->scratch_base = pdd->scratch_base;
		pAperture->scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"node id %u\n", args->num_of_nodes);
		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
730 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
733 mutex_unlock(&p->mutex);
738 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
739 struct kfd_process *p, void *data)
741 struct kfd_ioctl_get_process_apertures_new_args *args = data;
742 struct kfd_process_device_apertures *pa;
746 dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
748 if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
752 mutex_lock(&p->mutex);
753 args->num_of_nodes = p->n_pdds;
	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
761 pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
762 args->num_of_nodes), GFP_KERNEL);
766 mutex_lock(&p->mutex);
769 args->num_of_nodes = 0;
774 /* Run over all pdd of the process */
775 for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
776 struct kfd_process_device *pdd = p->pdds[i];
778 pa[i].gpu_id = pdd->dev->id;
779 pa[i].lds_base = pdd->lds_base;
780 pa[i].lds_limit = pdd->lds_limit;
781 pa[i].gpuvm_base = pdd->gpuvm_base;
782 pa[i].gpuvm_limit = pdd->gpuvm_limit;
783 pa[i].scratch_base = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
801 mutex_unlock(&p->mutex);
803 args->num_of_nodes = i;
805 (void __user *)args->kfd_process_device_apertures_ptr,
807 (i * sizeof(struct kfd_process_device_apertures)));
809 return ret ? -EFAULT : 0;
812 mutex_unlock(&p->mutex);
816 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
819 struct kfd_ioctl_create_event_args *args = data;
	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
826 if (args->event_page_offset) {
827 mutex_lock(&p->mutex);
828 err = kfd_kmap_event_page(p, args->event_page_offset);
829 mutex_unlock(&p->mutex);
834 err = kfd_event_create(filp, p, args->event_type,
835 args->auto_reset != 0, args->node_id,
836 &args->event_id, &args->event_trigger_data,
837 &args->event_page_offset,
838 &args->event_slot_index);
840 pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
844 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
847 struct kfd_ioctl_destroy_event_args *args = data;
849 return kfd_event_destroy(p, args->event_id);
852 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
855 struct kfd_ioctl_set_event_args *args = data;
857 return kfd_set_event(p, args->event_id);
860 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
863 struct kfd_ioctl_reset_event_args *args = data;
865 return kfd_reset_event(p, args->event_id);
868 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
871 struct kfd_ioctl_wait_events_args *args = data;
874 err = kfd_wait_on_events(p, args->num_events,
875 (void __user *)args->events_ptr,
876 (args->wait_for_all != 0),
877 &args->timeout, &args->wait_result);
881 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
882 struct kfd_process *p, void *data)
884 struct kfd_ioctl_set_scratch_backing_va_args *args = data;
885 struct kfd_process_device *pdd;
889 mutex_lock(&p->mutex);
890 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
897 pdd = kfd_bind_process_to_device(dev, p);
900 goto bind_process_to_device_fail;
903 pdd->qpd.sh_hidden_private_base = args->va_addr;
905 mutex_unlock(&p->mutex);
907 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
908 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
909 dev->kfd2kgd->set_scratch_backing_va(
910 dev->adev, args->va_addr, pdd->qpd.vmid);
914 bind_process_to_device_fail:
916 mutex_unlock(&p->mutex);
920 static int kfd_ioctl_get_tile_config(struct file *filep,
921 struct kfd_process *p, void *data)
923 struct kfd_ioctl_get_tile_config_args *args = data;
924 struct kfd_process_device *pdd;
925 struct tile_config config;
928 mutex_lock(&p->mutex);
929 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
930 mutex_unlock(&p->mutex);
934 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
936 args->gb_addr_config = config.gb_addr_config;
937 args->num_banks = config.num_banks;
938 args->num_ranks = config.num_ranks;
940 if (args->num_tile_configs > config.num_tile_configs)
941 args->num_tile_configs = config.num_tile_configs;
942 err = copy_to_user((void __user *)args->tile_config_ptr,
943 config.tile_config_ptr,
944 args->num_tile_configs * sizeof(uint32_t));
946 args->num_tile_configs = 0;
950 if (args->num_macro_tile_configs > config.num_macro_tile_configs)
951 args->num_macro_tile_configs =
952 config.num_macro_tile_configs;
953 err = copy_to_user((void __user *)args->macro_tile_config_ptr,
954 config.macro_tile_config_ptr,
955 args->num_macro_tile_configs * sizeof(uint32_t));
957 args->num_macro_tile_configs = 0;
964 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
967 struct kfd_ioctl_acquire_vm_args *args = data;
968 struct kfd_process_device *pdd;
969 struct file *drm_file;
972 drm_file = fget(args->drm_fd);
976 mutex_lock(&p->mutex);
977 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
984 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
988 ret = kfd_process_device_init_vm(pdd, drm_file);
992 /* On success, the PDD keeps the drm_file reference */
993 mutex_unlock(&p->mutex);
1000 mutex_unlock(&p->mutex);
bool kfd_dev_is_large_bar(struct kfd_dev *dev)
{
	if (debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->use_iommu_v2)
		return false;

	if (dev->local_mem_info.local_mem_size_private == 0 &&
	    dev->local_mem_info.local_mem_size_public > 0)
		return true;

	return false;
}
static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
	kfd_unlock_pdd(pdd);
	return 0;
}
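
/*
 * AMDKFD_IOC_ALLOC_MEMORY_OF_GPU: allocate a buffer object (VRAM, GTT,
 * userptr, doorbell or MMIO remap page) in the process GPU VM and return
 * a handle plus an mmap offset for CPU access.  The requested address
 * range must not overlap an existing SVM allocation.
 */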
1034 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1035 struct kfd_process *p, void *data)
1037 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1038 struct kfd_process_device *pdd;
1040 struct kfd_dev *dev;
1043 uint64_t offset = args->mmap_offset;
1044 uint32_t flags = args->flags;
1046 if (args->size == 0)
1049 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap).
	 */
1053 svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1054 mutex_lock(&p->svms.lock);
1055 mmap_write_unlock(current->mm);
1056 if (interval_tree_iter_first(&p->svms.objects,
1057 args->va_addr >> PAGE_SHIFT,
1058 (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1059 pr_err("Address: 0x%llx already allocated by SVM\n",
1061 mutex_unlock(&p->svms.lock);
1064 mutex_unlock(&p->svms.lock);
1066 mutex_lock(&p->mutex);
1067 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1075 if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1076 (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1077 !kfd_dev_is_large_bar(dev)) {
1078 pr_err("Alloc host visible vram on small bar is not allowed\n");
1083 pdd = kfd_bind_process_to_device(dev, p);
1089 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1090 if (args->size != kfd_doorbell_process_slice(dev)) {
1094 offset = kfd_get_process_doorbells(pdd);
1095 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1096 if (args->size != PAGE_SIZE) {
1100 offset = dev->adev->rmmio_remap.bus_addr;
1107 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1108 dev->adev, args->va_addr, args->size,
1109 pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1115 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1116 if (idr_handle < 0) {
1121 /* Update the VRAM usage count */
1122 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
1123 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
1125 mutex_unlock(&p->mutex);
1127 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1128 args->mmap_offset = offset;
	/* MMIO is mapped through kfd device
	 * Generate a kfd mmap offset
	 */
1133 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1134 args->mmap_offset = KFD_MMAP_TYPE_MMIO
1135 | KFD_MMAP_GPU_ID(args->gpu_id);
1140 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1141 pdd->drm_priv, NULL);
1145 mutex_unlock(&p->mutex);
1149 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1150 struct kfd_process *p, void *data)
1152 struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1153 struct kfd_process_device *pdd;
1158 mutex_lock(&p->mutex);
	/*
	 * Safeguard to prevent user space from freeing signal BO.
	 * It will be freed at process termination.
	 */
1163 if (p->signal_handle && (p->signal_handle == args->handle)) {
1164 pr_err("Free signal BO is not allowed\n");
1169 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1171 pr_err("Process device data doesn't exist\n");
1176 mem = kfd_process_device_translate_handle(
1177 pdd, GET_IDR_HANDLE(args->handle));
1183 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1184 (struct kgd_mem *)mem, pdd->drm_priv, &size);
	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);

err_unlock:
err_pdd:
	mutex_unlock(&p->mutex);
	return ret;
}
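
/*
 * AMDKFD_IOC_MAP_MEMORY_TO_GPU: map an already-allocated BO into the GPU
 * VM of every device listed in device_ids_array_ptr, wait for the page
 * table updates to land and flush the TLBs before returning.
 */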
1201 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1202 struct kfd_process *p, void *data)
1204 struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1205 struct kfd_process_device *pdd, *peer_pdd;
1207 struct kfd_dev *dev;
1210 uint32_t *devices_arr = NULL;
1212 if (!args->n_devices) {
1213 pr_debug("Device IDs array empty\n");
1216 if (args->n_success > args->n_devices) {
1217 pr_debug("n_success exceeds n_devices\n");
1221 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1226 err = copy_from_user(devices_arr,
1227 (void __user *)args->device_ids_array_ptr,
1228 args->n_devices * sizeof(*devices_arr));
1231 goto copy_from_user_failed;
1234 mutex_lock(&p->mutex);
1235 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1238 goto get_process_device_data_failed;
1242 pdd = kfd_bind_process_to_device(dev, p);
1245 goto bind_process_to_device_failed;
1248 mem = kfd_process_device_translate_handle(pdd,
1249 GET_IDR_HANDLE(args->handle));
1252 goto get_mem_obj_from_handle_failed;
1255 for (i = args->n_success; i < args->n_devices; i++) {
1256 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1258 pr_debug("Getting device by id failed for 0x%x\n",
1261 goto get_mem_obj_from_handle_failed;
1264 peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1265 if (IS_ERR(peer_pdd)) {
1266 err = PTR_ERR(peer_pdd);
1267 goto get_mem_obj_from_handle_failed;
1270 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1271 peer_pdd->dev->adev, (struct kgd_mem *)mem,
1272 peer_pdd->drm_priv);
1274 struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1276 dev_err(dev->adev->dev,
1277 "Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1278 pci_domain_nr(pdev->bus),
1280 PCI_SLOT(pdev->devfn),
1281 PCI_FUNC(pdev->devfn),
1282 ((struct kgd_mem *)mem)->domain);
1283 goto map_memory_to_gpu_failed;
1285 args->n_success = i+1;
1288 mutex_unlock(&p->mutex);
1290 err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1292 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1293 goto sync_memory_failed;
1296 /* Flush TLBs after waiting for the page table updates to complete */
1297 for (i = 0; i < args->n_devices; i++) {
1298 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1299 if (WARN_ON_ONCE(!peer_pdd))
1301 kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1307 get_process_device_data_failed:
1308 bind_process_to_device_failed:
1309 get_mem_obj_from_handle_failed:
1310 map_memory_to_gpu_failed:
1311 mutex_unlock(&p->mutex);
1312 copy_from_user_failed:
1319 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1320 struct kfd_process *p, void *data)
1322 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1323 struct kfd_process_device *pdd, *peer_pdd;
1326 uint32_t *devices_arr = NULL, i;
1328 if (!args->n_devices) {
1329 pr_debug("Device IDs array empty\n");
1332 if (args->n_success > args->n_devices) {
1333 pr_debug("n_success exceeds n_devices\n");
1337 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1342 err = copy_from_user(devices_arr,
1343 (void __user *)args->device_ids_array_ptr,
1344 args->n_devices * sizeof(*devices_arr));
1347 goto copy_from_user_failed;
1350 mutex_lock(&p->mutex);
1351 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1354 goto bind_process_to_device_failed;
1357 mem = kfd_process_device_translate_handle(pdd,
1358 GET_IDR_HANDLE(args->handle));
1361 goto get_mem_obj_from_handle_failed;
1364 for (i = args->n_success; i < args->n_devices; i++) {
1365 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1368 goto get_mem_obj_from_handle_failed;
1370 err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1371 peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1373 pr_err("Failed to unmap from gpu %d/%d\n",
1374 i, args->n_devices);
1375 goto unmap_memory_from_gpu_failed;
1377 args->n_success = i+1;
1379 mutex_unlock(&p->mutex);
1381 if (kfd_flush_tlb_after_unmap(pdd->dev)) {
1382 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1383 (struct kgd_mem *) mem, true);
1385 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1386 goto sync_memory_failed;
1389 /* Flush TLBs after waiting for the page table updates to complete */
1390 for (i = 0; i < args->n_devices; i++) {
1391 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1392 if (WARN_ON_ONCE(!peer_pdd))
1394 kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1401 bind_process_to_device_failed:
1402 get_mem_obj_from_handle_failed:
1403 unmap_memory_from_gpu_failed:
1404 mutex_unlock(&p->mutex);
1405 copy_from_user_failed:
1411 static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1412 struct kfd_process *p, void *data)
1415 struct kfd_ioctl_alloc_queue_gws_args *args = data;
1417 struct kfd_dev *dev;
1419 mutex_lock(&p->mutex);
1420 q = pqm_get_user_queue(&p->pqm, args->queue_id);
1434 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1439 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
	mutex_unlock(&p->mutex);

	args->first_gws = 0;
	return retval;

out_unlock:
	mutex_unlock(&p->mutex);
	return retval;
}
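
/*
 * AMDKFD_IOC_GET_DMABUF_INFO / AMDKFD_IOC_IMPORT_DMABUF: interop path for
 * buffers shared with the DRM render node.  The first ioctl resolves a
 * dma-buf fd to a gpu_id, size and (optionally) metadata; the second
 * imports the dma-buf into the process GPU VM and returns a KFD handle.
 */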
1450 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1451 struct kfd_process *p, void *data)
1453 struct kfd_ioctl_get_dmabuf_info_args *args = data;
1454 struct kfd_dev *dev = NULL;
1455 struct amdgpu_device *dmabuf_adev;
1456 void *metadata_buffer = NULL;
1461 /* Find a KFD GPU device that supports the get_dmabuf_info query */
1462 for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1468 if (args->metadata_ptr) {
1469 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1470 if (!metadata_buffer)
1474 /* Get dmabuf info from KGD */
1475 r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1476 &dmabuf_adev, &args->size,
1477 metadata_buffer, args->metadata_size,
1478 &args->metadata_size, &flags);
1482 /* Reverse-lookup gpu_id from kgd pointer */
1483 dev = kfd_device_by_adev(dmabuf_adev);
1488 args->gpu_id = dev->id;
1489 args->flags = flags;
1491 /* Copy metadata buffer to user mode */
1492 if (metadata_buffer) {
1493 r = copy_to_user((void __user *)args->metadata_ptr,
1494 metadata_buffer, args->metadata_size);
1500 kfree(metadata_buffer);
1505 static int kfd_ioctl_import_dmabuf(struct file *filep,
1506 struct kfd_process *p, void *data)
1508 struct kfd_ioctl_import_dmabuf_args *args = data;
1509 struct kfd_process_device *pdd;
1510 struct dma_buf *dmabuf;
1516 dmabuf = dma_buf_get(args->dmabuf_fd);
1518 return PTR_ERR(dmabuf);
1520 mutex_lock(&p->mutex);
1521 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1527 pdd = kfd_bind_process_to_device(pdd->dev, p);
1533 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
1534 args->va_addr, pdd->drm_priv,
1535 (struct kgd_mem **)&mem, &size,
1540 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1541 if (idr_handle < 0) {
1546 mutex_unlock(&p->mutex);
1547 dma_buf_put(dmabuf);
1549 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1554 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1555 pdd->drm_priv, NULL);
1557 mutex_unlock(&p->mutex);
1558 dma_buf_put(dmabuf);
1562 /* Handle requests for watching SMI events */
1563 static int kfd_ioctl_smi_events(struct file *filep,
1564 struct kfd_process *p, void *data)
1566 struct kfd_ioctl_smi_events_args *args = data;
1567 struct kfd_process_device *pdd;
1569 mutex_lock(&p->mutex);
1571 pdd = kfd_process_device_data_by_id(p, args->gpuid);
1572 mutex_unlock(&p->mutex);
1576 return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1579 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1580 struct kfd_process *p, void *data)
1582 struct kfd_ioctl_set_xnack_mode_args *args = data;
1585 mutex_lock(&p->mutex);
1586 if (args->xnack_enabled >= 0) {
1587 if (!list_empty(&p->pqm.queues)) {
1588 pr_debug("Process has user queues running\n");
1589 mutex_unlock(&p->mutex);
1592 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true))
1595 p->xnack_enabled = args->xnack_enabled;
1597 args->xnack_enabled = p->xnack_enabled;
1599 mutex_unlock(&p->mutex);
1604 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	struct kfd_ioctl_svm_args *args = data;
	int r = 0;

	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
		 args->start_addr, args->size, args->op, args->nattr);

	if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
		return -EINVAL;
	if (!args->start_addr || !args->size)
		return -EINVAL;

	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
		      args->attrs);

	return r;
}
#else
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	return -EPERM;
}
#endif
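
/*
 * CRIU checkpoint/restore support.  The helpers below serialize process
 * state (devices, BOs, queues, events, SVM ranges) into the private-data
 * blob exchanged with the CRIU plugin, and rebuild that state on restore.
 * kfd_ioctl_criu() dispatches the individual KFD_CRIU_OP_* operations.
 */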
1630 static int criu_checkpoint_process(struct kfd_process *p,
1631 uint8_t __user *user_priv_data,
1632 uint64_t *priv_offset)
1634 struct kfd_criu_process_priv_data process_priv;
1637 memset(&process_priv, 0, sizeof(process_priv));
1639 process_priv.version = KFD_CRIU_PRIV_VERSION;
	/* For CR, we don't consider negative xnack mode which is used for
	 * querying without changing it, here 0 simply means disabled and 1
	 * means enabled so retry for finding a valid PTE.
	 */
1644 process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1646 ret = copy_to_user(user_priv_data + *priv_offset,
1647 &process_priv, sizeof(process_priv));
1650 pr_err("Failed to copy process information to user\n");
1654 *priv_offset += sizeof(process_priv);
1658 static int criu_checkpoint_devices(struct kfd_process *p,
1659 uint32_t num_devices,
1660 uint8_t __user *user_addr,
1661 uint8_t __user *user_priv_data,
1662 uint64_t *priv_offset)
1664 struct kfd_criu_device_priv_data *device_priv = NULL;
1665 struct kfd_criu_device_bucket *device_buckets = NULL;
1668 device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1669 if (!device_buckets) {
1674 device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1680 for (i = 0; i < num_devices; i++) {
1681 struct kfd_process_device *pdd = p->pdds[i];
1683 device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1684 device_buckets[i].actual_gpu_id = pdd->dev->id;
		/*
		 * priv_data does not contain useful information for now and is reserved for
		 * future use, so we do not set its contents.
		 */
1692 ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1694 pr_err("Failed to copy device information to user\n");
1699 ret = copy_to_user(user_priv_data + *priv_offset,
1701 num_devices * sizeof(*device_priv));
1703 pr_err("Failed to copy device information to user\n");
1706 *priv_offset += num_devices * sizeof(*device_priv);
1709 kvfree(device_buckets);
1710 kvfree(device_priv);
1714 static uint32_t get_process_num_bos(struct kfd_process *p)
1716 uint32_t num_of_bos = 0;
1719 /* Run over all PDDs of the process */
1720 for (i = 0; i < p->n_pdds; i++) {
1721 struct kfd_process_device *pdd = p->pdds[i];
1725 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1726 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1728 if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
1735 static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
1738 struct dma_buf *dmabuf;
1741 dmabuf = amdgpu_gem_prime_export(gobj, flags);
1742 if (IS_ERR(dmabuf)) {
1743 ret = PTR_ERR(dmabuf);
1744 pr_err("dmabuf export failed for the BO\n");
1748 ret = dma_buf_fd(dmabuf, flags);
1750 pr_err("dmabuf create fd failed, ret:%d\n", ret);
1751 goto out_free_dmabuf;
1758 dma_buf_put(dmabuf);
1762 static int criu_checkpoint_bos(struct kfd_process *p,
1764 uint8_t __user *user_bos,
1765 uint8_t __user *user_priv_data,
1766 uint64_t *priv_offset)
1768 struct kfd_criu_bo_bucket *bo_buckets;
1769 struct kfd_criu_bo_priv_data *bo_privs;
1770 int ret = 0, pdd_index, bo_index = 0, id;
1773 bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1777 bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1783 for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1784 struct kfd_process_device *pdd = p->pdds[pdd_index];
1785 struct amdgpu_bo *dumper_bo;
1786 struct kgd_mem *kgd_mem;
1788 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1789 struct kfd_criu_bo_bucket *bo_bucket;
1790 struct kfd_criu_bo_priv_data *bo_priv;
1798 kgd_mem = (struct kgd_mem *)mem;
1799 dumper_bo = kgd_mem->bo;
1801 if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
1804 bo_bucket = &bo_buckets[bo_index];
1805 bo_priv = &bo_privs[bo_index];
1807 bo_bucket->gpu_id = pdd->user_gpu_id;
1808 bo_bucket->addr = (uint64_t)kgd_mem->va;
1809 bo_bucket->size = amdgpu_bo_size(dumper_bo);
1810 bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1811 bo_priv->idr_handle = id;
1813 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1814 ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1815 &bo_priv->user_addr);
1817 pr_err("Failed to obtain user address for user-pointer bo\n");
1821 if (bo_bucket->alloc_flags
1822 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1823 ret = criu_get_prime_handle(&dumper_bo->tbo.base,
1824 bo_bucket->alloc_flags &
1825 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1826 &bo_bucket->dmabuf_fd);
1830 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1833 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1834 bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1835 KFD_MMAP_GPU_ID(pdd->dev->id);
1836 else if (bo_bucket->alloc_flags &
1837 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1838 bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1839 KFD_MMAP_GPU_ID(pdd->dev->id);
1841 bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1843 for (i = 0; i < p->n_pdds; i++) {
1844 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
1845 bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
			pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
				 "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
				 bo_bucket->size,
				 bo_bucket->addr,
				 bo_bucket->offset,
				 bo_bucket->gpu_id,
				 bo_bucket->alloc_flags,
				 bo_priv->idr_handle);
1860 ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1862 pr_err("Failed to copy BO information to user\n");
1867 ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1869 pr_err("Failed to copy BO priv information to user\n");
1874 *priv_offset += num_bos * sizeof(*bo_privs);
1877 while (ret && bo_index--) {
1878 if (bo_buckets[bo_index].alloc_flags
1879 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
1880 close_fd(bo_buckets[bo_index].dmabuf_fd);
1888 static int criu_get_process_object_info(struct kfd_process *p,
1889 uint32_t *num_devices,
1891 uint32_t *num_objects,
1892 uint64_t *objs_priv_size)
1894 uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
1895 uint32_t num_queues, num_events, num_svm_ranges;
1898 *num_devices = p->n_pdds;
1899 *num_bos = get_process_num_bos(p);
1901 ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
1905 num_events = kfd_get_num_events(p);
1907 ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
1911 *num_objects = num_queues + num_events + num_svm_ranges;
1913 if (objs_priv_size) {
1914 priv_size = sizeof(struct kfd_criu_process_priv_data);
1915 priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
1916 priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
1917 priv_size += queues_priv_data_size;
1918 priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
1919 priv_size += svm_priv_data_size;
1920 *objs_priv_size = priv_size;
1925 static int criu_checkpoint(struct file *filep,
1926 struct kfd_process *p,
1927 struct kfd_ioctl_criu_args *args)
1930 uint32_t num_devices, num_bos, num_objects;
1931 uint64_t priv_size, priv_offset = 0;
1933 if (!args->devices || !args->bos || !args->priv_data)
1936 mutex_lock(&p->mutex);
1939 pr_err("No pdd for given process\n");
1944 /* Confirm all process queues are evicted */
1945 if (!p->queues_paused) {
1946 pr_err("Cannot dump process when queues are not in evicted state\n");
1947 /* CRIU plugin did not call op PROCESS_INFO before checkpointing */
1952 ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
1956 if (num_devices != args->num_devices ||
1957 num_bos != args->num_bos ||
1958 num_objects != args->num_objects ||
1959 priv_size != args->priv_data_size) {
1965 /* each function will store private data inside priv_data and adjust priv_offset */
1966 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
1970 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
1971 (uint8_t __user *)args->priv_data, &priv_offset);
1975 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
1976 (uint8_t __user *)args->priv_data, &priv_offset);
1981 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
1986 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
1991 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
1998 /* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */
2000 struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
2002 for (i = 0; i < num_bos; i++) {
2003 if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
2004 close_fd(bo_buckets[i].dmabuf_fd);
2009 mutex_unlock(&p->mutex);
2011 pr_err("Failed to dump CRIU ret:%d\n", ret);
2013 pr_debug("CRIU dump ret:%d\n", ret);
2018 static int criu_restore_process(struct kfd_process *p,
2019 struct kfd_ioctl_criu_args *args,
2020 uint64_t *priv_offset,
2021 uint64_t max_priv_data_size)
2024 struct kfd_criu_process_priv_data process_priv;
2026 if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2029 ret = copy_from_user(&process_priv,
2030 (void __user *)(args->priv_data + *priv_offset),
2031 sizeof(process_priv));
2033 pr_err("Failed to copy process private information from user\n");
2037 *priv_offset += sizeof(process_priv);
2039 if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2040 pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2041 process_priv.version, KFD_CRIU_PRIV_VERSION);
2045 pr_debug("Setting XNACK mode\n");
2046 if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2047 pr_err("xnack mode cannot be set\n");
2051 pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2052 p->xnack_enabled = process_priv.xnack_mode;
2059 static int criu_restore_devices(struct kfd_process *p,
2060 struct kfd_ioctl_criu_args *args,
2061 uint64_t *priv_offset,
2062 uint64_t max_priv_data_size)
2064 struct kfd_criu_device_bucket *device_buckets;
2065 struct kfd_criu_device_priv_data *device_privs;
2069 if (args->num_devices != p->n_pdds)
2072 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2075 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2076 if (!device_buckets)
2079 ret = copy_from_user(device_buckets, (void __user *)args->devices,
2080 args->num_devices * sizeof(*device_buckets));
2082 pr_err("Failed to copy devices buckets from user\n");
2087 for (i = 0; i < args->num_devices; i++) {
2088 struct kfd_dev *dev;
2089 struct kfd_process_device *pdd;
2090 struct file *drm_file;
2092 /* device private data is not currently used */
2094 if (!device_buckets[i].user_gpu_id) {
2095 pr_err("Invalid user gpu_id\n");
2100 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2102 pr_err("Failed to find device with gpu_id = %x\n",
2103 device_buckets[i].actual_gpu_id);
2108 pdd = kfd_get_process_device_data(dev, p);
2110 pr_err("Failed to get pdd for gpu_id = %x\n",
2111 device_buckets[i].actual_gpu_id);
2115 pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2117 drm_file = fget(device_buckets[i].drm_fd);
2119 pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2120 device_buckets[i].drm_fd);
2125 if (pdd->drm_file) {
2130 /* create the vm using render nodes for kfd pdd */
2131 if (kfd_process_device_init_vm(pdd, drm_file)) {
2132 pr_err("could not init vm for given pdd\n");
2133 /* On success, the PDD keeps the drm_file reference */
		/*
		 * pdd now already has the vm bound to render node so below api won't create a
		 * new exclusive kfd mapping but use existing one with renderDXXX but is still
		 * needed for iommu v2 binding and runtime pm.
		 */
2143 pdd = kfd_bind_process_to_device(dev, p);
	/*
	 * We are not copying device private data from user as we are not using the data for now,
	 * but we still adjust for its private data.
	 */
2154 *priv_offset += args->num_devices * sizeof(*device_privs);
2157 kfree(device_buckets);
2161 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2162 struct kfd_criu_bo_bucket *bo_bucket,
2163 struct kfd_criu_bo_priv_data *bo_priv,
2164 struct kgd_mem **kgd_mem)
2168 const bool criu_resume = true;
2171 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2172 if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
2175 offset = kfd_get_process_doorbells(pdd);
2176 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2177 /* MMIO BOs need remapped bus address */
2178 if (bo_bucket->size != PAGE_SIZE) {
2179 pr_err("Invalid page size\n");
2182 offset = pdd->dev->adev->rmmio_remap.bus_addr;
2184 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2187 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
2188 offset = bo_priv->user_addr;
2191 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2192 bo_bucket->size, pdd->drm_priv, kgd_mem,
2193 &offset, bo_bucket->alloc_flags, criu_resume);
2195 pr_err("Could not create the BO\n");
2198 pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2199 bo_bucket->size, bo_bucket->addr, offset);
2201 /* Restore previous IDR handle */
2202 pr_debug("Restoring old IDR handle for the BO");
2203 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2204 bo_priv->idr_handle + 1, GFP_KERNEL);
2206 if (idr_handle < 0) {
2207 pr_err("Could not allocate idr\n");
2208 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2213 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
2214 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2215 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2216 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2217 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2218 bo_bucket->restored_offset = offset;
2219 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2220 bo_bucket->restored_offset = offset;
2221 /* Update the VRAM usage count */
2222 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
2227 static int criu_restore_bo(struct kfd_process *p,
2228 struct kfd_criu_bo_bucket *bo_bucket,
2229 struct kfd_criu_bo_priv_data *bo_priv)
2231 struct kfd_process_device *pdd;
2232 struct kgd_mem *kgd_mem;
2236 pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2237 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2238 bo_priv->idr_handle);
2240 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2242 pr_err("Failed to get pdd\n");
2246 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2250 /* now map these BOs to GPU/s */
2251 for (j = 0; j < p->n_pdds; j++) {
2252 struct kfd_dev *peer;
2253 struct kfd_process_device *peer_pdd;
2255 if (!bo_priv->mapped_gpuids[j])
2258 peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2262 peer = peer_pdd->dev;
2264 peer_pdd = kfd_bind_process_to_device(peer, p);
2265 if (IS_ERR(peer_pdd))
2266 return PTR_ERR(peer_pdd);
2268 ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2269 peer_pdd->drm_priv);
2271 pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2276 pr_debug("map memory was successful for the BO\n");
2277 /* create the dmabuf object and export the bo */
2278 if (bo_bucket->alloc_flags
2279 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2280 ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
2281 &bo_bucket->dmabuf_fd);
2285 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2291 static int criu_restore_bos(struct kfd_process *p,
2292 struct kfd_ioctl_criu_args *args,
2293 uint64_t *priv_offset,
2294 uint64_t max_priv_data_size)
2296 struct kfd_criu_bo_bucket *bo_buckets = NULL;
2297 struct kfd_criu_bo_priv_data *bo_privs = NULL;
2301 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2304 /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2305 amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2307 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2311 ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2312 args->num_bos * sizeof(*bo_buckets));
2314 pr_err("Failed to copy BOs information from user\n");
2319 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2325 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2326 args->num_bos * sizeof(*bo_privs));
2328 pr_err("Failed to copy BOs information from user\n");
2332 *priv_offset += args->num_bos * sizeof(*bo_privs);
2334 /* Create and map new BOs */
2335 for (; i < args->num_bos; i++) {
2336 ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
2338 pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
2343 /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2344 ret = copy_to_user((void __user *)args->bos,
2346 (args->num_bos * sizeof(*bo_buckets)));
2351 while (ret && i--) {
2352 if (bo_buckets[i].alloc_flags
2353 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
2354 close_fd(bo_buckets[i].dmabuf_fd);
2361 static int criu_restore_objects(struct file *filep,
2362 struct kfd_process *p,
2363 struct kfd_ioctl_criu_args *args,
2364 uint64_t *priv_offset,
2365 uint64_t max_priv_data_size)
2370 BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2371 BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2372 BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2374 for (i = 0; i < args->num_objects; i++) {
2375 uint32_t object_type;
2377 if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2378 pr_err("Invalid private data size\n");
2382 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2384 pr_err("Failed to copy private information from user\n");
2388 switch (object_type) {
2389 case KFD_CRIU_OBJECT_TYPE_QUEUE:
2390 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2391 priv_offset, max_priv_data_size);
2395 case KFD_CRIU_OBJECT_TYPE_EVENT:
2396 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2397 priv_offset, max_priv_data_size);
2401 case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2402 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2403 priv_offset, max_priv_data_size);
2408 pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2417 static int criu_restore(struct file *filep,
2418 struct kfd_process *p,
2419 struct kfd_ioctl_criu_args *args)
2421 uint64_t priv_offset = 0;
2424 pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2425 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2427 if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2428 !args->num_devices || !args->num_bos)
2431 mutex_lock(&p->mutex);
	/*
	 * Set the process to evicted state to avoid running any new queues before all the memory
	 * mappings are ready.
	 */
2437 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2441 /* Each function will adjust priv_offset based on how many bytes they consumed */
2442 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2446 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2450 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2454 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2458 if (priv_offset != args->priv_data_size) {
2459 pr_err("Invalid private data size\n");
2464 mutex_unlock(&p->mutex);
2466 pr_err("Failed to restore CRIU ret:%d\n", ret);
2468 pr_debug("CRIU restore successful\n");
2473 static int criu_unpause(struct file *filep,
2474 struct kfd_process *p,
2475 struct kfd_ioctl_criu_args *args)
2479 mutex_lock(&p->mutex);
2481 if (!p->queues_paused) {
2482 mutex_unlock(&p->mutex);
2486 ret = kfd_process_restore_queues(p);
2488 pr_err("Failed to unpause queues ret:%d\n", ret);
2490 p->queues_paused = false;
2492 mutex_unlock(&p->mutex);
2497 static int criu_resume(struct file *filep,
2498 struct kfd_process *p,
2499 struct kfd_ioctl_criu_args *args)
2501 struct kfd_process *target = NULL;
2502 struct pid *pid = NULL;
2505 pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2508 pid = find_get_pid(args->pid);
2510 pr_err("Cannot find pid info for %i\n", args->pid);
2514 pr_debug("calling kfd_lookup_process_by_pid\n");
2515 target = kfd_lookup_process_by_pid(pid);
2520 pr_debug("Cannot find process info for %i\n", args->pid);
2524 mutex_lock(&target->mutex);
2525 ret = kfd_criu_resume_svm(target);
2527 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2531 ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2533 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2536 mutex_unlock(&target->mutex);
2538 kfd_unref_process(target);
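/*
 * Rough ordering of the KFD_CRIU_OP_* calls, inferred from the handlers in
 * this file (a summary, not a normative uapi statement): on checkpoint,
 * userspace issues PROCESS_INFO (which pauses the queues and reports buffer
 * sizes), then CHECKPOINT, then UNPAUSE; on restore, it issues RESTORE in the
 * re-created process and finally RESUME, which lets SVM and the amdgpu
 * process info finish re-validating memory for the target.
 */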
2542 static int criu_process_info(struct file *filep,
2543 struct kfd_process *p,
2544 struct kfd_ioctl_criu_args *args)
2548 mutex_lock(&p->mutex);
2551 pr_err("No pdd for given process\n");
2556 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2560 p->queues_paused = true;
2562 args->pid = task_pid_nr_ns(p->lead_thread,
2563 task_active_pid_ns(p->lead_thread));
2565 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2566 &args->num_objects, &args->priv_data_size);
2570 dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2571 args->num_devices, args->num_bos, args->num_objects,
2572 args->priv_data_size);
2576 kfd_process_restore_queues(p);
2577 p->queues_paused = false;
2579 mutex_unlock(&p->mutex);
2583 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2585 struct kfd_ioctl_criu_args *args = data;
2588 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2590 case KFD_CRIU_OP_PROCESS_INFO:
2591 ret = criu_process_info(filep, p, args);
2593 case KFD_CRIU_OP_CHECKPOINT:
2594 ret = criu_checkpoint(filep, p, args);
2596 case KFD_CRIU_OP_UNPAUSE:
2597 ret = criu_unpause(filep, p, args);
2599 case KFD_CRIU_OP_RESTORE:
2600 ret = criu_restore(filep, p, args);
2602 case KFD_CRIU_OP_RESUME:
2603 ret = criu_resume(filep, p, args);
2606 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2612 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2617 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
2618 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
2619 .cmd_drv = 0, .name = #ioctl}
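/*
 * Example expansion (illustrative only):
 * AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, kfd_ioctl_get_version, 0)
 * fills slot _IOC_NR(AMDKFD_IOC_GET_VERSION) of the table below with roughly:
 *
 *	{ .cmd = AMDKFD_IOC_GET_VERSION, .func = kfd_ioctl_get_version,
 *	  .flags = 0, .cmd_drv = 0, .name = "AMDKFD_IOC_GET_VERSION" }
 */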
2622 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
2623 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
2624 kfd_ioctl_get_version, 0),
2626 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
2627 kfd_ioctl_create_queue, 0),
2629 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
2630 kfd_ioctl_destroy_queue, 0),
2632 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
2633 kfd_ioctl_set_memory_policy, 0),
2635 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
2636 kfd_ioctl_get_clock_counters, 0),
2638 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
2639 kfd_ioctl_get_process_apertures, 0),
2641 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
2642 kfd_ioctl_update_queue, 0),
2644 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
2645 kfd_ioctl_create_event, 0),
2647 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
2648 kfd_ioctl_destroy_event, 0),
2650 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
2651 kfd_ioctl_set_event, 0),
2653 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
2654 kfd_ioctl_reset_event, 0),
2656 AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
2657 kfd_ioctl_wait_events, 0),
2659 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
2660 kfd_ioctl_dbg_register, 0),
2662 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
2663 kfd_ioctl_dbg_unregister, 0),
2665 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
2666 kfd_ioctl_dbg_address_watch, 0),
2668 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
2669 kfd_ioctl_dbg_wave_control, 0),
2671 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
2672 kfd_ioctl_set_scratch_backing_va, 0),
2674 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
2675 kfd_ioctl_get_tile_config, 0),
2677 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
2678 kfd_ioctl_set_trap_handler, 0),
2680 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
2681 kfd_ioctl_get_process_apertures_new, 0),
2683 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
2684 kfd_ioctl_acquire_vm, 0),
2686 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
2687 kfd_ioctl_alloc_memory_of_gpu, 0),
2689 AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
2690 kfd_ioctl_free_memory_of_gpu, 0),
2692 AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
2693 kfd_ioctl_map_memory_to_gpu, 0),
2695 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
2696 kfd_ioctl_unmap_memory_from_gpu, 0),
2698 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
2699 kfd_ioctl_set_cu_mask, 0),
2701 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
2702 kfd_ioctl_get_queue_wave_state, 0),
2704 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
2705 kfd_ioctl_get_dmabuf_info, 0),
2707 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
2708 kfd_ioctl_import_dmabuf, 0),
2710 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
2711 kfd_ioctl_alloc_queue_gws, 0),
2713 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
2714 kfd_ioctl_smi_events, 0),
2716 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
2718 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
2719 kfd_ioctl_set_xnack_mode, 0),
2721 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
2722 kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
2724 AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
2725 kfd_ioctl_get_available_memory, 0),
2728 #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
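/*
 * Hypothetical userspace usage of this table (a sketch, not shipped code):
 *
 *	struct kfd_ioctl_get_version_args args = {0};
 *	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *
 *	if (fd >= 0 && !ioctl(fd, AMDKFD_IOC_GET_VERSION, &args))
 *		printf("KFD %u.%u\n", args.major_version, args.minor_version);
 *
 * kfd_ioctl() below looks the command up in amdkfd_ioctls[], copies the
 * argument struct in and out as indicated by the _IOC direction bits, and
 * calls the bound handler.
 */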
2730 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2732 struct kfd_process *process;
2733 amdkfd_ioctl_t *func;
2734 const struct amdkfd_ioctl_desc *ioctl = NULL;
2735 unsigned int nr = _IOC_NR(cmd);
2736 char stack_kdata[128];
2738 unsigned int usize, asize;
2739 int retcode = -EINVAL;
2740 bool ptrace_attached = false;
2742 if (nr >= AMDKFD_CORE_IOCTL_COUNT)
2745 if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
2748 ioctl = &amdkfd_ioctls[nr];
2750 amdkfd_size = _IOC_SIZE(ioctl->cmd);
2751 usize = asize = _IOC_SIZE(cmd);
2752 if (amdkfd_size > asize)
2753 asize = amdkfd_size;
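		/*
		 * usize is the struct size userspace encoded in cmd; asize is
		 * the larger of that and the size from our own ioctl table
		 * entry, so a shorter (older) userspace struct can be
		 * zero-extended before the handler runs (see the memset of
		 * kdata + usize further down).
		 */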
2759 dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
2761 /* Get the process struct from the filep. Only the process
2762 * that opened /dev/kfd can use the file descriptor. Child
2763 * processes need to create their own KFD device context.
2765 process = filep->private_data;
2768 if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
2769 ptrace_parent(process->lead_thread) == current)
2770 ptrace_attached = true;
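	/*
	 * CRIU operates on another process's /dev/kfd fd while ptrace-attached
	 * to it, so the ptrace parent (the checkpointer) is allowed past the
	 * "same process only" check below for checkpoint/restore ioctls.
	 */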
2773 if (process->lead_thread != current->group_leader
2774 && !ptrace_attached) {
2775 dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
2780 /* Do not trust userspace, use our own definition */
2783 if (unlikely(!func)) {
2784 dev_dbg(kfd_device, "no function\n");
2790 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
2791 * CAP_CHECKPOINT_RESTORE, so we also allow access when the caller holds
2792 * CAP_SYS_ADMIN, since CAP_SYS_ADMIN is the more privileged capability.
2794 if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
2795 if (!capable(CAP_CHECKPOINT_RESTORE) &&
2796 !capable(CAP_SYS_ADMIN)) {
2802 if (cmd & (IOC_IN | IOC_OUT)) {
2803 if (asize <= sizeof(stack_kdata)) {
2804 kdata = stack_kdata;
2806 kdata = kmalloc(asize, GFP_KERNEL);
2813 memset(kdata + usize, 0, asize - usize);
2817 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
2821 } else if (cmd & IOC_OUT) {
2822 memset(kdata, 0, usize);
2825 retcode = func(filep, process, kdata);
2828 if (copy_to_user((void __user *)arg, kdata, usize) != 0)
2833 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
2834 task_pid_nr(current), cmd, nr);
2836 if (kdata != stack_kdata)
2840 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
2846 static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
2847 struct vm_area_struct *vma)
2849 phys_addr_t address;
2852 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2855 address = dev->adev->rmmio_remap.bus_addr;
2857 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
2858 VM_DONTDUMP | VM_PFNMAP;
2860 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
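	/*
	 * A single uncached page of the remapped MMIO range is exposed here;
	 * the VM_* flags above keep the mapping out of core dumps and forked
	 * children and mark it as a raw PFN mapping.
	 */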
2862 pr_debug("pasid 0x%x mapping mmio page\n"
2863 " target user address == 0x%08llX\n"
2864 " physical address == 0x%08llX\n"
2865 " vm_flags == 0x%04lX\n"
2866 " size == 0x%04lX\n",
2867 process->pasid, (unsigned long long) vma->vm_start,
2868 address, vma->vm_flags, PAGE_SIZE);
2870 ret = io_remap_pfn_range(vma,
2872 address >> PAGE_SHIFT,
2879 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
2881 struct kfd_process *process;
2882 struct kfd_dev *dev = NULL;
2883 unsigned long mmap_offset;
2884 unsigned int gpu_id;
2886 process = kfd_get_process(current);
2887 if (IS_ERR(process))
2888 return PTR_ERR(process);
2890 mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
2891 gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
2893 dev = kfd_device_by_id(gpu_id);
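	/*
	 * The mmap offset doubles as a routing cookie: high-order bits encode
	 * the mapping type (doorbell, events, reserved memory or MMIO) while a
	 * separate field carries the GPU ID extracted above; the switch below
	 * dispatches purely on the type bits.
	 */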
2895 switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
2896 case KFD_MMAP_TYPE_DOORBELL:
2899 return kfd_doorbell_mmap(dev, process, vma);
2901 case KFD_MMAP_TYPE_EVENTS:
2902 return kfd_event_mmap(process, vma);
2904 case KFD_MMAP_TYPE_RESERVED_MEM:
2907 return kfd_reserved_mem_mmap(dev, process, vma);
2908 case KFD_MMAP_TYPE_MMIO:
2911 return kfd_mmio_mmap(dev, process, vma);