/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				    uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

static int amdgpu_bo_create(amdgpu_device_handle dev,
			    uint64_t size,
			    uint32_t handle,
			    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle)
{
	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = alloc_buffer->preferred_heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
			     buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.out.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}

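/*
 * A minimal usage sketch (not part of this file); the request values are
 * illustrative choices, not requirements:
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 4096,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
 *	};
 *	amdgpu_bo_handle bo;
 *
 *	if (!amdgpu_bo_alloc(dev, &req, &bo)) {
 *		... use the buffer ...
 *		amdgpu_bo_free(bo);
 *	}
 */
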
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in. */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	/* If the flink fd is a different device fd, first convert the KMS
	 * handle to a handle that is valid on the flink fd. */
	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return r;
}

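/*
 * Note on handle types: a GEM flink name is a global 32-bit name, a
 * dma-buf fd is a per-process file descriptor, and a KMS handle is only
 * meaningful on the device fd that created it. That is why the flink path
 * above has to route through dev->flink_fd whenever the render node
 * (dev->fd) differs from the node that owns the flink namespace.
 */
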
int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}

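/*
 * A minimal usage sketch (not part of this file): export a BO as a
 * dma-buf fd for sharing with another process or API:
 *
 *	uint32_t fd;
 *
 *	if (!amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd)) {
 *		... hand fd off, e.g. over a UNIX socket ...
 *		close(fd);
 *	}
 */
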
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct drm_gem_close close_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0, flink_name = 0;
	uint64_t alloc_size = 0;
	int r = 0;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			goto unlock;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			r = -errno;
			goto free_bo_handle;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		/* Importing a KMS handle is not allowed. */
		r = -EPERM;
		goto unlock;

	default:
		r = -EINVAL;
		goto unlock;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r)
			goto unlock;

		flink_name = shared_handle;
		handle = open_arg.handle;
		alloc_size = open_arg.size;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
			close(dma_fd);
			if (r)
				goto free_bo_handle;
			close_arg.handle = open_arg.handle;
			r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
				     &close_arg);
			if (r)
				goto free_bo_handle;
		}
		open_arg.handle = 0;
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		handle = shared_handle;
		alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
	if (r)
		goto free_bo_handle;

	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
	if (r)
		goto free_bo_handle;
	if (flink_name) {
		bo->flink_name = flink_name;
		r = handle_table_insert(&dev->bo_flink_names, flink_name,
					bo);
		if (r)
			goto remove_handle;
	}

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;

remove_handle:
	handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
	if (flink_name && !close_arg.handle && open_arg.handle) {
		close_arg.handle = open_arg.handle;
		drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
	}
	if (bo)
		amdgpu_bo_free(bo);
	else
		amdgpu_close_kms_handle(dev, handle);
unlock:
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return r;
}

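/*
 * A minimal usage sketch (not part of this file); "name" stands for a
 * flink name received from another process:
 *
 *	struct amdgpu_bo_import_result res;
 *
 *	if (!amdgpu_bo_import(dev, amdgpu_bo_handle_type_gem_flink_name,
 *			      name, &res))
 *		... use res.buf_handle, release it with amdgpu_bo_free() ...
 */
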
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		handle_table_remove(&dev->bo_handles, bo->handle);
		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);

	return 0;
}

void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
	atomic_inc(&bo->refcount);
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

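/*
 * A minimal usage sketch (not part of this file): map, write, unmap.
 * Mappings are refcounted, so nested map/unmap pairs are fine; "size"
 * stands for the caller's allocation size:
 *
 *	void *cpu;
 *
 *	if (!amdgpu_bo_cpu_map(bo, &cpu)) {
 *		memset(cpu, 0, size);
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */
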
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

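/*
 * A minimal usage sketch (not part of this file): wait up to one second
 * for pending GPU work on the BO to finish:
 *
 *	bool busy;
 *
 *	if (!amdgpu_bo_wait_for_idle(bo, 1000000000ull, &busy) && !busy)
 *		... safe to reuse the buffer ...
 */
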
int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
				  void *cpu,
				  uint64_t size,
				  amdgpu_bo_handle *buf_handle,
				  uint64_t *offset_in_bo)
{
	struct amdgpu_bo *bo;
	uint32_t i;
	int r = 0;

	if (cpu == NULL || size == 0)
		return -EINVAL;

	/*
	 * Workaround for a buggy application which tries to import previously
	 * exposed CPU pointers. If we find a real world use case we should
	 * improve that by asking the kernel for the right handle.
	 */
	pthread_mutex_lock(&dev->bo_table_mutex);
	for (i = 0; i < dev->bo_handles.max_key; i++) {
		bo = handle_table_lookup(&dev->bo_handles, i);
		if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
			continue;
		if (cpu >= bo->cpu_ptr &&
		    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
			break;
	}

	if (i < dev->bo_handles.max_key) {
		atomic_inc(&bo->refcount);
		*buf_handle = bo;
		*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
	} else {
		*buf_handle = NULL;
		*offset_in_bo = 0;
		r = -ENXIO;
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	return r;
}

int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				   void *cpu,
				   uint64_t size,
				   amdgpu_bo_handle *buf_handle)
{
	int r;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}

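/*
 * A minimal usage sketch (not part of this file): wrap page-aligned
 * anonymous memory in a BO. Only anonymous memory is accepted because of
 * AMDGPU_GEM_USERPTR_ANONONLY above:
 *
 *	uint64_t size = 2 * 4096;
 *	void *cpu = aligned_alloc(4096, size);
 *	amdgpu_bo_handle bo;
 *
 *	if (cpu && !amdgpu_create_bo_from_user_mem(dev, cpu, size, &bo))
 *		... the BO aliases cpu; free the BO before freeing cpu ...
 */
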
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

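/*
 * A minimal usage sketch (not part of this file): build a list of the BOs
 * referenced by a submission, with default (zero) priorities:
 *
 *	amdgpu_bo_handle bos[2] = { bo0, bo1 };
 *	amdgpu_bo_list_handle list;
 *
 *	if (!amdgpu_bo_list_create(dev, 2, bos, NULL, &list)) {
 *		... reference the list from the CS submission ...
 *		amdgpu_bo_list_destroy(list);
 *	}
 */
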
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		    uint64_t offset,
		    uint64_t size,
		    uint64_t addr,
		    uint64_t flags,
		    uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
			amdgpu_bo_handle bo,
			uint64_t offset,
			uint64_t size,
			uint64_t addr,
			uint64_t flags,
			uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}

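/*
 * A minimal usage sketch (not part of this file); va_base stands for an
 * address reserved with amdgpu_va_range_alloc():
 *
 *	if (!amdgpu_bo_va_op(bo, 0, size, va_base, 0, AMDGPU_VA_OP_MAP))
 *		... the GPU can now access the BO at va_base ...
 *
 *	amdgpu_bo_va_op(bo, 0, size, va_base, 0, AMDGPU_VA_OP_UNMAP);
 */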