/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

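/*
 * Illustrative usage sketch for amdgpu_bo_alloc(): allocating a 4 KiB
 * buffer in GTT. `dev` is assumed to come from amdgpu_device_initialize().
 *
 *      struct amdgpu_bo_alloc_request req = {
 *              .alloc_size = 4096,
 *              .phys_alignment = 4096,
 *              .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *      };
 *      amdgpu_bo_handle buf;
 *      int r = amdgpu_bo_alloc(dev, &req, &buf);
 */
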
int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r)
                goto out;

        bo->handle = args.out.handle;

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        if (r)
                amdgpu_bo_free(bo);
        else
                *buf_handle = bo;

        return r;

out:
        free(bo);
        return r;
}

int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

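/*
 * Illustrative sketch: attaching a small UMD-defined blob to a BO. The
 * blob contents and the tiling_info encoding are UMD conventions; the
 * value below is a placeholder.
 *
 *      struct amdgpu_bo_metadata md = {0};
 *      uint32_t blob = 0x12345678;
 *      md.size_metadata = sizeof(blob);
 *      memcpy(md.umd_metadata, &blob, sizeof(blob));
 *      int r = amdgpu_bo_set_metadata(bo, &md);
 */
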
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return r;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
                                          DRM_CLOEXEC | DRM_RDWR,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

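/*
 * Illustrative sketch: sharing a BO between two device handles with a
 * dma-buf fd. `dev_b` is a placeholder for a second amdgpu device; error
 * handling is elided.
 *
 *      uint32_t fd;
 *      struct amdgpu_bo_import_result res;
 *      if (!amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd)) {
 *              amdgpu_bo_import(dev_b, amdgpu_bo_handle_type_dma_buf_fd,
 *                               fd, &res);
 *              close(fd);
 *      }
 */
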
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct amdgpu_bo *bo = NULL;
        uint32_t handle = 0;
        int r = 0;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
                pthread_mutex_unlock(&dev->bo_table_mutex);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                        amdgpu_close_kms_handle(dev, shared_handle);
                }
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                r = handle_table_insert(&dev->bo_flink_names, shared_handle,
                                        bo);
                if (r) {
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        amdgpu_bo_free(bo);
                        return r;
                }
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        handle_table_insert(&dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        struct amdgpu_device *dev;
        struct amdgpu_bo *bo = buf_handle;

        assert(bo != NULL);
        dev = bo->dev;
        pthread_mutex_lock(&dev->bo_table_mutex);

        if (update_references(&bo->refcount, NULL)) {
                /* Remove the buffer from the hash tables. */
                handle_table_remove(&dev->bo_handles, bo->handle);

                if (bo->flink_name)
                        handle_table_remove(&dev->bo_flink_names,
                                            bo->flink_name);

                /* Release CPU access. */
                if (bo->cpu_map_count > 0) {
                        bo->cpu_map_count = 1;
                        amdgpu_bo_cpu_unmap(bo);
                }

                amdgpu_close_kms_handle(dev, bo->handle);
                pthread_mutex_destroy(&bo->cpu_access_mutex);
                free(bo);
        }

        pthread_mutex_unlock(&dev->bo_table_mutex);

        return 0;
}

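/*
 * Note: amdgpu_bo_free() only drops one reference. The KMS handle is
 * closed and the amdgpu_bo struct freed once the last reference taken by
 * amdgpu_bo_alloc() or amdgpu_bo_import() has been released.
 */
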
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EINVAL;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

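/*
 * Illustrative sketch: CPU mappings are reference counted, so every
 * successful amdgpu_bo_cpu_map() must be balanced by a cpu_unmap.
 *
 *      void *ptr;
 *      if (!amdgpu_bo_cpu_map(bo, &ptr)) {
 *              memset(ptr, 0, 4096);   (assumes the BO is >= 4 KiB)
 *              amdgpu_bo_cpu_unmap(bo);
 *      }
 */
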
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                       struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

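/*
 * Illustrative sketch: waiting up to one second for the GPU to finish
 * with a BO; `busy` is set when the buffer is still in use after the
 * timeout expires.
 *
 *      bool busy;
 *      int r = amdgpu_bo_wait_for_idle(bo, 1000000000ull, &busy);
 */
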
int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
                                  void *cpu,
                                  uint64_t size,
                                  amdgpu_bo_handle *buf_handle,
                                  uint64_t *offset_in_bo)
{
        struct amdgpu_bo *bo;
        uint32_t i;
        int r = 0;

        if (cpu == NULL || size == 0)
                return -EINVAL;

        /*
         * Workaround for a buggy application which tries to import previously
         * exposed CPU pointers. If we find a real world use case we should
         * improve that by asking the kernel for the right handle.
         */
        pthread_mutex_lock(&dev->bo_table_mutex);
        for (i = 0; i < dev->bo_handles.max_key; i++) {
                bo = handle_table_lookup(&dev->bo_handles, i);
                if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
                        continue;
                if (cpu >= bo->cpu_ptr &&
                    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
                        break;
        }

        if (i < dev->bo_handles.max_key) {
                atomic_inc(&bo->refcount);
                *buf_handle = bo;
                *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
        } else {
                *buf_handle = NULL;
                *offset_in_bo = 0;
                r = -ENXIO;
        }
        pthread_mutex_unlock(&dev->bo_table_mutex);

        return r;
}

int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   amdgpu_bo_handle *buf_handle)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;

        args.addr = (uintptr_t)cpu;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                goto out;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                r = -ENOMEM;
                goto out;
        }

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_handles, bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        if (r)
                amdgpu_bo_free(bo);
        else
                *buf_handle = bo;

out:
        return r;
}

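/*
 * Illustrative sketch: wrapping anonymous, page-aligned CPU memory as a
 * BO. ANONONLY above means file-backed mappings are rejected.
 *
 *      size_t page = getpagesize();
 *      void *mem = aligned_alloc(page, page);
 *      amdgpu_bo_handle ubo;
 *      int r = amdgpu_create_bo_from_user_mem(dev, mem, page, &ubo);
 */
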
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

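/*
 * Illustrative sketch: building a BO list for command submission; passing
 * NULL priorities gives every entry priority 0. `bo0` and `bo1` are
 * placeholder handles.
 *
 *      amdgpu_bo_handle bos[] = { bo0, bo1 };
 *      amdgpu_bo_list_handle list;
 *      int r = amdgpu_bo_list_create(dev, 2, bos, NULL, &list);
 *      if (!r)
 *              amdgpu_bo_list_destroy(list);
 */
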
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                    uint64_t offset,
                    uint64_t size,
                    uint64_t addr,
                    uint64_t flags,
                    uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;

        size = ALIGN(size, getpagesize());

        return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE |
                                   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

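/*
 * Note: this wrapper ignores its flags argument and always maps with
 * READABLE | WRITEABLE | EXECUTABLE; use amdgpu_bo_va_op_raw() below for
 * explicit flags. Illustrative sketch, with `va` assumed to come from
 * amdgpu_va_range_alloc():
 *
 *      r = amdgpu_bo_va_op(bo, 0, size, va, 0, AMDGPU_VA_OP_MAP);
 *      ...
 *      r = amdgpu_bo_va_op(bo, 0, size, va, 0, AMDGPU_VA_OP_UNMAP);
 */
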
int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
                        amdgpu_bo_handle bo,
                        uint64_t offset,
                        uint64_t size,
                        uint64_t addr,
                        uint64_t flags,
                        uint32_t ops)
{
        struct drm_amdgpu_gem_va va;
        int r;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
            ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo ? bo->handle : 0;
        va.operation = ops;
        va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = size;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

        return r;
}