/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>
#include <alloca.h>
#include <sys/ioctl.h>

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

/**
 * Create an IB buffer.
 *
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 * \param ib_size - \c [in] Size of allocation
 * \param ib - \c [out] return the pointer to the created IB buffer
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
			       amdgpu_context_handle context,
			       enum amdgpu_cs_ib_size ib_size,
			       amdgpu_ib_handle *ib)
{
	struct amdgpu_bo_alloc_request alloc_buffer;
	struct amdgpu_bo_alloc_result info;
	int r;
	void *cpu;
	struct amdgpu_ib *new_ib;

	memset(&alloc_buffer, 0, sizeof(alloc_buffer));

	switch (ib_size) {
	case amdgpu_cs_ib_size_4K:
		alloc_buffer.alloc_size = 4 * 1024;
		break;
	case amdgpu_cs_ib_size_16K:
		alloc_buffer.alloc_size = 16 * 1024;
		break;
	case amdgpu_cs_ib_size_32K:
		alloc_buffer.alloc_size = 32 * 1024;
		break;
	case amdgpu_cs_ib_size_64K:
		alloc_buffer.alloc_size = 64 * 1024;
		break;
	case amdgpu_cs_ib_size_128K:
		alloc_buffer.alloc_size = 128 * 1024;
		break;
	default:
		return -EINVAL;
	}

	alloc_buffer.phys_alignment = 4 * 1024;
	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
	if (r)
		return r;

	/* Keep the IB permanently CPU-mapped so callers can write packets. */
	r = amdgpu_bo_cpu_map(info.buf_handle, &cpu);
	if (r) {
		amdgpu_bo_free(info.buf_handle);
		return r;
	}

	new_ib = malloc(sizeof(struct amdgpu_ib));
	if (NULL == new_ib) {
		amdgpu_bo_cpu_unmap(info.buf_handle);
		amdgpu_bo_free(info.buf_handle);
		return -ENOMEM;
	}

	new_ib->buf_handle = info.buf_handle;
	new_ib->cpu = cpu;
	new_ib->virtual_mc_base_address = info.virtual_mc_base_address;
	new_ib->ib_size = ib_size;
	*ib = new_ib;
	return 0;
}

/**
 * Destroy an IB buffer.
 *
 * \param dev - \c [in] Device handle
 * \param ib - \c [in] the IB buffer
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_destroy_ib(amdgpu_device_handle dev,
				amdgpu_ib_handle ib)
{
	int r;

	r = amdgpu_bo_cpu_unmap(ib->buf_handle);
	if (r)
		return r;

	r = amdgpu_bo_free(ib->buf_handle);
	if (r)
		return r;

	free(ib);
	return 0;
}

/**
 * Initialize IB pools to empty.
 *
 * \param context - \c [in] GPU Context
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_init_ib_pool(amdgpu_context_handle context)
{
	int i;
	int r;

	r = pthread_mutex_init(&context->pool_mutex, NULL);
	if (r)
		return r;

	/* One free list per IB size bucket. */
	for (i = 0; i < AMDGPU_CS_IB_SIZE_NUM; i++)
		LIST_INITHEAD(&context->ib_pools[i]);

	return 0;
}

/**
 * Allocate an IB buffer from IB pools.
 *
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 * \param ib_size - \c [in] Size of allocation
 * \param ib - \c [out] return the pointer to the allocated IB buffer
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_alloc_from_ib_pool(amdgpu_device_handle dev,
					amdgpu_context_handle context,
					enum amdgpu_cs_ib_size ib_size,
					amdgpu_ib_handle *ib)
{
	int r;
	struct list_head *head;
	head = &context->ib_pools[ib_size];

	r = -ENOMEM;
	pthread_mutex_lock(&context->pool_mutex);
	if (!LIST_IS_EMPTY(head)) {
		*ib = LIST_ENTRY(struct amdgpu_ib, head->next, list_node);
		LIST_DEL(&(*ib)->list_node);
		r = 0;
	}
	pthread_mutex_unlock(&context->pool_mutex);

	return r;
}
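
/*
 * A note on the list macros used above: LIST_ENTRY() is the container-of
 * helper from libdrm's util list, so each pool bucket links struct amdgpu_ib
 * objects through their embedded list_node member. A minimal sketch of the
 * recycling round trip (illustrative only, not part of the API):
 *
 *	LIST_ADD(&ib->list_node, head);			// park an IB in a bucket
 *	ib = LIST_ENTRY(struct amdgpu_ib,		// pop it back out later
 *			head->next, list_node);
 *	LIST_DEL(&ib->list_node);
 */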

/**
 * Free an IB buffer to IB pools.
 *
 * \param context - \c [in] GPU Context
 * \param ib - \c [in] the IB buffer
 *
 * \return N/A
 */
static void amdgpu_cs_free_to_ib_pool(amdgpu_context_handle context,
				      amdgpu_ib_handle ib)
{
	struct list_head *head;
	head = &context->ib_pools[ib->ib_size];
	pthread_mutex_lock(&context->pool_mutex);
	LIST_ADD(&ib->list_node, head);
	pthread_mutex_unlock(&context->pool_mutex);
}

/**
 * Destroy all IB buffers in pools
 *
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_destroy_ib_pool(amdgpu_device_handle dev,
				     amdgpu_context_handle context)
{
	int i;
	int r;
	struct list_head *head;
	struct amdgpu_ib *next;
	struct amdgpu_ib *storage;

	r = 0;
	pthread_mutex_lock(&context->pool_mutex);
	for (i = 0; i < AMDGPU_CS_IB_SIZE_NUM; i++) {
		head = &context->ib_pools[i];
		LIST_FOR_EACH_ENTRY_SAFE(next, storage, head, list_node) {
			r = amdgpu_cs_destroy_ib(dev, next);
			if (r)
				break;
		}
	}
	pthread_mutex_unlock(&context->pool_mutex);
	pthread_mutex_destroy(&context->pool_mutex);
	return r;
}

/**
 * Initialize pending IB lists
 *
 * \param context - \c [in] GPU Context
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_init_pendings(amdgpu_context_handle context)
{
	unsigned ip, inst;
	uint32_t ring;
	int r;

	r = pthread_mutex_init(&context->pendings_mutex, NULL);
	if (r)
		return r;

	/* One pending list per (ip, instance, ring) triple. */
	for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
		for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
			for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
				LIST_INITHEAD(&context->pendings[ip][inst][ring]);

	LIST_INITHEAD(&context->freed);
	return 0;
}

/**
 * Free pending IBs
 *
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_destroy_pendings(amdgpu_device_handle dev,
				      amdgpu_context_handle context)
{
	unsigned ip, inst;
	uint32_t ring;
	int r;
	struct amdgpu_ib *next;
	struct amdgpu_ib *s;
	struct list_head *head;

	r = 0;
	pthread_mutex_lock(&context->pendings_mutex);
	for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
		for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
			for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++) {
				head = &context->pendings[ip][inst][ring];
				LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
					r = amdgpu_cs_destroy_ib(dev, next);
					if (r)
						break;
				}
			}

	head = &context->freed;
	LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
		r = amdgpu_cs_destroy_ib(dev, next);
		if (r)
			break;
	}

	pthread_mutex_unlock(&context->pendings_mutex);
	pthread_mutex_destroy(&context->pendings_mutex);
	return r;
}

/**
 * Add an IB to the pending IB lists without holding sequence_mutex.
 *
 * \param context - \c [in] GPU Context
 * \param ib - \c [in] IB to be added to the pending lists
 * \param ip - \c [in] hw ip block
 * \param ip_instance - \c [in] instance of the hw ip block
 * \param ring - \c [in] Ring of hw ip
 *
 * \return N/A
 */
static void amdgpu_cs_add_pending(amdgpu_context_handle context,
				  amdgpu_ib_handle ib,
				  unsigned ip, unsigned ip_instance,
				  uint32_t ring)
{
	struct list_head *head;
	pthread_mutex_lock(&context->pendings_mutex);
	head = &context->pendings[ip][ip_instance][ring];
	LIST_ADDTAIL(&ib->list_node, head);
	pthread_mutex_unlock(&context->pendings_mutex);
}

/**
 * Garbage collector on a pending IB list. This function does not take
 * pendings_mutex itself and is therefore not multithread safe on its own.
 *
 * \param context - \c [in] GPU Context
 * \param ip - \c [in] hw ip block
 * \param ip_instance - \c [in] instance of the hw ip block
 * \param ring - \c [in] Ring of hw ip
 * \param expired_fence - \c [in] latest expired fence value
 *
 * \return N/A
 * \note Hold pendings_mutex before calling this function.
 */
static void amdgpu_cs_pending_gc_not_safe(amdgpu_context_handle context,
					  unsigned ip, unsigned ip_instance,
					  uint32_t ring,
					  uint64_t expired_fence)
{
	struct list_head *head;
	struct amdgpu_ib *next;
	struct amdgpu_ib *s;
	int r;

	head = &context->pendings[ip][ip_instance][ring];
	LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node)
		if (next->cs_handle <= expired_fence) {
			LIST_DEL(&next->list_node);
			amdgpu_cs_free_to_ib_pool(context, next);
		} else {
			/* The pending list is a sorted list.
			   There is no need to continue. */
			break;
		}

	/* walk the freed list as well */
	head = &context->freed;
	LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
		bool busy = true;

		r = amdgpu_bo_wait_for_idle(next->buf_handle, 0, &busy);
		if (r || busy)
			break;

		LIST_DEL(&next->list_node);
		amdgpu_cs_free_to_ib_pool(context, next);
	}
}
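
/*
 * Worked example of the early break above: submissions append IBs in fence
 * order (amdgpu_cs_submit_one() holds sequence_mutex while adding them), so
 * a pending list may hold cs_handle values such as [5, 7, 9]. A GC pass with
 * expired_fence == 7 recycles 5 and 7 into the pools and stops at 9, since
 * every later entry is guaranteed to carry an even larger fence value.
 */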

/**
 * Garbage collector on a pending IB list
 *
 * \param context - \c [in] GPU Context
 * \param ip - \c [in] hw ip block
 * \param ip_instance - \c [in] instance of the hw ip block
 * \param ring - \c [in] Ring of hw ip
 * \param expired_fence - \c [in] latest expired fence value
 *
 * \return N/A
 */
static void amdgpu_cs_pending_gc(amdgpu_context_handle context,
				 unsigned ip, unsigned ip_instance,
				 uint32_t ring,
				 uint64_t expired_fence)
{
	pthread_mutex_lock(&context->pendings_mutex);
	amdgpu_cs_pending_gc_not_safe(context, ip, ip_instance, ring,
				      expired_fence);
	pthread_mutex_unlock(&context->pendings_mutex);
}

/**
 * Garbage collector on all pending IB lists
 *
 * \param context - \c [in] GPU Context
 *
 * \return N/A
 */
static void amdgpu_cs_all_pending_gc(amdgpu_context_handle context)
{
	unsigned ip, inst;
	uint32_t ring;
	uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];

	pthread_mutex_lock(&context->sequence_mutex);
	for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
		for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
			for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
				expired_fences[ip][inst][ring] =
					context->expired_fences[ip][inst][ring];
	pthread_mutex_unlock(&context->sequence_mutex);

	pthread_mutex_lock(&context->pendings_mutex);
	for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
		for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
			for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
				amdgpu_cs_pending_gc_not_safe(context, ip, inst, ring,
							expired_fences[ip][inst][ring]);
	pthread_mutex_unlock(&context->pendings_mutex);
}
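
/*
 * The snapshot dance above is a lock-ordering choice: the expired fence
 * values are copied out under sequence_mutex first, and only then is
 * pendings_mutex taken for the sweep, so the two mutexes are never held
 * simultaneously and cannot deadlock against amdgpu_cs_submit_one() or
 * amdgpu_cs_query_fence_status(). The price is that a sweep may run with
 * slightly stale fence values; the next GC pass picks up anything missed.
 */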

/**
 * Allocate an IB buffer.
 * If there is no free IB buffer in the pools, create one.
 *
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 * \param ib_size - \c [in] Size of allocation
 * \param ib - \c [out] return the pointer to the allocated IB buffer
 *
 * \return 0 on success, otherwise POSIX error code
 */
static int amdgpu_cs_alloc_ib_local(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    enum amdgpu_cs_ib_size ib_size,
				    amdgpu_ib_handle *ib)
{
	int r;

	r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
	if (!r)
		return r;

	amdgpu_cs_all_pending_gc(context);

	/* Retry to allocate from the free IB pools after garbage collecting. */
	r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
	if (!r)
		return r;

	/* There is no suitable IB in the free pools. Create one. */
	r = amdgpu_cs_create_ib(dev, context, ib_size, ib);
	return r;
}

int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
		       amdgpu_context_handle context,
		       enum amdgpu_cs_ib_size ib_size,
		       struct amdgpu_cs_ib_alloc_result *output)
{
	int r;
	amdgpu_ib_handle ib;

	if (NULL == dev)
		return -EINVAL;
	if (NULL == context)
		return -EINVAL;
	if (NULL == output)
		return -EINVAL;
	if (ib_size >= AMDGPU_CS_IB_SIZE_NUM)
		return -EINVAL;

	r = amdgpu_cs_alloc_ib_local(dev, context, ib_size, &ib);
	if (!r) {
		output->handle = ib;
		output->cpu = ib->cpu;
		output->mc_address = ib->virtual_mc_base_address;
	}

	return r;
}

int amdgpu_cs_free_ib(amdgpu_device_handle dev,
		      amdgpu_context_handle context,
		      amdgpu_ib_handle handle)
{
	if (NULL == dev)
		return -EINVAL;
	if (NULL == context)
		return -EINVAL;
	if (NULL == handle)
		return -EINVAL;

	/* The IB may still be in flight; park it on the freed list and let
	   the garbage collector recycle it once the GPU is done with it. */
	pthread_mutex_lock(&context->pendings_mutex);
	LIST_ADD(&handle->list_node, &context->freed);
	pthread_mutex_unlock(&context->pendings_mutex);
	return 0;
}
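
/*
 * A minimal usage sketch for the two public entry points above (illustrative
 * only; error handling is elided and the packet value is hypothetical):
 *
 *	struct amdgpu_cs_ib_alloc_result ib_result;
 *
 *	amdgpu_cs_alloc_ib(dev, context, amdgpu_cs_ib_size_4K, &ib_result);
 *	((uint32_t *)ib_result.cpu)[0] = 0xffff1000;	// e.g. a NOP packet
 *	// ... reference ib_result.handle from an amdgpu_cs_request ...
 *	amdgpu_cs_free_ib(dev, context, ib_result.handle);
 */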

/**
 * Create command submission context
 *
 * \param dev - \c [in] amdgpu device handle
 * \param context - \c [out] amdgpu context handle
 *
 * \return 0 on success, otherwise POSIX error code
 */
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context)
{
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int r;

	if (NULL == dev)
		return -EINVAL;
	if (NULL == context)
		return -EINVAL;

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (NULL == gpu_context)
		return -ENOMEM;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error_mutex;

	r = amdgpu_cs_init_ib_pool(gpu_context);
	if (r)
		goto error_pool;

	r = amdgpu_cs_init_pendings(gpu_context);
	if (r)
		goto error_pendings;

	/* The fence IB holds one signaled-fence slot per (ip, ring) pair. */
	r = amdgpu_cs_alloc_ib_local(dev, gpu_context, amdgpu_cs_ib_size_4K,
				     &gpu_context->fence_ib);
	if (r)
		goto error_fence_ib;

	/* Create the context on the kernel side. */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error_kernel;

	gpu_context->id = args.out.alloc.ctx_id;
	*context = (amdgpu_context_handle)gpu_context;

	return 0;

error_kernel:
	amdgpu_cs_free_ib(dev, gpu_context, gpu_context->fence_ib);

error_fence_ib:
	amdgpu_cs_destroy_pendings(dev, gpu_context);

error_pendings:
	amdgpu_cs_destroy_ib_pool(dev, gpu_context);

error_pool:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);

error_mutex:
	free(gpu_context);
	return r;
}

/**
 * Release command submission context
 *
 * \param dev - \c [in] amdgpu device handle
 * \param context - \c [in] amdgpu context handle
 *
 * \return 0 on success, otherwise POSIX error code
 */
int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
		       amdgpu_context_handle context)
{
	int r;
	union drm_amdgpu_ctx args;

	if (NULL == dev)
		return -EINVAL;
	if (NULL == context)
		return -EINVAL;

	r = amdgpu_cs_free_ib(dev, context, context->fence_ib);
	if (r)
		return r;

	r = amdgpu_cs_destroy_pendings(dev, context);
	if (r)
		return r;

	r = amdgpu_cs_destroy_ib_pool(dev, context);
	if (r)
		return r;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with the kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));

	free(context);
	return r;
}
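
/*
 * Context lifecycle sketch (illustrative only; assumes dev came from
 * amdgpu_device_initialize() and error handling is elided):
 *
 *	amdgpu_context_handle context;
 *
 *	amdgpu_cs_ctx_create(dev, &context);
 *	// ... allocate IBs, submit work, query fences ...
 *	amdgpu_cs_ctx_free(dev, context);
 *
 * Every submission made through the context is tagged with the kernel
 * context id allocated here, so the kernel tracks submissions per context.
 */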

static int amdgpu_cs_create_bo_list(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    struct amdgpu_cs_request *request,
				    amdgpu_ib_handle fence_ib,
				    uint32_t *handle)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned num_resources;
	unsigned i;
	int r;

	num_resources = request->number_of_resources;
	if (!num_resources) {
		*handle = 0;
		return 0;
	}

	if (fence_ib)
		++num_resources;

	list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * num_resources);

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = num_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < request->number_of_resources; i++) {
		list[i].bo_handle = request->resources[i]->handle;
		if (request->resource_flags)
			list[i].bo_priority = request->resource_flags[i];
		else
			list[i].bo_priority = 0;
	}

	if (fence_ib)
		list[i].bo_handle = fence_ib->buf_handle->handle;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (r)
		return r;

	*handle = args.out.list_handle;
	return 0;
}

static int amdgpu_cs_free_bo_list(amdgpu_device_handle dev, uint32_t handle)
{
	union drm_amdgpu_bo_list args;
	int r;

	if (!handle)
		return 0;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = handle;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	return r;
}

static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
{
	return ip * AMDGPU_CS_MAX_RINGS + ring;
}
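
/*
 * The fence BO (context->fence_ib) is treated as a flat uint64_t array with
 * one slot per (ip, ring) pair. For example, assuming AMDGPU_CS_MAX_RINGS is
 * 8, ip == AMDGPU_HW_IP_DMA (2) and ring == 1 map to slot 2 * 8 + 1 == 17,
 * which becomes byte offset 17 * sizeof(uint64_t) == 136 when the fence
 * chunk is built in amdgpu_cs_submit_one(). amdgpu_cs_query_fence_status()
 * reads the signaled fence value back from the same slot.
 */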

/**
 * Submit command to kernel DRM
 * \param dev - \c [in] Device handle
 * \param context - \c [in] GPU Context
 * \param ibs_request - \c [in] Pointer to submission requests
 * \param fence - \c [out] return fence for this submission
 *
 * \return 0 on success, otherwise POSIX error code
 * \sa amdgpu_cs_submit()
 */
static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
				amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request,
				uint64_t *fence)
{
	int r;
	uint32_t i, size;
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	uint32_t bo_list_handle;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
		return -EINVAL;

	size = (ibs_request->number_of_ibs + 1) * ((sizeof(uint64_t) +
		sizeof(struct drm_amdgpu_cs_chunk) +
		sizeof(struct drm_amdgpu_cs_chunk_data)) +
		ibs_request->number_of_resources + 1) *
		sizeof(struct drm_amdgpu_bo_list_entry);
	chunk_array = malloc(size);
	if (NULL == chunk_array)
		return -ENOMEM;
	memset(chunk_array, 0, size);
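
	/*
	 * Layout of the scratch allocation, carved up just below (n ==
	 * number_of_ibs; the "+ 1" slots leave room for the optional fence
	 * chunk appended later):
	 *
	 *	chunk_array[0 .. n]	user-space pointers, one per chunk
	 *	chunks[0 .. n]		struct drm_amdgpu_cs_chunk headers
	 *	chunk_data[0 .. n]	struct drm_amdgpu_cs_chunk_data payloads
	 *
	 * The size expression above is intentionally generous: it also folds
	 * in the resource count and the BO-list entry size, so it
	 * over-reserves rather than tracking this layout exactly.
	 */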
	chunks = (struct drm_amdgpu_cs_chunk *)(chunk_array + ibs_request->number_of_ibs + 1);
	chunk_data = (struct drm_amdgpu_cs_chunk_data *)(chunks + ibs_request->number_of_ibs + 1);

	memset(&cs, 0, sizeof(cs));
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.num_chunks = ibs_request->number_of_ibs;

	/* Build one IB chunk per submitted IB. */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;

		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data.handle = ib->ib_handle->buf_handle->handle;
		chunk_data[i].ib_data.va_start = ib->ib_handle->virtual_mc_base_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;

		if (ib->flags & AMDGPU_CS_GFX_IB_CE)
			chunk_data[i].ib_data.flags = AMDGPU_IB_FLAG_CE;
	}

	r = amdgpu_cs_create_bo_list(dev, context, ibs_request, NULL,
				     &bo_list_handle);
	if (r)
		goto error_free;

	cs.in.bo_list_handle = bo_list_handle;
	pthread_mutex_lock(&context->sequence_mutex);

	if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
	    ibs_request->ip_type != AMDGPU_HW_IP_VCE) {
		i = cs.in.num_chunks++;

		/* fence chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = context->fence_ib->buf_handle->handle;
		/* offset */
		chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
			ibs_request->ip_type, ibs_request->ring);
		chunk_data[i].fence_data.offset *= sizeof(uint64_t);
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		goto error_unlock;

	/* Hold sequence_mutex while adding records to the pending lists,
	   so each pending list stays sorted by fence value. */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;

		ib = &ibs_request->ibs[i];
		if (ib->flags & AMDGPU_CS_REUSE_IB)
			continue;

		ib->ib_handle->cs_handle = cs.out.handle;

		amdgpu_cs_add_pending(context, ib->ib_handle, ibs_request->ip_type,
				      ibs_request->ip_instance,
				      ibs_request->ring);
	}

	*fence = cs.out.handle;

	pthread_mutex_unlock(&context->sequence_mutex);

	r = amdgpu_cs_free_bo_list(dev, bo_list_handle);
	if (r)
		goto error_free;

	free(chunk_array);
	return 0;

error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);

error_free:
	free(chunk_array);
	return r;
}

int amdgpu_cs_submit(amdgpu_device_handle dev,
		     amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests,
		     uint64_t *fences)
{
	int r;
	uint32_t i;

	if (NULL == dev)
		return -EINVAL;
	if (NULL == context)
		return -EINVAL;
	if (NULL == ibs_request)
		return -EINVAL;
	if (NULL == fences)
		return -EINVAL;

	r = 0;
	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(dev, context, ibs_request, fences);
		if (r)
			break;
		fences++;
		ibs_request++;
	}

	return r;
}

/**
 * Calculate absolute timeout.
 *
 * \param timeout - \c [in] timeout in nanoseconds.
 *
 * \return absolute timeout in nanoseconds
 */
uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;

		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r)
			return AMDGPU_TIMEOUT_INFINITE;

		timeout += ((uint64_t)current.tv_sec) * 1000000000ull;
		timeout += current.tv_nsec;
	}
	return timeout;
}
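
/*
 * Example: a caller passing timeout == 2000000000 (two seconds) gets back
 * CLOCK_MONOTONIC "now" plus two seconds, in nanoseconds, which is the
 * absolute-deadline form the wait-cs ioctl below consumes.
 * AMDGPU_TIMEOUT_INFINITE passes through untouched, so an infinite wait can
 * never overflow the addition.
 */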

static int amdgpu_ioctl_wait_cs(amdgpu_device_handle dev,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				bool *busy)
{
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	/* Handle errors manually here because of the timeout semantics. */
	r = ioctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r == -1 && (errno == EINTR || errno == EAGAIN)) {
		/* An interrupted wait just means the fence is still busy. */
		*busy = true;
		return 0;
	} else if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}

int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
				 struct amdgpu_cs_query_fence *fence,
				 uint32_t *expired)
{
	amdgpu_context_handle context;
	uint64_t *signaled_fence;
	uint64_t *expired_fence;
	unsigned ip_type, ip_instance;
	uint32_t ring;
	bool busy = true;
	int r;

	if (NULL == dev)
		return -EINVAL;
	if (NULL == fence)
		return -EINVAL;
	if (NULL == expired)
		return -EINVAL;
	if (NULL == fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;

	context = fence->context;
	ip_type = fence->ip_type;
	ip_instance = fence->ip_instance;
	ring = fence->ring;
	signaled_fence = context->fence_ib->cpu;
	signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
	expired_fence = &context->expired_fences[ip_type][ip_instance][ring];
	*expired = false;

	pthread_mutex_lock(&context->sequence_mutex);
	if (fence->fence <= *expired_fence) {
		/* This fence value is expired already. */
		pthread_mutex_unlock(&context->sequence_mutex);
		*expired = true;
		return 0;
	}

	if (fence->fence <= *signaled_fence) {
		/* This fence value is signaled already. */
		*expired_fence = *signaled_fence;
		pthread_mutex_unlock(&context->sequence_mutex);
		amdgpu_cs_pending_gc(context, ip_type, ip_instance, ring,
				     fence->fence);
		*expired = true;
		return 0;
	}

	pthread_mutex_unlock(&context->sequence_mutex);

	r = amdgpu_ioctl_wait_cs(dev, ip_type, ip_instance, ring,
				 fence->fence, fence->timeout_ns, &busy);
	if (!r && !busy) {
		*expired = true;
		pthread_mutex_lock(&context->sequence_mutex);
		/* This thread dropped sequence_mutex above, so another thread
		   could have updated *expired_fence already. Check whether
		   there is a newly expired fence. */
		if (fence->fence > *expired_fence) {
			*expired_fence = fence->fence;
			pthread_mutex_unlock(&context->sequence_mutex);
			amdgpu_cs_pending_gc(context, ip_type, ip_instance,
					     ring, fence->fence);
		} else {
			pthread_mutex_unlock(&context->sequence_mutex);
		}
	}

	return r;
}
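
/*
 * Putting the pieces together, a minimal submit-and-wait sequence on top of
 * this file could look like the following (illustrative only; ib_result
 * comes from amdgpu_cs_alloc_ib(), num_dwords is a hypothetical packet
 * count, and error handling is elided):
 *
 *	struct amdgpu_cs_ib_info ib_info = {0};
 *	struct amdgpu_cs_request request = {0};
 *	struct amdgpu_cs_query_fence fence_status = {0};
 *	uint64_t fence;
 *	uint32_t expired;
 *
 *	ib_info.ib_handle = ib_result.handle;
 *	ib_info.size = num_dwords;
 *	request.ip_type = AMDGPU_HW_IP_GFX;
 *	request.number_of_ibs = 1;
 *	request.ibs = &ib_info;
 *	amdgpu_cs_submit(dev, context, 0, &request, 1, &fence);
 *
 *	fence_status.context = context;
 *	fence_status.ip_type = AMDGPU_HW_IP_GFX;
 *	fence_status.fence = fence;
 *	fence_status.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
 *	amdgpu_cs_query_fence_status(dev, &fence_status, &expired);
 */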