/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <sys/ioctl.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
/**
 * Create command submission context
 *
 * \param   dev      - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
 * \param   context  - \c [out] GPU Context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
				     uint32_t priority,
				     amdgpu_context_handle *context)
{
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!dev || !context)
		return -EINVAL;

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (!gpu_context)
		return -ENOMEM;

	gpu_context->dev = dev;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error;

	/* Create the context */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = priority;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error;

	gpu_context->id = args.out.alloc.ctx_id;
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
				list_inithead(&gpu_context->sem_list[i][j][k]);
	*context = (amdgpu_context_handle)gpu_context;

	return 0;

error:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);
	free(gpu_context);
	return r;
}
drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
				    amdgpu_context_handle *context)
{
	return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
}
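
/*
 * Minimal usage sketch (illustrative only; "dev" is assumed to come from
 * amdgpu_device_initialize(), and error handling is left to the caller):
 *
 *	amdgpu_context_handle ctx;
 *	int r = amdgpu_cs_ctx_create(dev, &ctx);
 *	if (r)
 *		return r;
 *	... submit work against ctx ...
 *	amdgpu_cs_ctx_free(ctx);
 */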
/**
 * Release command submission context
 *
 * \param   dev - \c [in] amdgpu device handle
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!context)
		return -EINVAL;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
				amdgpu_semaphore_handle sem;
				LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
					list_del(&sem->list);
					amdgpu_cs_reset_sem(sem);
					amdgpu_cs_unreference_sem(sem);
				}
			}
		}
	}

	free(context);
	return r;
}
drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
					       amdgpu_context_handle context,
					       int master_fd,
					       unsigned priority)
{
	union drm_amdgpu_sched args;
	int r;

	if (!dev || !context || master_fd < 0)
		return -EINVAL;

	memset(&args, 0, sizeof(args));

	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = dev->fd;
	args.in.priority = priority;
	args.in.ctx_id = context->id;

	r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
	if (r)
		return r;

	return 0;
}
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
					   uint32_t *state, uint32_t *hangs)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r) {
		*state = args.out.state.reset_status;
		*hangs = args.out.state.hangs;
	}
	return r;
}
/**
 * Submit command to kernel DRM
 * \param   dev - \c [in]  Device handle
 * \param   context - \c [in]  GPU Context
 * \param   ibs_request - \c [in]  Pointer to submission requests
 * \param   fence - \c [out] return fence for this submission
 *
 * \return  0 on success otherwise POSIX Error code
 * \sa amdgpu_cs_submit()
 */
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	struct list_head *sem_list;
	amdgpu_semaphore_handle sem, tmp;
	uint32_t i, size, sem_count = 0;
	bool user_fence;
	int r = 0;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs == 0) {
		ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
		return 0;
	}
	user_fence = (ibs_request->fence_info.handle != NULL);

	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;

	chunk_array = alloca(sizeof(uint64_t) * size);
	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

	memset(&cs, 0, sizeof(cs));
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	if (ibs_request->resources)
		cs.in.bo_list_handle = ibs_request->resources->handle;
	cs.in.num_chunks = ibs_request->number_of_ibs;
	/* IB chunks */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}
	pthread_mutex_lock(&context->sequence_mutex);

	if (user_fence) {
		i = cs.in.num_chunks++;

		/* fence chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
		/* offset */
		chunk_data[i].fence_data.offset =
			ibs_request->fence_info.offset * sizeof(uint64_t);
	}
	if (ibs_request->number_of_dependencies) {
		dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
			ibs_request->number_of_dependencies);
		if (!dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}

		for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
			struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
			struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;
		}

		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
			* ibs_request->number_of_dependencies;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
	}
	sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
	LIST_FOR_EACH_ENTRY(sem, sem_list, list)
		sem_count++;
	if (sem_count) {
		sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}
		sem_count = 0;

		LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
			struct amdgpu_cs_fence *info = &sem->signal_fence;
			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;

			list_del(&sem->list);
			amdgpu_cs_reset_sem(sem);
			amdgpu_cs_unreference_sem(sem);
		}
		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
	}
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		goto error_unlock;

	ibs_request->seq_no = cs.out.handle;
	context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);
	free(dependencies);
	free(sem_dependencies);
	return r;
}
drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
				uint64_t flags,
				struct amdgpu_cs_request *ibs_request,
				uint32_t number_of_requests)
{
	uint32_t i;
	int r;

	if (!context || !ibs_request)
		return -EINVAL;

	r = 0;
	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(context, ibs_request);
		if (r)
			break;
		ibs_request++;
	}

	return r;
}
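
/*
 * Illustrative single-IB submission sketch (hypothetical values: the IB is
 * assumed to be written at GPU VA "ib_mc_address" and its buffer to be a
 * member of "bo_list"; error handling omitted):
 *
 *	struct amdgpu_cs_ib_info ib_info = {
 *		.ib_mc_address = ib_mc_address,
 *		.size = num_dwords,
 *	};
 *	struct amdgpu_cs_request request = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.ring = 0,
 *		.resources = bo_list,
 *		.number_of_ibs = 1,
 *		.ibs = &ib_info,
 *	};
 *	int r = amdgpu_cs_submit(ctx, 0, &request, 1);
 *	... on success, request.seq_no holds the fence sequence number ...
 */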
/**
 * Calculate absolute timeout.
 *
 * \param   timeout - \c [in] timeout in nanoseconds.
 *
 * \return  absolute timeout in nanoseconds
 */
drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;
		uint64_t current_ns;
		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r) {
			fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
			return AMDGPU_TIMEOUT_INFINITE;
		}

		current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
		current_ns += current.tv_nsec;
		timeout += current_ns;
		if (timeout < current_ns)
			timeout = AMDGPU_TIMEOUT_INFINITE;
	}
	return timeout;
}
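
/*
 * Worked example: with CLOCK_MONOTONIC currently at T ns, a relative
 * timeout of 1000000000 (one second) becomes the absolute value
 * T + 1000000000. The "timeout < current_ns" test catches unsigned
 * wrap-around, so an overly large relative timeout degrades to
 * AMDGPU_TIMEOUT_INFINITE instead of an absolute time in the past.
 */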
static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				uint64_t flags,
				bool *busy)
{
	amdgpu_device_handle dev = context->dev;
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.ctx_id = context->id;

	if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
		args.in.timeout = timeout_ns;
	else
		args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}
drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
					    uint64_t timeout_ns,
					    uint64_t flags,
					    uint32_t *expired)
{
	bool busy = true;
	int r;

	if (!fence || !expired || !fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
		*expired = true;
		return 0;
	}

	*expired = false;

	r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
				fence->ip_instance, fence->ring,
				fence->fence, timeout_ns, flags, &busy);

	if (!r && !busy)
		*expired = true;

	return r;
}
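
/*
 * Illustrative sketch: checking the fence of an earlier submission
 * (assumes "request" was filled in by amdgpu_cs_submit() as in the
 * example above):
 *
 *	struct amdgpu_cs_fence fence = {
 *		.context = ctx,
 *		.ip_type = request.ip_type,
 *		.ip_instance = request.ip_instance,
 *		.ring = request.ring,
 *		.fence = request.seq_no,
 *	};
 *	uint32_t expired = 0;
 *	int r = amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE,
 *					     0, &expired);
 */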
static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout_ns,
				    uint32_t *status,
				    uint32_t *first)
{
	struct drm_amdgpu_fence *drm_fences;
	amdgpu_device_handle dev = fences[0].context->dev;
	union drm_amdgpu_wait_fences args;
	int r;
	uint32_t i;

	drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
	for (i = 0; i < fence_count; i++) {
		drm_fences[i].ctx_id = fences[i].context->id;
		drm_fences[i].ip_type = fences[i].ip_type;
		drm_fences[i].ip_instance = fences[i].ip_instance;
		drm_fences[i].ring = fences[i].ring;
		drm_fences[i].seq_no = fences[i].fence;
	}

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)drm_fences;
	args.in.fence_count = fence_count;
	args.in.wait_all = wait_all;
	args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return -errno;

	*status = args.out.status;

	if (first)
		*first = args.out.first_signaled;

	return 0;
}
drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
				     uint32_t fence_count,
				     bool wait_all,
				     uint64_t timeout_ns,
				     uint32_t *status,
				     uint32_t *first)
{
	uint32_t i;

	/* Sanity check */
	if (!fences || !status || !fence_count)
		return -EINVAL;

	for (i = 0; i < fence_count; i++) {
		if (NULL == fences[i].context)
			return -EINVAL;
		if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
			return -EINVAL;
		if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
			return -EINVAL;
	}

	*status = 0;

	return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
					timeout_ns, status, first);
}
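
/*
 * Illustrative sketch: waiting for the first of two fences to signal
 * (a hypothetical "fences[2]" array filled in like the
 * amdgpu_cs_query_fence_status() example above; wait_all = false returns
 * as soon as any one fence signals):
 *
 *	uint32_t status = 0, first = 0;
 *	int r = amdgpu_cs_wait_fences(fences, 2, false,
 *				      AMDGPU_TIMEOUT_INFINITE,
 *				      &status, &first);
 *	... status != 0 means a fence signaled; "first" is its index ...
 */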
drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
	struct amdgpu_semaphore *gpu_semaphore;

	if (!sem)
		return -EINVAL;

	gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
	if (!gpu_semaphore)
		return -ENOMEM;

	atomic_set(&gpu_semaphore->refcount, 1);
	*sem = gpu_semaphore;

	return 0;
}
drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
					  uint32_t ip_type,
					  uint32_t ip_instance,
					  uint32_t ring,
					  amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* sem has been signaled */
	if (sem->signal_fence.context)
		return -EINVAL;
	pthread_mutex_lock(&ctx->sequence_mutex);
	sem->signal_fence.context = ctx;
	sem->signal_fence.ip_type = ip_type;
	sem->signal_fence.ip_instance = ip_instance;
	sem->signal_fence.ring = ring;
	sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
	update_references(NULL, &sem->refcount);
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return 0;
}
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
					uint32_t ip_type,
					uint32_t ip_instance,
					uint32_t ring,
					amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* must signal first */
	if (!sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return 0;
}
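
/*
 * Illustrative cross-ring ordering sketch with the semaphore API: the
 * semaphore captures the last sequence number submitted to the signaling
 * ring, and the next submission on the waiting ring picks it up as a
 * dependency (error handling omitted):
 *
 *	amdgpu_semaphore_handle sem;
 *	amdgpu_cs_create_semaphore(&sem);
 *	... submit work to GFX ring 0 ...
 *	amdgpu_cs_signal_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 0, sem);
 *	amdgpu_cs_wait_semaphore(ctx, AMDGPU_HW_IP_DMA, 0, 0, sem);
 *	... the next submission to DMA ring 0 waits on the GFX work ...
 *	amdgpu_cs_destroy_semaphore(sem);
 */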
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
{
	if (!sem || !sem->signal_fence.context)
		return -EINVAL;

	sem->signal_fence.context = NULL;
	sem->signal_fence.ip_type = 0;
	sem->signal_fence.ip_instance = 0;
	sem->signal_fence.ring = 0;
	sem->signal_fence.fence = 0;

	return 0;
}
static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
{
	if (!sem)
		return -EINVAL;

	if (update_references(&sem->refcount, NULL))
		free(sem);
	return 0;
}

drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
	return amdgpu_cs_unreference_sem(sem);
}
drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
					 uint32_t flags,
					 uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, flags, handle);
}

drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, 0, handle);
}

drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
					 uint32_t handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjDestroy(dev->fd, handle);
}
drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
				       const uint32_t *syncobjs,
				       uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
					const uint32_t *syncobjs,
					uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
						 const uint32_t *syncobjs,
						 uint64_t *points,
						 uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineSignal(dev->fd, syncobjs,
					points, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
				      uint32_t *handles, unsigned num_handles,
				      int64_t timeout_nsec, unsigned flags,
				      uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
			      flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
					       uint32_t *handles, uint64_t *points,
					       unsigned num_handles,
					       int64_t timeout_nsec, unsigned flags,
					       uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
				      timeout_nsec, flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
				       uint32_t *handles, uint64_t *points,
				       unsigned num_handles)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
					uint32_t handle,
					int *shared_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
}

drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
					int shared_fd,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
}

drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int *sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
}

drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint32_t flags,
						   uint64_t point,
						   int *sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;

	ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
				 syncobj, point, flags);
	if (ret)
		goto out;
	ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   int sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;
	ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
	if (ret)
		goto out;
	ret = drmSyncobjTransfer(dev->fd, syncobj, point,
				 binary_handle, 0, 0);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}
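
/*
 * Illustrative sketch: exporting timeline point 5 of a hypothetical
 * "syncobj" handle as a sync_file fd; the flags argument is forwarded to
 * drmSyncobjTransfer(), e.g. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT.
 * With point == 0 both helpers fall back to the plain binary sync_file
 * paths above:
 *
 *	int fd = -1;
 *	int r = amdgpu_cs_syncobj_export_sync_file2(dev, syncobj,
 *			DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, 5, &fd);
 */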
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    amdgpu_bo_list_handle bo_list_handle,
				    int num_chunks,
				    struct drm_amdgpu_cs_chunk *chunks,
				    uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	if (num_chunks == 0)
		return -EINVAL;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		return r;

	if (seq_no)
		*seq_no = cs.out.handle;
	return 0;
}
drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
				     amdgpu_context_handle context,
				     uint32_t bo_list_handle,
				     int num_chunks,
				     struct drm_amdgpu_cs_chunk *chunks,
				     uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (!r && seq_no)
		*seq_no = cs.out.handle;
	return r;
}
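
/*
 * Illustrative raw-submission sketch: the caller owns the chunk layout,
 * here a single IB chunk ("ib_data" is a hypothetical, already-filled
 * struct drm_amdgpu_cs_chunk_data; "bo_list_kms_handle" a hypothetical
 * kernel BO-list handle):
 *
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *		.chunk_data = (uint64_t)(uintptr_t)&ib_data,
 *	};
 *	uint64_t seq_no;
 *	int r = amdgpu_cs_submit_raw2(dev, ctx, bo_list_kms_handle,
 *				      1, &chunk, &seq_no);
 */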
drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
					struct drm_amdgpu_cs_chunk_data *data)
{
	data->fence_data.handle = fence_info->handle->handle;
	data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
}
drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
					struct drm_amdgpu_cs_chunk_dep *dep)
{
	dep->ip_type = fence->ip_type;
	dep->ip_instance = fence->ip_instance;
	dep->ring = fence->ring;
	dep->ctx_id = fence->context->id;
	dep->handle = fence->fence;
}
drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
					 struct amdgpu_cs_fence *fence,
					 uint32_t what,
					 uint32_t *out_handle)
{
	union drm_amdgpu_fence_to_handle fth;
	int r;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence.ctx_id = fence->context->id;
	fth.in.fence.ip_type = fence->ip_type;
	fth.in.fence.ip_instance = fence->ip_instance;
	fth.in.fence.ring = fence->ring;
	fth.in.fence.seq_no = fence->fence;
	fth.in.what = what;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
				&fth, sizeof(fth));
	if (r == 0)
		*out_handle = fth.out.handle;
	return r;
}
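
/*
 * Illustrative sketch: converting a submission's fence into a sync_file
 * fd that other processes or APIs can wait on ("fence" filled in as in
 * the amdgpu_cs_query_fence_status() example above):
 *
 *	uint32_t fd_handle;
 *	int r = amdgpu_cs_fence_to_handle(dev, &fence,
 *			AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD, &fd_handle);
 *	... on success fd_handle is a sync_file file descriptor ...
 */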