/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <pthread.h>
#include <time.h>
#include <sys/ioctl.h>
#ifdef HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);

/**
 * Create command submission context
 *
 * \param   dev     - \c [in] amdgpu device handle
 * \param   context - \c [out] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context)
{
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!dev || !context)
		return -EINVAL;

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (!gpu_context)
		return -ENOMEM;

	gpu_context->dev = dev;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error;

	/* Create the context */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error;

	gpu_context->id = args.out.alloc.ctx_id;
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
				list_inithead(&gpu_context->sem_list[i][j][k]);
	*context = (amdgpu_context_handle)gpu_context;

	return 0;

error:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);
	free(gpu_context);
	return r;
}

/**
 * Release command submission context
 *
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
	union drm_amdgpu_ctx args;
	amdgpu_semaphore_handle sem, tmp;
	int i, j, k;
	int r;

	if (!context)
		return -EINVAL;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));

	/* drop any semaphores still queued on this context's rings; use the
	 * _SAFE iterator because entries are freed while walking the list */
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
				LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
					list_del(&sem->list);
					amdgpu_cs_reset_sem(sem);
					amdgpu_cs_unreference_sem(sem);
				}
			}
		}
	}

	free(context);
	return r;
}

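/*
 * Illustrative usage sketch (not part of this file): the typical context
 * lifecycle. "dev" stands for a device handle previously obtained from
 * amdgpu_device_initialize(); error handling is abbreviated.
 *
 *	amdgpu_context_handle ctx;
 *	int r;
 *
 *	r = amdgpu_cs_ctx_create(dev, &ctx);
 *	if (r)
 *		return r;
 *	... submit work against ctx ...
 *	amdgpu_cs_ctx_free(ctx);
 */
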
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
				uint32_t *state, uint32_t *hangs)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context || !state || !hangs)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r) {
		*state = args.out.state.reset_status;
		*hangs = args.out.state.hangs;
	}
	return r;
}

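/*
 * Illustrative sketch (not part of this file): checking whether the GPU was
 * reset behind this context. The AMDGPU_CTX_*_RESET values come from
 * amdgpu_drm.h; recreate_context() is a hypothetical recovery helper.
 *
 *	uint32_t state, hangs;
 *
 *	if (!amdgpu_cs_query_reset_state(ctx, &state, &hangs) &&
 *	    state != AMDGPU_CTX_NO_RESET)
 *		recreate_context(&ctx);
 */
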
/**
 * Submit command to kernel DRM
 *
 * \param   context     - \c [in] GPU Context
 * \param   ibs_request - \c [in/out] Pointer to submission request;
 *                        seq_no is set on success
 *
 * \return  0 on success otherwise POSIX Error code
 * \sa amdgpu_cs_submit()
 */
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	struct list_head *sem_list;
	amdgpu_semaphore_handle sem, tmp;
	uint32_t i, size, sem_count = 0;
	bool user_fence;
	int r = 0;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
		return -EINVAL;
	if (ibs_request->number_of_ibs == 0) {
		ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
		return 0;
	}
	user_fence = (ibs_request->fence_info.handle != NULL);

	/* worst case: one chunk per IB, plus optional fence chunk and
	 * dependency/semaphore chunks */
	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;

	chunk_array = alloca(sizeof(uint64_t) * size);
	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
	memset(&cs, 0, sizeof(cs));
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	if (ibs_request->resources)
		cs.in.bo_list_handle = ibs_request->resources->handle;
	cs.in.num_chunks = ibs_request->number_of_ibs;

	/* IB chunks */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}
	pthread_mutex_lock(&context->sequence_mutex);

	if (user_fence) {
		i = cs.in.num_chunks++;

		/* fence chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
		/* offset */
		chunk_data[i].fence_data.offset =
			ibs_request->fence_info.offset * sizeof(uint64_t);
	}
	if (ibs_request->number_of_dependencies) {
		dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
				      ibs_request->number_of_dependencies);
		if (!dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}

		for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
			struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
			struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;
		}

		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
			* ibs_request->number_of_dependencies;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
	}
	/* semaphores queued by amdgpu_cs_wait_semaphore() become an extra
	 * dependencies chunk and are consumed by this submission */
	sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
	LIST_FOR_EACH_ENTRY(sem, sem_list, list)
		sem_count++;
	if (sem_count) {
		sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}
		sem_count = 0;
		LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
			struct amdgpu_cs_fence *info = &sem->signal_fence;
			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;

			list_del(&sem->list);
			amdgpu_cs_reset_sem(sem);
			amdgpu_cs_unreference_sem(sem);
		}
		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
	}
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		goto error_unlock;

	ibs_request->seq_no = cs.out.handle;
	context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);
	free(dependencies);
	free(sem_dependencies);
	return r;
}

int amdgpu_cs_submit(amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests)
{
	uint32_t i;
	int r;

	if (!context || !ibs_request)
		return -EINVAL;

	r = 0;
	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(context, ibs_request);
		if (r)
			break;
		ibs_request++;
	}

	return r;
}

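/*
 * Illustrative sketch (not part of this file): submitting a single IB on
 * the GFX ring. "ib_va" and "ib_size_dw" stand for the GPU address and
 * dword size of an already-mapped indirect buffer, "bo_list" for a handle
 * from amdgpu_bo_list_create(); error handling is abbreviated.
 *
 *	struct amdgpu_cs_ib_info ib_info = {0};
 *	struct amdgpu_cs_request request = {0};
 *	int r;
 *
 *	ib_info.ib_mc_address = ib_va;
 *	ib_info.size = ib_size_dw;
 *
 *	request.ip_type = AMDGPU_HW_IP_GFX;
 *	request.resources = bo_list;
 *	request.number_of_ibs = 1;
 *	request.ibs = &ib_info;
 *
 *	r = amdgpu_cs_submit(ctx, 0, &request, 1);
 *	// on success, request.seq_no identifies this submission
 */
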
/**
 * Calculate absolute timeout.
 *
 * \param   timeout - \c [in] timeout in nanoseconds.
 *
 * \return  absolute timeout in nanoseconds
 */
drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;
		uint64_t current_ns;

		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r) {
			fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
			return AMDGPU_TIMEOUT_INFINITE;
		}

		current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
		current_ns += current.tv_nsec;
		timeout += current_ns;
		/* overflow means the deadline is unrepresentable; wait forever */
		if (timeout < current_ns)
			timeout = AMDGPU_TIMEOUT_INFINITE;
	}
	return timeout;
}

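/*
 * Worked example (illustrative): with a current monotonic time of
 * 100,000,000,000 ns, a relative timeout of 2,000,000,000 ns (2 s) yields
 * an absolute deadline of 102,000,000,000 ns. A relative timeout so large
 * that the addition wraps past UINT64_MAX is treated as infinite.
 */
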
static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				uint64_t flags,
				bool *busy)
{
	amdgpu_device_handle dev = context->dev;
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.ctx_id = context->id;

	if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
		args.in.timeout = timeout_ns;
	else
		args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}

int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
				 uint64_t timeout_ns,
				 uint64_t flags,
				 uint32_t *expired)
{
	bool busy = true;
	int r;

	if (!fence || !expired || !fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
		/* an empty submission has nothing to wait for */
		*expired = true;
		return 0;
	}

	*expired = false;

	r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
				 fence->ip_instance, fence->ring,
				 fence->fence, timeout_ns, flags, &busy);

	if (!r && !busy)
		*expired = true;

	return r;
}

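/*
 * Illustrative sketch (not part of this file): blocking on the fence of the
 * submission built in the earlier sketch, with a 1 s relative timeout.
 *
 *	struct amdgpu_cs_fence fence = {0};
 *	uint32_t expired = 0;
 *	int r;
 *
 *	fence.context = ctx;
 *	fence.ip_type = AMDGPU_HW_IP_GFX;
 *	fence.fence = request.seq_no;
 *
 *	r = amdgpu_cs_query_fence_status(&fence, 1000000000, 0, &expired);
 *	// r == 0 && expired != 0 means the submission has completed
 */
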
static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout_ns,
				    uint32_t *status,
				    uint32_t *first)
{
	struct drm_amdgpu_fence *drm_fences;
	amdgpu_device_handle dev = fences[0].context->dev;
	union drm_amdgpu_wait_fences args;
	uint32_t i;
	int r;

	drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
	for (i = 0; i < fence_count; i++) {
		drm_fences[i].ctx_id = fences[i].context->id;
		drm_fences[i].ip_type = fences[i].ip_type;
		drm_fences[i].ip_instance = fences[i].ip_instance;
		drm_fences[i].ring = fences[i].ring;
		drm_fences[i].seq_no = fences[i].fence;
	}

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)drm_fences;
	args.in.fence_count = fence_count;
	args.in.wait_all = wait_all;
	args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return -errno;

	*status = args.out.status;

	if (first)
		*first = args.out.first_signaled;

	return 0;
}

int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
			  uint32_t fence_count,
			  bool wait_all,
			  uint64_t timeout_ns,
			  uint32_t *status,
			  uint32_t *first)
{
	uint32_t i;

	/* Sanity check */
	if (!fences || !status || !fence_count)
		return -EINVAL;

	for (i = 0; i < fence_count; i++) {
		if (NULL == fences[i].context)
			return -EINVAL;
		if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
			return -EINVAL;
		if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
			return -EINVAL;
	}

	*status = 0;

	return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
					timeout_ns, status, first);
}

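/*
 * Illustrative sketch (not part of this file): waiting for the first of two
 * fences to signal. "fences" stands for an array of two fully filled
 * amdgpu_cs_fence structures as in the previous sketch.
 *
 *	uint32_t status = 0, first = 0;
 *	int r;
 *
 *	r = amdgpu_cs_wait_fences(fences, 2, false, 1000000000,
 *				  &status, &first);
 *	// status != 0 means at least one fence signaled; "first" is its index
 */
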
int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
	struct amdgpu_semaphore *gpu_semaphore;

	if (!sem)
		return -EINVAL;

	gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
	if (!gpu_semaphore)
		return -ENOMEM;

	atomic_set(&gpu_semaphore->refcount, 1);
	*sem = gpu_semaphore;

	return 0;
}

int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
			       uint32_t ip_type,
			       uint32_t ip_instance,
			       uint32_t ring,
			       amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* sem has been signaled */
	if (sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	/* latch the last sequence number submitted on this ring */
	sem->signal_fence.context = ctx;
	sem->signal_fence.ip_type = ip_type;
	sem->signal_fence.ip_instance = ip_instance;
	sem->signal_fence.ring = ring;
	sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
	update_references(NULL, &sem->refcount);
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return 0;
}

int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
			     uint32_t ip_type,
			     uint32_t ip_instance,
			     uint32_t ring,
			     amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* must signal first */
	if (!sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	/* queued semaphores become dependencies of the next submission on
	 * this ring; see amdgpu_cs_submit_one() */
	list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return 0;
}

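/*
 * Illustrative sketch (not part of this file): making a compute-ring
 * submission wait for the last GFX submission on the same context.
 *
 *	amdgpu_semaphore_handle sem;
 *
 *	amdgpu_cs_create_semaphore(&sem);
 *	// after submitting to GFX:
 *	amdgpu_cs_signal_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 0, sem);
 *	// the next compute submission now depends on that fence:
 *	amdgpu_cs_wait_semaphore(ctx, AMDGPU_HW_IP_COMPUTE, 0, 0, sem);
 *	amdgpu_cs_destroy_semaphore(sem);
 */
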
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
{
	if (!sem || !sem->signal_fence.context)
		return -EINVAL;

	sem->signal_fence.context = NULL;
	sem->signal_fence.ip_type = 0;
	sem->signal_fence.ip_instance = 0;
	sem->signal_fence.ring = 0;
	sem->signal_fence.fence = 0;

	return 0;
}

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
{
	if (!sem)
		return -EINVAL;

	/* free the semaphore once the last reference is dropped */
	if (update_references(&sem->refcount, NULL))
		free(sem);
	return 0;
}

int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
	return amdgpu_cs_unreference_sem(sem);
}