/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
/**
 * Create command submission context
 *
 * \param   dev - \c [in] amdgpu device handle
 * \param   context - \c [out] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
*/
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context)
{
	struct amdgpu_bo_alloc_request alloc_buffer = {};
	struct amdgpu_bo_alloc_result info = {};
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int r;

	if (NULL == dev || NULL == context)
		return -EINVAL;

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (NULL == gpu_context)
		return -ENOMEM;

	gpu_context->dev = dev;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error_mutex;

	/* Create the fence BO (one CPU-visible page in GTT) */
	alloc_buffer.alloc_size = 4 * 1024;
	alloc_buffer.phys_alignment = 4 * 1024;
	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
	if (r)
		goto error_fence_alloc;
	gpu_context->fence_bo = info.buf_handle;

	r = amdgpu_bo_cpu_map(gpu_context->fence_bo, &gpu_context->fence_cpu);
	if (r)
		goto error_fence_map;

	/* Create the context on the kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error_kernel;

	gpu_context->id = args.out.alloc.ctx_id;
	*context = (amdgpu_context_handle)gpu_context;
	return 0;

error_kernel:
	amdgpu_bo_cpu_unmap(gpu_context->fence_bo);
error_fence_map:
	amdgpu_bo_free(gpu_context->fence_bo);
error_fence_alloc:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);
error_mutex:
	free(gpu_context);
	return r;
}
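/* Usage sketch (illustrative, not part of the library): `dev` is assumed to
 * come from an earlier amdgpu_device_initialize() call.
 *
 *	amdgpu_context_handle ctx;
 *	int r = amdgpu_cs_ctx_create(dev, &ctx);
 *	if (r == 0) {
 *		... build and submit command streams against ctx ...
 *		amdgpu_cs_ctx_free(ctx);
 *	}
 */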
/**
 * Release command submission context
 *
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
*/
int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
	union drm_amdgpu_ctx args;
	int r;

	if (NULL == context)
		return -EINVAL;

	r = amdgpu_bo_cpu_unmap(context->fence_bo);
	if (r)
		return r;
	r = amdgpu_bo_free(context->fence_bo);
	if (r)
		return r;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	free(context);
	return r;
}
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
				uint32_t *state, uint32_t *hangs)
{
	union drm_amdgpu_ctx args;
	int r;

	if (NULL == context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r) {
		*state = args.out.state.reset_status;
		*hangs = args.out.state.hangs;
	}
	return r;
}
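/* Usage sketch (illustrative, not part of the library): after a failed wait,
 * ask whether this context was affected by a GPU reset.
 *
 *	uint32_t state, hangs;
 *	if (amdgpu_cs_query_reset_state(ctx, &state, &hangs) == 0 &&
 *	    state != AMDGPU_CTX_NO_RESET)
 *		... context was reset; recreate it before resubmitting ...
 */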
/* Each (ip, ring) pair owns one uint64_t slot in the fence BO; slots are
 * laid out row-major, AMDGPU_CS_MAX_RINGS slots per IP type. */
static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
{
	return ip * AMDGPU_CS_MAX_RINGS + ring;
}
/**
 * Submit command to kernel DRM
 *
 * \param   context - \c [in] GPU Context
 * \param   ibs_request - \c [in] Pointer to submission requests
 * \param   fence - \c [out] return fence for this submission
 *
 * \return  0 on success otherwise POSIX Error code
 *
 * \sa amdgpu_cs_submit()
*/
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request,
				uint64_t *fence)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
	uint32_t i, size;
	int r = 0;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
		return -EINVAL;

	/* IB chunks plus, at most, one fence chunk and one dependency chunk */
	size = ibs_request->number_of_ibs + 2;

	chunk_array = alloca(sizeof(uint64_t) * size);
	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = ibs_request->number_of_ibs + 1;
	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

	memset(&cs, 0, sizeof(cs));
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	if (ibs_request->resources)
		cs.in.bo_list_handle = ibs_request->resources->handle;
	cs.in.num_chunks = ibs_request->number_of_ibs;

	/* IB chunks */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;

		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}

	pthread_mutex_lock(&context->sequence_mutex);

	/* Add a user fence chunk only for IPs that support user fences. */
	if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
	    ibs_request->ip_type != AMDGPU_HW_IP_VCE) {
		i = cs.in.num_chunks++;

		/* fence chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = context->fence_bo->handle;
		/* offset */
		chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
			ibs_request->ip_type, ibs_request->ring);
		chunk_data[i].fence_data.offset *= sizeof(uint64_t);
	}

	if (ibs_request->number_of_dependencies) {
		dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
				      ibs_request->number_of_dependencies);
		if (NULL == dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}

		for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
			struct amdgpu_cs_dep_info *info = &ibs_request->dependencies[i];
			struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];

			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;
		}

		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
				      * ibs_request->number_of_dependencies;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
	}

	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		goto error_unlock;

	*fence = cs.out.handle;

error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);
	free(dependencies);
	return r;
}
int amdgpu_cs_submit(amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests,
		     uint64_t *fences)
{
	uint32_t i;
	int r = 0;

	if (NULL == context || NULL == ibs_request || NULL == fences)
		return -EINVAL;

	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(context, ibs_request, fences);
		if (r)
			break;
		fences++;
		ibs_request++;
	}
	return r;
}
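/* Usage sketch (illustrative, not part of the library): submit one GFX IB
 * previously written at GPU VA `ib_va` (placeholder), `num_dw` dwords long;
 * the BO list in `resources` is elided here.
 *
 *	struct amdgpu_cs_ib_info ib = { .ib_mc_address = ib_va,
 *					.size = num_dw };
 *	struct amdgpu_cs_request req = { .ip_type = AMDGPU_HW_IP_GFX,
 *					 .number_of_ibs = 1, .ibs = &ib };
 *	uint64_t fence;
 *	r = amdgpu_cs_submit(ctx, 0, &req, 1, &fence);
 */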
/**
 * Calculate absolute timeout.
 *
 * \param   timeout - \c [in] timeout in nanoseconds.
 *
 * \return  absolute timeout in nanoseconds
*/
uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;

		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r)
			return AMDGPU_TIMEOUT_INFINITE;

		timeout += ((uint64_t)current.tv_sec) * 1000000000ull;
		timeout += current.tv_nsec;
	}
	return timeout;
}
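/* For example, a caller-supplied relative timeout of one second becomes
 * "now on CLOCK_MONOTONIC plus 1e9 ns". amdgpu_ioctl_wait_cs() below relies
 * on this whenever AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE is not set:
 *
 *	uint64_t deadline = amdgpu_cs_calculate_timeout(1000000000ull);
 */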
static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				uint64_t flags,
				bool *busy)
{
	amdgpu_device_handle dev = context->dev;
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.ctx_id = context->id;

	if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
		args.in.timeout = timeout_ns;
	else
		args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	/* Handle errors manually here because of timeout */
	r = ioctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r == -1 && (errno == EINTR || errno == EAGAIN)) {
		*busy = true;
		return 0;
	} else if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}
int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
				 uint32_t *expired)
{
	amdgpu_context_handle context;
	uint64_t *expired_fence;
	unsigned ip_type, ip_instance;
	uint32_t ring;
	bool busy = true;
	int r;

	if (NULL == fence || NULL == expired)
		return -EINVAL;
	if (NULL == fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;

	context = fence->context;
	ip_type = fence->ip_type;
	ip_instance = fence->ip_instance;
	ring = fence->ring;
	*expired = false;

	expired_fence = &context->expired_fences[ip_type][ip_instance][ring];

	pthread_mutex_lock(&context->sequence_mutex);
	if (fence->fence <= *expired_fence) {
		/* This fence value is expired already. */
		pthread_mutex_unlock(&context->sequence_mutex);
		*expired = true;
		return 0;
	}

	/* Check the user fence only if the IP supports user fences. */
	if (fence->ip_type != AMDGPU_HW_IP_UVD &&
	    fence->ip_type != AMDGPU_HW_IP_VCE) {
		uint64_t *signaled_fence = context->fence_cpu;

		signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
		if (fence->fence <= *signaled_fence) {
			/* This fence value is signaled already. */
			*expired_fence = *signaled_fence;
			pthread_mutex_unlock(&context->sequence_mutex);
			*expired = true;
			return 0;
		}

		/* Checking the user fence is enough. */
		if (fence->timeout_ns == 0) {
			pthread_mutex_unlock(&context->sequence_mutex);
			return 0;
		}
	}

	pthread_mutex_unlock(&context->sequence_mutex);

	r = amdgpu_ioctl_wait_cs(context, ip_type, ip_instance, ring,
				 fence->fence, fence->timeout_ns,
				 fence->flags, &busy);
	if (!r && !busy) {
		*expired = true;

		pthread_mutex_lock(&context->sequence_mutex);
		/* This thread dropped sequence_mutex above, so another thread
		   may have updated *expired_fence meanwhile. Only move it
		   forward. */
		if (fence->fence > *expired_fence)
			*expired_fence = fence->fence;
		pthread_mutex_unlock(&context->sequence_mutex);
	}

	return r;
}
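/* Usage sketch (illustrative, not part of the library): wait up to one
 * second on the fence value produced by the amdgpu_cs_submit() sketch above.
 *
 *	struct amdgpu_cs_query_fence qf = {
 *		.context = ctx,
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.fence = fence,
 *		.timeout_ns = 1000000000ull,
 *	};
 *	uint32_t expired = 0;
 *	r = amdgpu_cs_query_fence_status(&qf, &expired);
 */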