/*
 * © Copyright 2019 Collabora, Ltd.
 * Copyright 2019 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/panfrost_drm.h"

#include "util/u_memory.h"
#include "util/os_time.h"
#include "os/os_mman.h"

#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_context.h"
#include "pan_util.h"
#include "pandecode/decode.h"
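
/* DRM winsys glue for Panfrost: wrappers over the panfrost kernel driver's
 * ioctl interface, covering buffer object (BO) management, job submission,
 * and fence/syncobj handling. */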

static void
panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        struct drm_panfrost_mmap_bo mmap_bo = { .handle = bo->gem_handle };
        int ret;

        if (bo->cpu)
                return;

        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %d\n", ret);
                assert(0);
        }

        /* The ioctl returns a fake offset into the DRM fd; mmap-ing that
         * offset yields a CPU view of the BO */
        bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          screen->fd, mmap_bo.offset);
        if (bo->cpu == MAP_FAILED) {
                fprintf(stderr, "mmap failed: %p\n", bo->cpu);
                assert(0);
        }

        /* Record the mmap if we're tracing */
        if (pan_debug & PAN_DBG_TRACE)
                pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
}
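
/* Note: pan_debug is the driver-wide debug mask. Setting PAN_MESA_DEBUG=trace
 * in the environment enables PAN_DBG_TRACE, which routes mappings and job
 * chains through pandecode for offline inspection. */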

static void
panfrost_drm_munmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        if (!bo->cpu)
                return;

        if (os_munmap((void *) (uintptr_t) bo->cpu, bo->size)) {
                perror("munmap");
                abort();
        }

        bo->cpu = NULL;
}

struct panfrost_bo *
panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
                       uint32_t flags)
{
        struct panfrost_bo *bo;

        /* Kernel will fail (confusingly) with EPERM otherwise */
        assert(size > 0);

        unsigned translated_flags = 0;

        /* TODO: translate flags to kernel flags, if the kernel supports them */

        struct drm_panfrost_create_bo create_bo = {
                .size = size,
                .flags = translated_flags,
        };

        /* Before creating a BO, we first want to check the cache */

        bo = panfrost_bo_cache_fetch(screen, size, flags);

        if (!bo) {
                /* Otherwise, the cache misses and we need to allocate a BO
                 * fresh from the kernel */
                int ret;

                ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
                if (ret) {
                        fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %d\n", ret);
                        assert(0);
                }

                /* We have a BO allocated from the kernel; fill in the
                 * userspace version */
                bo = rzalloc(screen, struct panfrost_bo);
                bo->size = create_bo.size;
                bo->gpu = create_bo.offset;
                bo->gem_handle = create_bo.handle;
        }

        /* Only mmap now if we know we need to. For CPU-invisible buffers, we
         * never map since we don't care about their contents; they're purely
         * for GPU-internal use. */

        if (!(flags & (PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_DELAY_MMAP)))
                panfrost_drm_mmap_bo(screen, bo);

        pipe_reference_init(&bo->reference, 1);

        return bo;
}
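
/* Example (illustrative only, not part of the driver): allocating a
 * GPU-internal BO. With PAN_ALLOCATE_INVISIBLE the buffer is never mapped,
 * so only bo->gpu (the GPU address) is meaningful afterwards:
 *
 *    struct panfrost_bo *heap =
 *            panfrost_drm_create_bo(screen, 64 * 4096, PAN_ALLOCATE_INVISIBLE);
 *    ...point GPU descriptors at heap->gpu...
 *    panfrost_drm_release_bo(screen, heap);
 */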

void
panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        if (!bo)
                return;

        struct drm_gem_close gem_close = { .handle = bo->gem_handle };
        int ret;

        panfrost_drm_munmap_bo(screen, bo);

        ret = drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %d\n", ret);
                assert(0);
        }

        ralloc_free(bo);
}

void
panfrost_drm_allocate_slab(struct panfrost_screen *screen,
                           struct panfrost_memory *mem,
                           size_t pages,
                           bool same_va,
                           int extra_flags,
                           int commit_count,
                           int extent)
{
        // TODO cache allocations
        // TODO properly handle errors
        // TODO take into account extra_flags
        mem->bo = panfrost_drm_create_bo(screen, pages * 4096, extra_flags);
        mem->stack_bottom = 0;
}

void
panfrost_drm_free_slab(struct panfrost_screen *screen, struct panfrost_memory *mem)
{
        panfrost_bo_unreference(&screen->base, mem->bo);
        mem->bo = NULL;
}

struct panfrost_bo *
panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
{
        struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
        struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
        MAYBE_UNUSED int ret;
        unsigned gem_handle;

        ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
        assert(!ret);

        get_bo_offset.handle = gem_handle;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
        assert(!ret);

        bo->gem_handle = gem_handle;
        bo->gpu = (mali_ptr) get_bo_offset.offset;
        /* Seeking to the end of a dma-buf is the standard way to query its size */
        bo->size = lseek(fd, 0, SEEK_END);
        assert(bo->size > 0);
        pipe_reference_init(&bo->reference, 1);

        // TODO map and unmap on demand?
        panfrost_drm_mmap_bo(screen, bo);

        return bo;
}

int
panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo)
{
        struct drm_prime_handle args = {
                .handle = bo->gem_handle,
                .flags = DRM_CLOEXEC,
        };

        int ret = drmIoctl(screen->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
        if (ret == -1)
                return -1;

        return args.fd;
}
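
/* Example (illustrative only): the import/export pair above implements
 * dma-buf sharing. One side exports a BO to a file descriptor, passes it to
 * another process (e.g. over a UNIX socket), and the receiver imports it:
 *
 *    int dmabuf = panfrost_drm_export_bo(screen, bo);
 *    ...send dmabuf to the consumer...
 *    struct panfrost_bo *shared = panfrost_drm_import_bo(screen, dmabuf);
 *
 * Both sides then alias the same underlying memory. */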

static int
panfrost_drm_submit_job(struct panfrost_context *ctx, u64 job_desc, int reqs)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
        struct drm_panfrost_submit submit = {0,};
        int *bo_handles, ret;

        /* Reuse the context's out-sync as the in-sync, so each submission
         * waits on the previous one: jobs are serialized on a single syncobj */
        submit.in_syncs = (u64) (uintptr_t) &ctx->out_sync;
        submit.in_sync_count = 1;

        submit.out_sync = ctx->out_sync;

        submit.jc = job_desc;
        submit.requirements = reqs;
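
        /* Every BO the job touches must be listed in the submission, so the
         * kernel can keep it resident and attach the job's fence to it for
         * implicit synchronization. */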
        bo_handles = calloc(job->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        set_foreach(job->bos, entry) {
                struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
                assert(bo->gem_handle > 0);
                bo_handles[submit.bo_handle_count++] = bo->gem_handle;
        }

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        if (ret) {
                fprintf(stderr, "Error submitting: %m\n");
                return errno;
        }

        /* Trace the job if we're doing that */
        if (pan_debug & PAN_DBG_TRACE) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
                pandecode_jc(submit.jc, FALSE);
        }

        return 0;
}

int
panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws, bool is_scanout)
{
        int ret = 0;

        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* TODO: Add here the transient pools */
        panfrost_job_add_bo(job, ctx->shaders.bo);
        panfrost_job_add_bo(job, ctx->scratchpad.bo);
        panfrost_job_add_bo(job, ctx->tiler_heap.bo);
        panfrost_job_add_bo(job, ctx->varying_mem.bo);
        panfrost_job_add_bo(job, ctx->tiler_polygon_list.bo);

        /* Submit the vertex/tiler chain first, if there is one */
        if (job->first_job.gpu) {
                ret = panfrost_drm_submit_job(ctx, job->first_job.gpu, 0);
                assert(!ret);
        }

        /* A fragment job is needed if there was tiler work or a clear */
        if (job->first_tiler.gpu || job->clear) {
                struct pipe_surface *surf = ctx->pipe_framebuffer.cbufs[0];
                struct panfrost_resource *res = pan_resource(surf->texture);

                assert(res->bo);
                panfrost_job_add_bo(job, res->bo);

                ret = panfrost_drm_submit_job(ctx, panfrost_fragment_job(ctx, has_draws), PANFROST_JD_REQ_FS);
                assert(!ret);
        }

        return ret;
}

static struct panfrost_fence *
panfrost_fence_create(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct panfrost_fence *f = calloc(1, sizeof(*f));
        if (!f)
                return NULL;

        /* Snapshot the last Panfrost rendering's out fence. We'd rather have
         * another syncobj instead of a sync file, but this is all we get.
         * (HandleToFD/FDToHandle just gives you another syncobj ID for the
         * same syncobj.)
         */
        drmSyncobjExportSyncFile(screen->fd, ctx->out_sync, &f->fd);
        if (f->fd == -1) {
                fprintf(stderr, "export failed\n");
                free(f);
                return NULL;
        }

        pipe_reference_init(&f->reference, 1);

        return f;
}

void
panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
                                  struct pipe_fence_handle **fence)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        if (!screen->last_fragment_flushed) {
                drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
                screen->last_fragment_flushed = true;

                /* The job finished up, so we're safe to clean it up now */
                panfrost_free_job(ctx, screen->last_job);
        }

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx);
                gallium->screen->fence_reference(gallium->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *)f;
        }
}

unsigned
panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
{
        struct drm_panfrost_get_param get_param = {0,};
        MAYBE_UNUSED int ret;

        get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
        assert(!ret);

        return get_param.value;
}
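
/* The product ID identifies the GPU variant; a Mali-T860, for example,
 * reports 0x860 here, which callers can match against to enable per-GPU
 * behaviour. */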

int
panfrost_drm_init_context(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        return drmSyncobjCreate(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
                                &ctx->out_sync);
}
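
/* The syncobj is created already signaled: the first submission waits on it
 * as an in-sync (see panfrost_drm_submit_job), and a brand-new context has
 * no prior work to wait for. */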

void
panfrost_drm_fence_reference(struct pipe_screen *screen,
                             struct pipe_fence_handle **ptr,
                             struct pipe_fence_handle *fence)
{
        struct panfrost_fence **p = (struct panfrost_fence **)ptr;
        struct panfrost_fence *f = (struct panfrost_fence *)fence;
        struct panfrost_fence *old = *p;

        if (pipe_reference(&(*p)->reference, &f->reference)) {
                close(old->fd);
                free(old);
        }

        *p = f;
}

bool
panfrost_drm_fence_finish(struct pipe_screen *pscreen,
                          struct pipe_context *ctx,
                          struct pipe_fence_handle *fence,
                          uint64_t timeout)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_fence *f = (struct panfrost_fence *)fence;
        int ret;

        unsigned syncobj;
        ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
        if (ret) {
                fprintf(stderr, "Failed to create syncobj to wait on: %m\n");
                return false;
        }

        ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
        if (ret) {
                fprintf(stderr, "Failed to import fence to syncobj: %m\n");
                drmSyncobjDestroy(screen->fd, syncobj);
                return false;
        }

        uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
        if (abs_timeout == OS_TIMEOUT_INFINITE)
                abs_timeout = INT64_MAX;

        ret = drmSyncobjWait(screen->fd, &syncobj, 1, abs_timeout, 0, NULL);

        drmSyncobjDestroy(screen->fd, syncobj);

        return ret >= 0;
}