2 * Copyright © 2008 Dave Airlie
3 * Copyright © 2008 Jérôme Glisse
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
30 * Jérôme Glisse <glisse@freedesktop.org>
43 #include "xf86atomic.h"
45 #include "radeon_drm.h"
46 #include "radeon_bo.h"
47 #include "radeon_bo_int.h"
48 #include "radeon_bo_gem.h"
/* GEM-backed buffer object.  Embeds the generic radeon_bo_int as its
 * first member so the two pointer types can be cast back and forth.
 * NOTE(review): remaining members (flink name, map count, mapping
 * pointer, CS-reloc atomic) are not visible in this chunk but are
 * referenced by the functions below. */
50 struct radeon_bo_gem {
51 struct radeon_bo_int base;
/* GEM buffer-object manager.  Embeds the generic manager as its first
 * member (same cast-compatibility trick as radeon_bo_gem above). */
58 struct bo_manager_gem {
59 struct radeon_bo_manager base;
62 static int bo_wait(struct radeon_bo_int *boi);
/* Create or open a GEM buffer object.
 *
 * Two paths (selected by code not visible in this chunk, presumably on
 * whether a flink `handle` was supplied — TODO confirm):
 *   - open an existing object by its global flink name via DRM_IOCTL_GEM_OPEN;
 *   - allocate a fresh object via the DRM_RADEON_GEM_CREATE command.
 *
 * Returns a referenced struct radeon_bo* on success (caller owns one
 * reference and must release it with radeon_bo_unref). */
64 static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
71 struct radeon_bo_gem *bo;
/* Zeroed allocation: all bookkeeping fields (map count, name, ptr)
 * start out cleared. */
74 bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
82 bo->base.alignment = alignment;
83 bo->base.domains = domains;
84 bo->base.flags = flags;
/* Not yet referenced by any command stream. */
86 atomic_set(&bo->reloc_in_cs, 0);
/* --- Path 1: open an existing object by flink name. --- */
89 struct drm_gem_open open_arg;
91 memset(&open_arg, 0, sizeof(open_arg));
92 open_arg.name = handle;
93 r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
/* The kernel returns the per-fd handle and the object's actual size. */
98 bo->base.handle = open_arg.handle;
99 bo->base.size = open_arg.size;
/* --- Path 2: allocate a new object. --- */
102 struct drm_radeon_gem_create args;
105 args.alignment = alignment;
106 args.initial_domain = bo->base.domains;
109 r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
110 &args, sizeof(args));
111 bo->base.handle = args.handle;
/* Allocation-failure diagnostics (error path; the surrounding
 * check is not visible in this chunk). */
113 fprintf(stderr, "Failed to allocate :\n");
114 fprintf(stderr, " size : %d bytes\n", size);
115 fprintf(stderr, " alignment : %d bytes\n", alignment);
116 fprintf(stderr, " domains : %d\n", bo->base.domains);
/* Hand the caller a referenced bo. */
121 radeon_bo_ref((struct radeon_bo*)bo);
122 return (struct radeon_bo*)bo;
125 static void bo_ref(struct radeon_bo_int *boi)
/* Drop a reference; on the last reference, tear the object down:
 * unmap any CPU mapping, close the GEM handle, and free the struct.
 * Returns the bo while references remain, NULL once destroyed
 * (the NULL return is on a line not visible in this chunk). */
129 static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
131 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
132 struct drm_gem_close args;
/* Still referenced: nothing to destroy yet. */
135 return (struct radeon_bo *)boi;
/* Release the CPU mapping if one is still held. */
137 if (bo_gem->priv_ptr) {
138 munmap(bo_gem->priv_ptr, boi->size);
141 /* Zero out args to make valgrind happy */
142 memset(&args, 0, sizeof(args));
/* Close the per-fd GEM handle; the kernel frees the backing storage
 * when the last handle goes away. */
145 args.handle = boi->handle;
146 drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
/* Scrub the struct before free to help catch use-after-free. */
147 memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
/* Map the buffer object into the CPU address space.
 *
 * Mappings are reference-counted: nested calls reuse the cached
 * priv_ptr and only the first call performs the ioctl + mmap.
 * On success boi->ptr points at the mapping; returns 0 on success,
 * negative errno on failure (return statements are on lines not
 * visible in this chunk). */
152 static int bo_map(struct radeon_bo_int *boi, int write)
154 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
155 struct drm_radeon_gem_mmap args;
/* Already mapped at least once: bump the count and reuse. */
159 if (bo_gem->map_count++ != 0) {
162 if (bo_gem->priv_ptr) {
168 /* Zero out args to make valgrind happy */
169 memset(&args, 0, sizeof(args));
170 args.handle = boi->handle;
172 args.size = (uint64_t)boi->size;
/* Ask the kernel for the fake mmap offset for this object. */
173 r = drmCommandWriteRead(boi->bom->fd,
178 fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
179 boi, boi->handle, r);
/* Map through the DRM fd at the offset the kernel returned. */
182 ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
183 if (ptr == MAP_FAILED)
/* Cache the mapping for subsequent bo_map calls. */
185 bo_gem->priv_ptr = ptr;
187 boi->ptr = bo_gem->priv_ptr;
/* Balance a bo_map call.  Only decrements the map count; the actual
 * munmap is deliberately deferred (the mapping is cached in priv_ptr
 * and torn down in bo_unref) — see the commented-out munmap below. */
194 static int bo_unmap(struct radeon_bo_int *boi)
196 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
/* Still mapped by someone else: just drop our count. */
198 if (--bo_gem->map_count > 0) {
201 //munmap(bo->ptr, bo->size);
/* Block until the GPU is idle on this buffer object.  The ioctl is
 * retried while it reports -EBUSY (interrupted/contended waits). */
206 static int bo_wait(struct radeon_bo_int *boi)
208 struct drm_radeon_gem_wait_idle args;
211 /* Zero out args to make valgrind happy */
212 memset(&args, 0, sizeof(args));
213 args.handle = boi->handle;
215 ret = drmCommandWrite(boi->bom->fd, DRM_RADEON_GEM_WAIT_IDLE,
216 &args, sizeof(args));
217 } while (ret == -EBUSY);
/* Non-blocking busy query.  On return *domain is filled with the
 * domain(s) the kernel reports the bo as busy in; the ioctl return
 * value indicates busy vs. idle (handled on lines not visible here). */
221 static int bo_is_busy(struct radeon_bo_int *boi, uint32_t *domain)
223 struct drm_radeon_gem_busy args;
226 args.handle = boi->handle;
229 ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_BUSY,
230 &args, sizeof(args));
232 *domain = args.domain;
/* Apply tiling parameters (flags + pitch) to the buffer object via
 * DRM_RADEON_GEM_SET_TILING.  Returns the ioctl result. */
236 static int bo_set_tiling(struct radeon_bo_int *boi, uint32_t tiling_flags,
239 struct drm_radeon_gem_set_tiling args;
242 args.handle = boi->handle;
243 args.tiling_flags = tiling_flags;
246 r = drmCommandWriteRead(boi->bom->fd,
247 DRM_RADEON_GEM_SET_TILING,
/* Query the current tiling parameters from the kernel.  Note the GET
 * ioctl reuses the set_tiling argument struct; args is zero-initialized
 * so unused fields are well-defined. */
253 static int bo_get_tiling(struct radeon_bo_int *boi, uint32_t *tiling_flags,
256 struct drm_radeon_gem_set_tiling args = {};
259 args.handle = boi->handle;
261 r = drmCommandWriteRead(boi->bom->fd,
262 DRM_RADEON_GEM_GET_TILING,
/* Report the kernel's view back to the caller. */
269 *tiling_flags = args.tiling_flags;
274 static struct radeon_bo_funcs bo_gem_funcs = {
/* Public constructor: allocate a GEM bo manager bound to the given DRM
 * fd and install the GEM function table.  Returns NULL on allocation
 * failure (check not visible in this chunk); caller frees with
 * radeon_bo_manager_gem_dtor. */
287 drm_public struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
289 struct bo_manager_gem *bomg;
291 bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
295 bomg->base.funcs = &bo_gem_funcs;
297 return (struct radeon_bo_manager*)bomg;
/* Public destructor: free a manager created by radeon_bo_manager_gem_ctor
 * (NULL check and free() are on lines not visible in this chunk). */
300 drm_public void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
302 struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
/* Return the bo's cached flink name (presumably bo_gem->name; the
 * return statement is not visible in this chunk — TODO confirm). */
311 radeon_gem_name_bo(struct radeon_bo *bo)
313 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
/* Expose the atomic "referenced by a command stream" counter so the CS
 * code can mark/unmark this bo without knowing the struct layout. */
318 radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
320 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
321 return &bo_gem->reloc_in_cs;
/* Get the global flink name for this bo, creating one via
 * DRM_IOCTL_GEM_FLINK if needed.  The name is cached in bo_gem->name
 * so the ioctl runs at most once per bo.  *name receives the result. */
325 radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
327 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
328 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
329 struct drm_gem_flink flink;
/* Fast path: already flinked. */
333 *name = bo_gem->name;
336 flink.handle = bo->handle;
337 r = drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_FLINK, &flink);
/* Cache for subsequent calls. */
341 bo_gem->name = flink.name;
/* Tell the kernel which memory domains the CPU will read/write this bo
 * in (DRM_RADEON_GEM_SET_DOMAIN); used to synchronize CPU access. */
347 radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
349 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
350 struct drm_radeon_gem_set_domain args;
353 args.handle = bo->handle;
354 args.read_domains = read_domains;
355 args.write_domain = write_domain;
357 r = drmCommandWriteRead(boi->bom->fd,
358 DRM_RADEON_GEM_SET_DOMAIN,
/* Export the bo as a PRIME (dma-buf) file descriptor for cross-device
 * / cross-process sharing.  *handle receives the fd (O_CLOEXEC set). */
364 drm_public int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
366 struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
369 ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
/* Import a PRIME (dma-buf) fd as a radeon bo.  The caller supplies the
 * size since a dma-buf fd carries no radeon metadata; the domain is
 * assumed to be GTT for imported buffers.  Returns a referenced bo
 * (caller must radeon_bo_unref), or NULL on failure (error paths are
 * on lines not visible in this chunk). */
373 drm_public struct radeon_bo *
374 radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
376 struct radeon_bo_gem *bo;
380 bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
387 bo->base.size = size;
388 bo->base.alignment = 0;
/* Imported dma-bufs are treated as GTT-resident. */
389 bo->base.domains = RADEON_GEM_DOMAIN_GTT;
392 atomic_set(&bo->reloc_in_cs, 0);
/* Convert the dma-buf fd into a per-fd GEM handle. */
395 r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
401 bo->base.handle = handle;
/* Hand the caller a referenced bo. */
404 radeon_bo_ref((struct radeon_bo *)bo);
405 return (struct radeon_bo *)bo;