/*
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Jérôme Glisse <glisse@freedesktop.org>
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include "xf86drm.h"
#include "xf86atomic.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_int.h"
#include "radeon_bo_gem.h"
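
/*
 * Per-BO private data for the GEM backend: the base radeon_bo_int plus the
 * flink name, the CPU mapping refcount/pointer and the "relocated in current
 * CS" flag handed out to the command-stream code.
 */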
struct radeon_bo_gem {
    struct radeon_bo_int    base;
    uint32_t                name;
    int                     map_count;
    atomic_t                reloc_in_cs;
    void                    *priv_ptr;
};

struct bo_manager_gem {
    struct radeon_bo_manager    base;
};

static int bo_wait(struct radeon_bo_int *boi);
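
/*
 * Open or create a buffer object.  A non-zero "handle" is treated as a GEM
 * flink name and re-opened with DRM_IOCTL_GEM_OPEN; otherwise a new object
 * is allocated with DRM_RADEON_GEM_CREATE in the requested domains.
 */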
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct radeon_bo_gem *bo;
    int r;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }
    bo->base.bom = bom;
    bo->base.size = size;
    bo->base.alignment = alignment;
    bo->base.domains = domains;
    bo->base.flags = flags;
    atomic_set(&bo->reloc_in_cs, 0);

    if (handle) {
        /* Re-open an existing object by its flink name. */
        struct drm_gem_open open_arg;

        memset(&open_arg, 0, sizeof(open_arg));
        open_arg.name = handle;
        r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
        if (r != 0) {
            free(bo);
            return NULL;
        }
        bo->base.handle = open_arg.handle;
        bo->base.size = open_arg.size;
        bo->name = handle;
    } else {
        /* Allocate a new object in the requested domains. */
        struct drm_radeon_gem_create args;

        memset(&args, 0, sizeof(args));
        args.size = size;
        args.alignment = alignment;
        args.initial_domain = bo->base.domains;
        args.flags = flags;
        r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
                                &args, sizeof(args));
        bo->base.handle = args.handle;
        if (r) {
            fprintf(stderr, "Failed to allocate :\n");
            fprintf(stderr, "   size      : %d bytes\n", size);
            fprintf(stderr, "   alignment : %d bytes\n", alignment);
            fprintf(stderr, "   domains   : %d\n", bo->base.domains);
            free(bo);
            return NULL;
        }
    }
    radeon_bo_ref((struct radeon_bo*)bo);
    return (struct radeon_bo*)bo;
}
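
/*
 * Reference/unreference callbacks.  The generic radeon_bo code keeps the
 * reference count; bo_unref() only tears the object down (unmapping any
 * cached CPU mapping and closing the GEM handle) once that count hits zero.
 */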
static void bo_ref(struct radeon_bo_int *boi)
{
}

static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
    struct drm_gem_close args;

    if (boi->cref) {
        /* Still referenced elsewhere, keep the object alive. */
        return (struct radeon_bo *)boi;
    }
    if (bo_gem->priv_ptr) {
        munmap(bo_gem->priv_ptr, boi->size);
    }

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));

    /* Close the GEM object */
    args.handle = boi->handle;
    drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
    memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
    free(bo_gem);
    return NULL;
}
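
/*
 * Map the buffer object into the CPU address space.  The first caller asks
 * the kernel for the mmap offset via DRM_RADEON_GEM_MMAP and mmap()s it;
 * later callers just bump map_count and reuse the cached mapping.
 */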
static int bo_map(struct radeon_bo_int *boi, int write)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
    struct drm_radeon_gem_mmap args;
    int r;
    void *ptr;

    if (bo_gem->map_count++ != 0) {
        return 0;
    }
    if (bo_gem->priv_ptr) {
        /* Already mapped before; reuse the cached mapping. */
        goto wait;
    }
    boi->ptr = NULL;

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));
    args.handle = boi->handle;
    args.size = (uint64_t)boi->size;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args, sizeof(args));
    if (r) {
        fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
                boi, boi->handle, r);
        return r;
    }
    ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
    if (ptr == MAP_FAILED)
        return -errno;
    bo_gem->priv_ptr = ptr;
wait:
    boi->ptr = bo_gem->priv_ptr;
    return bo_wait(boi);
}
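
/*
 * Drop one CPU mapping reference.  The mapping itself is kept cached and is
 * only released when the buffer object is destroyed in bo_unref().
 */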
static int bo_unmap(struct radeon_bo_int *boi)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;

    if (--bo_gem->map_count > 0) {
        return 0;
    }
    /* munmap() is intentionally not called here; the mapping stays cached. */
    boi->ptr = NULL;
    return 0;
}
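
/*
 * Block until the GPU is done with the buffer, retrying while the wait-idle
 * ioctl reports -EBUSY.
 */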
static int bo_wait(struct radeon_bo_int *boi)
{
    struct drm_radeon_gem_wait_idle args;
    int ret;

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));
    args.handle = boi->handle;
    do {
        ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                  &args, sizeof(args));
    } while (ret == -EBUSY);
    return ret;
}
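
/*
 * Non-blocking busy check; on return *domain holds the domain the buffer is
 * currently busy in, as reported by DRM_RADEON_GEM_BUSY.
 */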
static int bo_is_busy(struct radeon_bo_int *boi, uint32_t *domain)
{
    struct drm_radeon_gem_busy args;
    int ret;

    args.handle = boi->handle;
    args.domain = 0;
    ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_BUSY,
                              &args, sizeof(args));
    *domain = args.domain;
    return ret;
}
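
/*
 * Set and query the tiling parameters of a buffer object through the
 * SET_TILING/GET_TILING ioctls.
 */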
static int bo_set_tiling(struct radeon_bo_int *boi, uint32_t tiling_flags,
                         uint32_t pitch)
{
    struct drm_radeon_gem_set_tiling args;
    int r;

    args.handle = boi->handle;
    args.tiling_flags = tiling_flags;
    args.pitch = pitch;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_SET_TILING,
                            &args, sizeof(args));
    return r;
}

static int bo_get_tiling(struct radeon_bo_int *boi, uint32_t *tiling_flags,
                         uint32_t *pitch)
{
    struct drm_radeon_gem_set_tiling args = {};
    int r;

    args.handle = boi->handle;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_GET_TILING,
                            &args, sizeof(args));
    if (r)
        return r;
    *tiling_flags = args.tiling_flags;
    *pitch = args.pitch;
    return r;
}
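
/*
 * Dispatch table plugged into radeon_bo_manager.  Callbacks this backend does
 * not implement are left NULL by the designated initializer.
 */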
static struct radeon_bo_funcs bo_gem_funcs = {
    .bo_open = bo_open,
    .bo_ref = bo_ref,
    .bo_unref = bo_unref,
    .bo_map = bo_map,
    .bo_unmap = bo_unmap,
    .bo_wait = bo_wait,
    .bo_set_tiling = bo_set_tiling,
    .bo_get_tiling = bo_get_tiling,
    .bo_is_busy = bo_is_busy,
};
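
/*
 * Manager constructor/destructor for the GEM backend.  Typical use from a
 * client goes through the public radeon_bo API, which dispatches into the
 * table above (sketch only, assuming an already opened DRM fd):
 *
 *     struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
 *     struct radeon_bo *bo = radeon_bo_open(bom, 0, 4096, 0,
 *                                           RADEON_GEM_DOMAIN_GTT, 0);
 *     radeon_bo_map(bo, 1);
 *     memset(bo->ptr, 0, 4096);
 *     radeon_bo_unmap(bo);
 *     radeon_bo_unref(bo);
 *     radeon_bo_manager_gem_dtor(bom);
 */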
struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
{
    struct bo_manager_gem *bomg;

    bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
    if (bomg == NULL)
        return NULL;
    bomg->base.funcs = &bo_gem_funcs;
    bomg->base.fd = fd;
    return (struct radeon_bo_manager*)bomg;
}

void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;

    if (bom == NULL)
        return;
    free(bomg);
}
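
/*
 * Small accessors: radeon_gem_name_bo() returns the cached flink name and
 * radeon_gem_get_reloc_in_cs() exposes the per-BO relocation flag used by
 * the command-stream code.
 */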
uint32_t radeon_gem_name_bo(struct radeon_bo *bo)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    return bo_gem->name;
}

void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    return &bo_gem->reloc_in_cs;
}
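
/*
 * Return the global (flink) name of a buffer object, creating one through
 * DRM_IOCTL_GEM_FLINK on first use and caching it afterwards.
 */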
int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct drm_gem_flink flink;
    int r;

    if (bo_gem->name) {
        *name = bo_gem->name;
        return 0;
    }
    flink.handle = bo->handle;
    r = drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_FLINK, &flink);
    if (r)
        return r;
    bo_gem->name = flink.name;
    *name = flink.name;
    return 0;
}
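
/*
 * Ask the kernel to move/validate the buffer into the given read/write
 * domains (DRM_RADEON_GEM_SET_DOMAIN).
 */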
int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct drm_radeon_gem_set_domain args;
    int r;

    args.handle = bo->handle;
    args.read_domains = read_domains;
    args.write_domain = write_domain;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_SET_DOMAIN,
                            &args, sizeof(args));
    return r;
}
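
/*
 * PRIME support: export a buffer object as a dma-buf file descriptor, and
 * wrap an imported dma-buf fd into a radeon_bo placed in the GTT domain.
 */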
int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    int ret;

    ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
    return ret;
}

struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
                                           int fd_handle,
                                           uint32_t size)
{
    struct radeon_bo_gem *bo;
    uint32_t handle;
    int r;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }
    bo->base.bom = bom;
    bo->base.size = size;
    bo->base.alignment = 0;
    bo->base.domains = RADEON_GEM_DOMAIN_GTT;
    bo->base.flags = 0;
    atomic_set(&bo->reloc_in_cs, 0);

    r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
    if (r != 0) {
        free(bo);
        return NULL;
    }
    bo->base.handle = handle;

    radeon_bo_ref((struct radeon_bo *)bo);
    return (struct radeon_bo *)bo;
}