2 * Copyright © 2008 Jérôme Glisse
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
15 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
17 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
28 * Aapo Tahkola <aet@rasterburn.org>
29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org>
41 #include <sys/ioctl.h>
42 #include "radeon_cs.h"
43 #include "radeon_cs_int.h"
44 #include "radeon_bo_int.h"
45 #include "radeon_cs_gem.h"
46 #include "radeon_bo_gem.h"
50 #include "xf86atomic.h"
51 #include "radeon_drm.h"
/* GEM-backed specialisation of the generic radeon CS manager.
 * NOTE(review): several struct definitions are partially elided in this
 * view; the fields below belong to different structs in the full file. */
56 struct radeon_cs_manager_gem {
57 struct radeon_cs_manager base;
/* Field of the per-relocation record (struct cs_reloc_gem, declaration
 * elided here): GEM write domain for the referenced bo. */
66 uint32_t write_domain;
/* Size of one relocation record expressed in 32-bit dwords — used to
 * index into the reloc chunk. */
71 #define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))
/* Fields of struct cs_gem: base CS state plus the kernel submission
 * descriptor and its two chunks (IB packets + reloc table). */
74 struct radeon_cs_int base;
75 struct drm_radeon_cs cs;
76 struct drm_radeon_cs_chunk chunks[2];
/* Bos referenced by this CS, parallel to the reloc records. */
79 struct radeon_bo_int **relocs_bo;
/* Protects cs_id_source, the bitmask of CS ids currently in use
 * (one bit per live CS; see generate_id()/free_id()). */
82 static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
83 static uint32_t cs_id_source = 0;
/*
 * Return the lowest zero bit of n as a one-hot mask (1 << position).
 * result is undefined if called with ~0 (there is no zero bit).
 */
static uint32_t get_first_zero(const uint32_t n)
{
    /* __builtin_ctz returns number of trailing zeros; complementing n
     * turns its first zero bit into the first set bit.  Shift an
     * UNSIGNED one: `1 << 31` on a signed int is undefined behavior
     * when the first free bit is bit 31. */
    return 1u << __builtin_ctz(~n);
}
/*
 * Allocate a CS id: one free bit out of the cs_id_source bitmask,
 * taken under id_mutex so concurrent CS creation is safe.
95 * Returns a free id for cs.
96 * If there is no free id we return zero
 */
98 static uint32_t generate_id(void)
101 pthread_mutex_lock( &id_mutex );
102 /* check for free ids */
/* ~r with r == 0 is the all-ones mask: only search when a zero bit exists.
 * NOTE(review): declaration of r and the statement reserving the bit in
 * cs_id_source are elided in this view. */
103 if (cs_id_source != ~r) {
104 /* find first zero bit */
105 r = get_first_zero(cs_id_source);
107 /* set id as reserved */
110 pthread_mutex_unlock( &id_mutex );
/*
 * Return a CS id bit to the cs_id_source pool under id_mutex.
115 * Free the id for later reuse
 */
117 static void free_id(uint32_t id)
119 pthread_mutex_lock( &id_mutex );
/* NOTE(review): the statement clearing the id bit from cs_id_source is
 * elided in this view. */
123 pthread_mutex_unlock( &id_mutex );
/*
 * Allocate and initialise a GEM command stream.
 * Rejects requests larger than the 64KiB hardware command buffer, then
 * allocates the packet buffer, reloc bookkeeping arrays, and fills in the
 * two kernel submission chunks (IB + relocs).
 * NOTE(review): error-path returns and closing braces are elided in this
 * view; each failed allocation frees what was allocated before it.
 */
126 static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
131 /* max cmd buffer size is 64Kb */
132 if (ndw > (64 * 1024 / 4)) {
135 csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
/* Always size the packet buffer to the full 64KiB regardless of ndw. */
140 csg->base.ndw = 64 * 1024 / 4;
141 csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
142 if (csg->base.packets == NULL) {
146 csg->base.relocs_total_size = 0;
147 csg->base.crelocs = 0;
/* Per-CS id bit, used as a hash tag in each bo's reloc_in_cs word. */
148 csg->base.id = generate_id();
/* Initial capacity: 4096 bytes of reloc records, 16 bytes each. */
149 csg->nrelocs = 4096 / (4 * 4) ;
150 csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
151 csg->nrelocs*sizeof(void*));
152 if (csg->relocs_bo == NULL) {
153 free(csg->base.packets);
157 csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
158 if (csg->relocs == NULL) {
159 free(csg->relocs_bo);
160 free(csg->base.packets);
/* Chunk 0 carries the indirect buffer (PM4 packets); chunk 1 the
 * relocation table.  length_dw fields are filled at emit time. */
164 csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
165 csg->chunks[0].length_dw = 0;
166 csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
167 csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
168 csg->chunks[1].length_dw = 0;
169 csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
170 return (struct radeon_cs_int*)csg;
/*
 * Append a relocation for bo to the CS.
 * If the bo is already referenced by this CS, merge the new domains/flags
 * into the existing record; otherwise add a new reloc record (growing the
 * arrays if needed), take a reference on the bo, and tag the bo with this
 * CS's id bit.  In both cases a relocation packet (0xc0001000, reloc index)
 * is written into the command stream.
 * NOTE(review): several statements (loop decrement, early returns, domain
 * re-check returns) are elided in this view.
 */
173 static int cs_gem_write_reloc(struct radeon_cs_int *cs,
174 struct radeon_bo *bo,
175 uint32_t read_domain,
176 uint32_t write_domain,
179 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
180 struct cs_gem *csg = (struct cs_gem*)cs;
181 struct cs_reloc_gem *reloc;
/* Caller must have run space accounting for this bo first. */
185 assert(boi->space_accounted);
/* Exactly one of read_domain / write_domain must be set. */
188 if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
189 /* in one CS a bo can only be in read or write domain but not
190 * in read & write domain at the same sime
/* CPU domain is not a valid GPU relocation target in either direction. */
194 if (read_domain == RADEON_GEM_DOMAIN_CPU) {
197 if (write_domain == RADEON_GEM_DOMAIN_CPU) {
200 /* use bit field hash function to determine
201 if this bo is for sure not in this cs.*/
202 if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
203 /* check if bo is already referenced.
204 * Scanning from end to begin reduces cycles with mesa because
205 * it often relocates same shared dma bo again. */
206 for(i = cs->crelocs; i != 0;) {
208 idx = i * RELOC_SIZE;
209 reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
210 if (reloc->handle == bo->handle) {
211 /* Check domains must be in read or write. As we check already
212 * checked that in argument one of the read or write domain was
213 * set we only need to check that if previous reloc as the read
214 * domain set then the read_domain should also be set for this
217 /* the DDX expects to read and write from same pixmap */
/* Upgrading a read reloc to write: write domain wins, read is cleared. */
218 if (write_domain && (reloc->read_domain & write_domain)) {
219 reloc->read_domain = 0;
220 reloc->write_domain = write_domain;
221 } else if (read_domain & reloc->write_domain) {
222 reloc->read_domain = 0;
/* Mismatched domains on an already-referenced bo are rejected
 * (error returns elided in this view). */
224 if (write_domain != reloc->write_domain)
226 if (read_domain != reloc->read_domain)
230 reloc->read_domain |= read_domain;
231 reloc->write_domain |= write_domain;
/* NOTE(review): this ORs the INTERSECTION of old and new flags, so it
 * never adds a flag — looks suspicious but matches the code as written;
 * verify intent against upstream history before changing. */
233 reloc->flags |= (flags & reloc->flags);
234 /* write relocation packet */
235 radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
236 radeon_cs_write_dword((struct radeon_cs *)cs, idx);
/* New bo for this CS: grow the reloc arrays by one entry if full. */
242 if (csg->base.crelocs >= csg->nrelocs) {
243 /* allocate more memory (TODO: should use a slab allocatore maybe) */
245 size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
246 tmp = (uint32_t*)realloc(csg->relocs_bo, size);
250 csg->relocs_bo = (struct radeon_bo_int **)tmp;
251 size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
252 tmp = (uint32_t*)realloc(csg->relocs, size);
256 cs->relocs = csg->relocs = tmp;
/* Reloc buffer may have moved; refresh the kernel chunk pointer. */
258 csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
260 csg->relocs_bo[csg->base.crelocs] = boi;
261 idx = (csg->base.crelocs++) * RELOC_SIZE;
262 reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
263 reloc->handle = bo->handle;
264 reloc->read_domain = read_domain;
265 reloc->write_domain = write_domain;
266 reloc->flags = flags;
267 csg->chunks[1].length_dw += RELOC_SIZE;
269 /* bo might be referenced from another context so have to use atomic opertions */
/* Tag the bo with this CS's id bit (undone at emit/erase time). */
270 atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
271 cs->relocs_total_size += boi->size;
272 radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
273 radeon_cs_write_dword((struct radeon_cs *)cs, idx);
/*
 * Open a section of ndw dwords in the CS, recording the caller's
 * file/function/line for diagnostics.  Nested sections are an error.
 * Grows the packet buffer (rounded up to a multiple of 1024 dwords) when
 * the section would not fit.
 * NOTE(review): error returns and realloc-failure handling are elided in
 * this view.
 */
277 static int cs_gem_begin(struct radeon_cs_int *cs,
/* Non-zero section_ndw means a previous section was never ended. */
284 if (cs->section_ndw) {
285 fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
286 cs->section_file, cs->section_func, cs->section_line);
287 fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
291 cs->section_ndw = ndw;
293 cs->section_file = file;
294 cs->section_func = func;
295 cs->section_line = line;
296
297 if (cs->cdw + ndw > cs->ndw) {
300 /* round up the required size to a multiple of 1024 */
301 tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
302 ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
/*
 * Close the current CS section, reporting an error if no section is open
 * or if the number of dwords written (section_cdw) differs from the size
 * declared at cs_gem_begin() (section_ndw).
 */
312 static int cs_gem_end(struct radeon_cs_int *cs,
318 if (!cs->section_ndw) {
319 fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
323 if (cs->section_ndw != cs->section_cdw) {
324 fprintf(stderr, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
325 cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
326 fprintf(stderr, "CS section end at (%s,%s,%d)\n",
329 /* We must reset the section even when there is error. */
/*
 * Serialise this CS into a BOF (binary object format) file for offline
 * debugging: device id, reloc table, PM4 packet stream, and the mapped
 * contents of every referenced bo.  Output file name encodes the device
 * id and a per-manager dump counter.
 * NOTE(review): NULL checks after each bof_* allocation and the goto/out
 * cleanup path are elided in this view.
 */
338 static void cs_gem_dump_bof(struct radeon_cs_int *cs)
340 struct cs_gem *csg = (struct cs_gem*)cs;
341 struct radeon_cs_manager_gem *csm;
342 bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
346 csm = (struct radeon_cs_manager_gem *)cs->csm;
347 root = device_id = bcs = blob = array = bo = size = handle = NULL;
351 device_id = bof_int32(csm->device_id);
352 if (device_id == NULL)
354 if (bof_object_set(root, "device_id", device_id))
356 bof_decref(device_id);
/* Reloc records are 16 bytes each (RELOC_SIZE dwords). */
359 blob = bof_blob(csg->nrelocs * 16, csg->relocs);
362 if (bof_object_set(root, "reloc", blob))
367 blob = bof_blob(cs->cdw * 4, cs->packets);
370 if (bof_object_set(root, "pm4", blob))
/* One object per referenced bo: size, handle, and mapped contents. */
378 for (i = 0; i < csg->base.crelocs; i++) {
382 size = bof_int32(csg->relocs_bo[i]->size);
385 if (bof_object_set(bo, "size", size))
389 handle = bof_int32(csg->relocs_bo[i]->handle);
392 if (bof_object_set(bo, "handle", handle))
396 radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
397 blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
398 radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
401 if (bof_object_set(bo, "data", blob))
405 if (bof_array_append(array, bo))
410 if (bof_object_set(root, "bo", array))
412 sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
413 bof_dump_file(root, tmp);
420 bof_decref(device_id);
/*
 * Submit the CS to the kernel via the DRM_RADEON_CS ioctl, then release
 * every bo reference taken by cs_gem_write_reloc and reset the manager's
 * space-accounting counters.
 * NOTE(review): padding loop, BOF-dump hook, and the return statement are
 * elided in this view.
 */
425 static int cs_gem_emit(struct radeon_cs_int *cs)
427 struct cs_gem *csg = (struct cs_gem*)cs;
428 uint64_t chunk_array[2];
/* Pad the IB with NOP-type dwords (0x80000000). */
433 radeon_cs_write_dword((struct radeon_cs *)cs, 0x80000000);
438 csg->chunks[0].length_dw = cs->cdw;
440 chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
441 chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];
443 csg->cs.num_chunks = 2;
444 csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;
446 r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
447 &csg->cs, sizeof(struct drm_radeon_cs));
/* Drop the per-CS tag and the reference on every bo, regardless of the
 * ioctl result. */
448 for (i = 0; i < csg->base.crelocs; i++) {
449 csg->relocs_bo[i]->space_accounted = 0;
450 /* bo might be referenced from another context so have to use atomic opertions */
451 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
452 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
453 csg->relocs_bo[i] = NULL;
456 cs->csm->read_used = 0;
457 cs->csm->vram_write_used = 0;
458 cs->csm->gart_write_used = 0;
/*
 * Tear down a CS created by cs_gem_create.
 * NOTE(review): freeing of the id bit, packet buffer, and the cs struct
 * itself is elided in this view.
 */
462 static int cs_gem_destroy(struct radeon_cs_int *cs)
464 struct cs_gem *csg = (struct cs_gem*)cs;
467 free(csg->relocs_bo);
/*
 * Reset the CS to empty for reuse without freeing its buffers: untag and
 * unreference every bo, then zero the counters and chunk lengths.
 * NOTE(review): resets of cdw/crelocs and the return statement are elided
 * in this view.
 */
474 static int cs_gem_erase(struct radeon_cs_int *cs)
476 struct cs_gem *csg = (struct cs_gem*)cs;
479 if (csg->relocs_bo) {
480 for (i = 0; i < csg->base.crelocs; i++) {
481 if (csg->relocs_bo[i]) {
482 /* bo might be referenced from another context so have to use atomic opertions */
483 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
484 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
485 csg->relocs_bo[i] = NULL;
489 cs->relocs_total_size = 0;
493 csg->chunks[0].length_dw = 0;
494 csg->chunks[1].length_dw = 0;
/*
 * Report whether the CS should be flushed.  Always returns 0: the
 * size-based heuristic (reloc total > 32MiB) is kept disabled in the
 * trailing comment.
 */
498 static int cs_gem_need_flush(struct radeon_cs_int *cs)
500 return 0; //(cs->relocs_total_size > (32*1024*1024));
/*
 * Dump the CS to the given stream: PCI vendor (0x1002 = ATI/AMD) and
 * device id header, followed by every packet dword in hex, one per line.
 */
503 static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
505 struct radeon_cs_manager_gem *csm;
508 csm = (struct radeon_cs_manager_gem *)cs->csm;
509 fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
510 for (i = 0; i < cs->cdw; i++) {
511 fprintf(file, "0x%08X\n", cs->packets[i]);
/* Vtable wiring the cs_gem_* callbacks into the generic radeon CS API
 * (member initialisers elided in this view). */
515 static struct radeon_cs_funcs radeon_cs_gem_funcs = {
/*
 * Query the GPU's PCI device id through the DRM_RADEON_INFO ioctl,
 * storing the result via the kernel into *device_id.
 * Returns the drmCommandWriteRead result (return statement elided in
 * this view).
 */
527 static int radeon_get_device_id(int fd, uint32_t *device_id)
529 struct drm_radeon_info info = {};
533 info.request = RADEON_INFO_DEVICE_ID;
/* Kernel writes the value through this user pointer. */
534 info.value = (uintptr_t)device_id;
535 r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
536 sizeof(struct drm_radeon_info));
/*
 * Public constructor: allocate a GEM CS manager bound to the DRM fd,
 * install the cs_gem_* callback table, and cache the device id for use
 * by cs_gem_print/cs_gem_dump_bof.
 * NOTE(review): NULL check on calloc, fd assignment, and the return of
 * &csm->base are elided in this view.
 */
540 drm_public struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
542 struct radeon_cs_manager_gem *csm;
544 csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
548 csm->base.funcs = &radeon_cs_gem_funcs;
550 radeon_get_device_id(fd, &csm->device_id);
554 drm_public void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)