/*
 * Copyright © 2008 Jérôme Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
#include "xf86drm.h"
#include "radeon_drm.h"
/* A relocation entry as passed to the kernel in the RELOCS chunk. */
struct cs_reloc_gem {
    uint32_t    handle;
    uint32_t    read_domain;
    uint32_t    write_domain;
    uint32_t    flags;
};

/* Size of one relocation entry in dwords. */
#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))

/* GEM command stream: the generic radeon_cs plus the two DRM chunks
 * (indirect buffer and relocation table) submitted to the kernel. */
struct cs_gem {
    struct radeon_cs            base;
    struct drm_radeon_cs        cs;
    struct drm_radeon_cs_chunk  chunks[2];
    unsigned                    nrelocs;
    uint32_t                    *relocs;
    struct radeon_bo            **relocs_bo;
};
static struct radeon_cs *cs_gem_create(struct radeon_cs_manager *csm,
                                       uint32_t ndw)
{
    struct cs_gem *csg;

    /* max command buffer size is 64KB */
    if (ndw > (64 * 1024 / 4)) {
        return NULL;
    }
    csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
    if (csg == NULL) {
        return NULL;
    }
    csg->base.csm = csm;
    csg->base.ndw = 64 * 1024 / 4;
    csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
    if (csg->base.packets == NULL) {
        free(csg);
        return NULL;
    }
    csg->base.relocs_total_size = 0;
    csg->base.crelocs = 0;
    csg->nrelocs = 4096 / (4 * 4);
    csg->relocs_bo = (struct radeon_bo**)calloc(1,
                                                csg->nrelocs * sizeof(void*));
    if (csg->relocs_bo == NULL) {
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
    if (csg->relocs == NULL) {
        free(csg->relocs_bo);
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csg->chunks[0].length_dw = 0;
    csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
    csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csg->chunks[1].length_dw = 0;
    csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    return (struct radeon_cs*)csg;
}
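/* For scale: with the sizes chosen above, a freshly created CS holds up to
 * 64KB / 4 = 16384 command dwords and 4096 / (RELOC_SIZE * 4) = 256
 * relocation entries before cs_gem_write_reloc() has to grow the arrays.
 */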
static int cs_gem_write_reloc(struct radeon_cs *cs,
                              struct radeon_bo *bo,
                              uint32_t read_domain,
                              uint32_t write_domain,
                              uint32_t flags)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct cs_reloc_gem *reloc;
    uint32_t idx;
    unsigned i;

    assert(bo->space_accounted);
    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or the write domain,
         * not in both at the same time
         */
        return -EINVAL;
    }
    /* the CPU domain cannot be referenced from a command stream */
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* check if the bo is already referenced by this CS */
    for (i = 0; i < cs->crelocs; i++) {
        idx = i * RELOC_SIZE;
        reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
        if (reloc->handle == bo->handle) {
            /* A bo must be in either the read or the write domain. The
             * argument check above guarantees that exactly one of the two
             * domains is set for this call, so here we only have to
             * reconcile it with the domains recorded by the previous
             * relocation of the same bo.
             */
            /* the DDX expects to read and write from the same pixmap */
            if (write_domain && (reloc->read_domain & write_domain)) {
                reloc->read_domain = 0;
                reloc->write_domain = write_domain;
            } else if (read_domain & reloc->write_domain) {
                reloc->read_domain = 0;
            } else {
                /* otherwise the domains must match the existing entry */
                if (write_domain != reloc->write_domain)
                    return -EINVAL;
                if (read_domain != reloc->read_domain)
                    return -EINVAL;
            }

            reloc->read_domain |= read_domain;
            reloc->write_domain |= write_domain;
            reloc->flags |= (flags & reloc->flags);
            /* write relocation packet: a one-dword NOP whose payload is
             * the dword offset of the entry in the relocation chunk */
            radeon_cs_write_dword(cs, 0xc0001000);
            radeon_cs_write_dword(cs, idx);
            return 0;
        }
    }
    /* new relocation: grow the arrays if all slots are used */
    if (csg->base.crelocs >= csg->nrelocs) {
        /* allocate more memory (TODO: should maybe use a slab allocator) */
        uint32_t *tmp, size;

        size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
        tmp = (uint32_t*)realloc(csg->relocs_bo, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        csg->relocs_bo = (struct radeon_bo**)tmp;
        size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
        tmp = (uint32_t*)realloc(csg->relocs, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        cs->relocs = csg->relocs = tmp;
        csg->nrelocs += 1;
        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    }
    csg->relocs_bo[csg->base.crelocs] = bo;
    idx = (csg->base.crelocs++) * RELOC_SIZE;
    reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
    reloc->handle = bo->handle;
    reloc->read_domain = read_domain;
    reloc->write_domain = write_domain;
    reloc->flags = flags;
    csg->chunks[1].length_dw += RELOC_SIZE;
    /* keep the bo alive until the CS is emitted or erased */
    radeon_bo_ref(bo);
    cs->relocs_total_size += bo->size;
    radeon_cs_write_dword(cs, 0xc0001000);
    radeon_cs_write_dword(cs, idx);
    return 0;
}
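/* Example of the merge rules above: if a bo is first referenced with
 * read_domain = RADEON_GEM_DOMAIN_GTT and later referenced again with
 * write_domain = RADEON_GEM_DOMAIN_GTT, the existing entry is converted to
 * write-only (read_domain cleared) instead of being rejected, which matches
 * the DDX pattern of reading from and writing to the same pixmap in one CS.
 */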
static int cs_gem_begin(struct radeon_cs *cs,
                        uint32_t ndw,
                        const char *file,
                        const char *func,
                        int line)
{
    struct cs_gem *csg = (struct cs_gem*)cs;

    if (cs->section) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section = 1;
    cs->section_ndw = ndw;
    cs->section_cdw = 0;
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

    if (cs->cdw + ndw > cs->ndw) {
        uint32_t tmp, *ptr;

        /* round up the required size to a multiple of 1024 dwords */
        tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
        /* the packet buffer may have moved, refresh the IB chunk pointer */
        csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
    }
    return 0;
}
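/* Worked example of the growth policy in cs_gem_begin(): with cs->cdw ==
 * 16000 dwords already written and a new section of ndw == 1000, the total
 * 17000 exceeds the initial 16384-dword buffer, so
 * tmp = (17000 + 0x3FF) & ~0x3FF == 17408 and the packet buffer is
 * reallocated to 4 * 17408 bytes (68KB), i.e. always a multiple of
 * 1024 dwords.
 */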
static int cs_gem_end(struct radeon_cs *cs,
                      const char *file,
                      const char *func,
                      int line)
{
    if (!cs->section) {
        fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section = 0;
    if (cs->section_ndw != cs->section_cdw) {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line,
                cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    return 0;
}
static int cs_gem_emit(struct radeon_cs *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    uint64_t chunk_array[2];
    unsigned i;
    int r;

    csg->chunks[0].length_dw = cs->cdw;

    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];

    csg->cs.num_chunks = 2;
    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;

    r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                            &csg->cs, sizeof(struct drm_radeon_cs));

    /* drop the references taken in cs_gem_write_reloc() and reset the
     * per-manager space accounting */
    for (i = 0; i < csg->base.crelocs; i++) {
        csg->relocs_bo[i]->space_accounted = 0;
        radeon_bo_unref(csg->relocs_bo[i]);
        csg->relocs_bo[i] = NULL;
    }

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;
    return r;
}
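/* Rough picture of the submission built by cs_gem_emit() (ASCII sketch of
 * the structures above, not additional code):
 *
 *   drm_radeon_cs { num_chunks = 2, chunks --> chunk_array[2] }
 *     chunk_array[0] --> chunks[0] { RADEON_CHUNK_ID_IB,
 *                                    length_dw = cs->cdw,
 *                                    chunk_data --> cs->packets }
 *     chunk_array[1] --> chunks[1] { RADEON_CHUNK_ID_RELOCS,
 *                                    length_dw = crelocs * RELOC_SIZE,
 *                                    chunk_data --> csg->relocs }
 */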
static int cs_gem_destroy(struct radeon_cs *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;

    free(csg->relocs_bo);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}
static int cs_gem_erase(struct radeon_cs *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    unsigned i;

    if (csg->relocs_bo) {
        for (i = 0; i < csg->base.crelocs; i++) {
            if (csg->relocs_bo[i]) {
                radeon_bo_unref(csg->relocs_bo[i]);
                csg->relocs_bo[i] = NULL;
            }
        }
    }
    cs->relocs_total_size = 0;
    cs->cdw = 0;
    cs->section = 0;
    cs->crelocs = 0;
    csg->chunks[0].length_dw = 0;
    csg->chunks[1].length_dw = 0;
    return 0;
}
static int cs_gem_need_flush(struct radeon_cs *cs)
{
    return 0; //(cs->relocs_total_size > (32*1024*1024));
}
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3

#define PACKET3_NOP             0x10
#define PACKET3_SET_SCISSORS    0x1E
#define PACKET3_3D_DRAW_VBUF    0x28
#define PACKET3_3D_DRAW_IMMD    0x29
#define PACKET3_3D_DRAW_INDX    0x2A
#define PACKET3_3D_LOAD_VBPNTR  0x2F
#define PACKET3_INDX_BUFFER     0x33
#define PACKET3_3D_DRAW_VBUF_2  0x34
#define PACKET3_3D_DRAW_IMMD_2  0x35
#define PACKET3_3D_DRAW_INDX_2  0x36

#define CP_PACKET_GET_TYPE(h)        (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)       (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h)        (((h) & 0x1FFF) << 2)
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
#define CP_PACKET3_GET_OPCODE(h)     (((h) >> 8) & 0xFF)
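/* Example decode with the macros above, applied to the relocation header
 * emitted by cs_gem_write_reloc() (0xC0001000):
 *   CP_PACKET_GET_TYPE(0xC0001000)    = 3    (PACKET_TYPE3)
 *   CP_PACKET_GET_COUNT(0xC0001000)   = 0    (count + 1 = 1 payload dword)
 *   CP_PACKET3_GET_OPCODE(0xC0001000) = 0x10 (PACKET3_NOP)
 * so cs_gem_print() shows each relocation as a one-dword NOP whose payload
 * is the dword offset of the entry in the RELOCS chunk.
 */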
static void cs_gem_print(struct radeon_cs *cs, FILE *file)
{
    unsigned opcode;
    unsigned reg;
    unsigned cnt;
    int i, j;

    for (i = 0; i < cs->cdw;) {
        cnt = CP_PACKET_GET_COUNT(cs->packets[i]) + 1;
        switch (CP_PACKET_GET_TYPE(cs->packets[i])) {
        case PACKET_TYPE0:
            fprintf(file, "Pkt0 at %d (%d dwords):\n", i, cnt);
            reg = CP_PACKET0_GET_REG(cs->packets[i]);
            if (CP_PACKET0_GET_ONE_REG_WR(cs->packets[i++])) {
                /* all payload dwords go to the same register */
                for (j = 0; j < cnt; j++) {
                    fprintf(file, "    0x%08X -> 0x%04X\n",
                            cs->packets[i++], reg);
                }
            } else {
                /* payload dwords go to consecutive registers */
                for (j = 0; j < cnt; j++) {
                    fprintf(file, "    0x%08X -> 0x%04X\n",
                            cs->packets[i++], reg);
                    reg += 4;
                }
            }
            break;
        case PACKET_TYPE3:
            fprintf(file, "Pkt3 at %d :\n", i);
            opcode = CP_PACKET3_GET_OPCODE(cs->packets[i++]);
            switch (opcode) {
            case PACKET3_NOP:
                fprintf(file, "    PACKET3_NOP:\n");
                break;
            case PACKET3_3D_DRAW_VBUF:
                fprintf(file, "    PACKET3_3D_DRAW_VBUF:\n");
                break;
            case PACKET3_3D_DRAW_IMMD:
                fprintf(file, "    PACKET3_3D_DRAW_IMMD:\n");
                break;
            case PACKET3_3D_DRAW_INDX:
                fprintf(file, "    PACKET3_3D_DRAW_INDX:\n");
                break;
            case PACKET3_3D_LOAD_VBPNTR:
                fprintf(file, "    PACKET3_3D_LOAD_VBPNTR:\n");
                break;
            case PACKET3_INDX_BUFFER:
                fprintf(file, "    PACKET3_INDX_BUFFER:\n");
                break;
            case PACKET3_3D_DRAW_VBUF_2:
                fprintf(file, "    PACKET3_3D_DRAW_VBUF_2:\n");
                break;
            case PACKET3_3D_DRAW_IMMD_2:
                fprintf(file, "    PACKET3_3D_DRAW_IMMD_2:\n");
                break;
            case PACKET3_3D_DRAW_INDX_2:
                fprintf(file, "    PACKET3_3D_DRAW_INDX_2:\n");
                break;
            default:
                fprintf(file, "Unknown opcode 0x%02X at %d\n", opcode, i);
                return;
            }
            for (j = 0; j < cnt; j++) {
                fprintf(file, "        0x%08X\n", cs->packets[i++]);
            }
            break;
        default:
            fprintf(file, "Unknown packet 0x%08X at %d\n", cs->packets[i], i);
            return;
        }
    }
}
/* vtable handed to the generic radeon_cs layer; entries assumed to follow
 * the member order of struct radeon_cs_funcs in radeon_cs.h */
static struct radeon_cs_funcs radeon_cs_gem_funcs = {
    cs_gem_create,
    cs_gem_write_reloc,
    cs_gem_begin,
    cs_gem_end,
    cs_gem_emit,
    cs_gem_destroy,
    cs_gem_erase,
    cs_gem_need_flush,
    cs_gem_print,
};
struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
    struct radeon_cs_manager *csm;

    csm = (struct radeon_cs_manager*)calloc(1,
                                            sizeof(struct radeon_cs_manager));
    if (csm == NULL) {
        return NULL;
    }
    csm->funcs = &radeon_cs_gem_funcs;
    csm->fd = fd;
    return csm;
}

void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
{
    free(csm);
}
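/* A minimal usage sketch, assuming the public radeon_cs_*() wrappers
 * declared in radeon_cs.h dispatch to the callbacks above (wrapper names
 * taken from that header, placeholder values are hypothetical, error
 * handling omitted):
 *
 *   struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(drm_fd);
 *   struct radeon_cs *cs = radeon_cs_create(csm, 16 * 1024);
 *
 *   radeon_cs_begin(cs, 4, __FILE__, __func__, __LINE__);
 *   radeon_cs_write_dword(cs, some_packet0_header);   // hypothetical dwords
 *   radeon_cs_write_dword(cs, some_value);
 *   radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_GTT, 0, 0);  // 2 dwords
 *   radeon_cs_end(cs, __FILE__, __func__, __LINE__);
 *
 *   radeon_cs_emit(cs);        // DRM_RADEON_CS ioctl via cs_gem_emit()
 *   radeon_cs_erase(cs);       // recycle the buffer for the next frame
 *
 *   radeon_cs_manager_gem_dtor(csm);
 */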