2 * Copyright © 2008 Jérôme Glisse
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
15 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
17 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
28 * Aapo Tahkola <aet@rasterburn.org>
29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org>
35 #include <sys/ioctl.h>
36 #include "radeon_cs.h"
37 #include "radeon_cs_gem.h"
38 #include "radeon_bo_gem.h"
41 #include "radeon_drm.h"
/* Fields of the relocation record laid out in the RELOCS chunk; this is
 * the cast target of struct cs_reloc_gem in cs_gem_write_reloc.
 * NOTE(review): the enclosing struct declaration and its other fields
 * are in lines elided from this view. */
/* First byte inside the bo covered by this relocation. */
46 uint32_t start_offset;
/* GEM write domain for the bo; cs_gem_write_reloc rejects relocs where
 * both read and write domains (or neither) are set. */
49 uint32_t write_domain;
/* GEM backend command-stream object.  `base` is listed first so that a
 * struct radeon_cs* can be cast to struct cs_gem* (done throughout the
 * cs_gem_* functions below) — TODO confirm no fields precede it in the
 * elided lines. */
55 struct radeon_cs base;
/* ioctl argument for DRM_RADEON_CS2; filled in by cs_gem_emit. */
56 struct drm_radeon_cs2 cs;
/* chunks[0] = IB (packet buffer), chunks[1] = relocation table. */
57 struct drm_radeon_cs_chunk chunks[2];
/* One radeon_bo reference per relocation, dropped in emit/erase. */
60 struct radeon_bo **relocs_bo;
/* Allocate and initialize a GEM command stream for manager `csm`.
 * Rejects requests larger than the 64KB IB limit, sizes the packet
 * buffer to the full 64KB, and pre-allocates room for relocations.
 * Returns the new CS cast to struct radeon_cs*.
 * NOTE(review): the parameter-list tail, error `return NULL` paths and
 * closing braces are in lines elided from this view. */
63 static struct radeon_cs *cs_gem_create(struct radeon_cs_manager *csm,
68 /* max cmd buffer size is 64Kb */
69 if (ndw > (64 * 1024 / 4)) {
72 csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
/* Always size the IB to the 64KB maximum (in dwords), regardless of
 * the smaller `ndw` the caller asked for. */
77 csg->base.ndw = 64 * 1024 / 4;
78 csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
79 if (csg->base.packets == NULL) {
83 csg->base.relocs_total_size = 0;
84 csg->base.crelocs = 0;
/* Initial reloc capacity: a 4KB reloc buffer divided by a 16-byte
 * per-reloc estimate (NOTE(review): each reloc actually occupies
 * 6 dwords = 24 bytes below — confirm the 4*4 divisor is intended). */
85 csg->nrelocs = 4096 / (4 * 4) ;
86 csg->relocs_bo = (struct radeon_bo**)calloc(1,
87 csg->nrelocs*sizeof(void*));
88 if (csg->relocs_bo == NULL) {
/* Unwind the packet buffer on failure (rest of cleanup elided). */
89 free(csg->base.packets);
93 csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
94 if (csg->relocs == NULL) {
96 free(csg->base.packets);
/* Wire both chunks to their backing buffers; lengths grow as dwords
 * and relocs are appended. */
100 csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
101 csg->chunks[0].length_dw = 0;
102 csg->chunks[0].chunk_data = (uint64_t)(intptr_t)csg->base.packets;
103 csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
104 csg->chunks[1].length_dw = 0;
105 csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
106 return (struct radeon_cs*)csg;
/* Append a single dword to the IB, growing the packet buffer when it
 * is full.  NOTE(review): the realloc-failure handling, the assignment
 * of `ptr` back into cs->packets/ndw, and the return statement are in
 * lines elided from this view. */
109 static int cs_gem_write_dword(struct radeon_cs *cs, uint32_t dword)
111 struct cs_gem *csg = (struct cs_gem*)cs;
112 if (cs->cdw >= cs->ndw) {
/* Round the needed size up to the next multiple of 0x400 dwords. */
114 tmp = (cs->cdw + 1 + 0x3FF) & (~0x3FF);
115 ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
/* The buffer may have moved: refresh the IB chunk's data pointer. */
121 csg->chunks[0].chunk_data = (uint64_t)(intptr_t)csg->base.packets;
123 cs->packets[cs->cdw++] = dword;
124 csg->chunks[0].length_dw += 1;
/* Record a buffer-object relocation in the RELOCS chunk and emit the
 * matching NOP relocation packet (0xc0001000 + reloc index) into the IB.
 * A bo already referenced by this CS has its existing reloc entry
 * merged (domains OR'ed, offsets widened) instead of a new one added.
 * NOTE(review): validation error returns, the `i`/`idx`/`size`/`tmp`
 * declarations, and several closing braces are in lines elided from
 * this view. */
128 static int cs_gem_write_reloc(struct radeon_cs *cs,
129 struct radeon_bo *bo,
130 uint32_t start_offset,
132 uint32_t read_domain,
133 uint32_t write_domain,
136 struct cs_gem *csg = (struct cs_gem*)cs;
137 struct cs_reloc_gem *reloc;
/* Exactly one of read_domain / write_domain must be set. */
142 if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
143 /* in one CS a bo can only be in read or write domain but not
144 * in read & write domain at the same sime
/* The CPU domain is never valid for a CS relocation. */
148 if (read_domain == RADEON_GEM_DOMAIN_CPU) {
151 if (write_domain == RADEON_GEM_DOMAIN_CPU) {
154 /* check reloc window */
155 if (end_offset > bo->size) {
158 if (start_offset > end_offset) {
161 /* check if bo is already referenced */
162 for(i = 0; i < cs->crelocs; i++) {
/* Each reloc occupies 6 dwords in csg->relocs (idx computed in an
 * elided line as i * 6). */
164 reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
165 if (reloc->handle == bo->handle) {
166 /* Check domains must be in read or write. As we check already
167 * checked that in argument one of the read or write domain was
168 * set we only need to check that if previous reloc as the read
169 * domain set then the read_domain should also be set for this
172 if (reloc->read_domain && !read_domain) {
175 if (reloc->write_domain && !write_domain) {
178 reloc->read_domain |= read_domain;
179 reloc->write_domain |= write_domain;
180 /* update start and end offset */
181 if (start_offset < reloc->start_offset) {
182 reloc->start_offset = start_offset;
184 if (end_offset > reloc->end_offset) {
185 reloc->end_offset = end_offset;
/* NOTE(review): `reloc->flags |= (flags & reloc->flags)` is a no-op —
 * it ORs in only bits that are already set, so new flag bits from this
 * call are silently dropped.  Probably intended `reloc->flags |= flags`
 * (or `&=` to intersect) — confirm against the kernel CS checker. */
188 reloc->flags |= (flags & reloc->flags);
189 /* write relocation packet */
190 cs_gem_write_dword(cs, 0xc0001000);
191 cs_gem_write_dword(cs, idx);
/* New bo: grow the reloc tables when full, then append an entry. */
196 if (csg->base.crelocs >= csg->nrelocs) {
197 /* allocate more memory (TODO: should use a slab allocatore maybe) */
/* NOTE(review): growing by one entry per overflow makes repeated
 * appends O(n^2) reallocs; also confirm the elided lines preserve the
 * old pointers on realloc failure. */
199 size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
200 tmp = (uint32_t*)realloc(csg->relocs_bo, size);
204 csg->relocs_bo = (struct radeon_bo**)tmp;
/* 6 dwords (24 bytes) per reloc entry. */
205 size = ((csg->nrelocs + 1) * 6 * 4);
206 tmp = (uint32_t*)realloc(csg->relocs, size);
210 cs->relocs = csg->relocs = tmp;
/* Reloc buffer may have moved: refresh the chunk pointer. */
212 csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
214 csg->relocs_bo[csg->base.crelocs] = bo;
215 idx = (csg->base.crelocs++) * 6;
216 reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
217 reloc->handle = bo->handle;
218 reloc->start_offset = start_offset;
219 reloc->end_offset = end_offset;
220 reloc->read_domain = read_domain;
221 reloc->write_domain = write_domain;
222 reloc->flags = flags;
223 csg->chunks[1].length_dw += 6;
/* Account the bo's full size toward the flush threshold (see
 * cs_gem_need_flush); the bo reference itself is taken in an elided
 * line — TODO confirm. */
225 cs->relocs_total_size += bo->size;
226 cs_gem_write_dword(cs, 0xc0001000);
227 cs_gem_write_dword(cs, idx);
/* Begin a packet section in the CS (body not visible in this view). */
231 static int cs_gem_begin(struct radeon_cs *cs,
/* End a packet section in the CS (body not visible in this view). */
240 static int cs_gem_end(struct radeon_cs *cs,
/* Submit the command stream to the kernel via the DRM_RADEON_CS2
 * ioctl, then drop the bo references held for the relocations.
 * Returns the ioctl result `r` (return statement elided from view). */
250 static int cs_gem_emit(struct radeon_cs *cs)
252 struct cs_gem *csg = (struct cs_gem*)cs;
253 uint64_t chunk_array[2];
/* Point the ioctl argument at the on-stack table of chunk pointers;
 * it is only dereferenced during the ioctl call below. */
256 chunk_array[0] = (uint64_t)(intptr_t)&csg->chunks[0];
257 chunk_array[1] = (uint64_t)(intptr_t)&csg->chunks[1];
259 csg->cs.num_chunks = 2;
260 csg->cs.chunks = (uint64_t)(intptr_t)chunk_array;
262 r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS2,
263 &csg->cs, sizeof(struct drm_radeon_cs2));
/* Release the per-reloc bo references regardless of ioctl outcome —
 * TODO confirm no early return precedes this in the elided lines. */
264 for (i = 0; i < csg->base.crelocs; i++) {
265 radeon_bo_unref(csg->relocs_bo[i]);
266 csg->relocs_bo[i] = NULL;
/* Free the CS and its buffers.  NOTE(review): frees of csg->relocs,
 * csg->base.packets and csg itself are presumably in the elided lines
 * — confirm nothing leaks. */
271 static int cs_gem_destroy(struct radeon_cs *cs)
273 struct cs_gem *csg = (struct cs_gem*)cs;
275 free(csg->relocs_bo);
/* Reset the CS for reuse without freeing its buffers: drop the bo
 * references taken for relocations and zero the size accounting and
 * chunk lengths (cdw/crelocs resets are in elided lines). */
282 static int cs_gem_erase(struct radeon_cs *cs)
284 struct cs_gem *csg = (struct cs_gem*)cs;
287 if (csg->relocs_bo) {
288 for (i = 0; i < csg->base.crelocs; i++) {
289 if (csg->relocs_bo[i]) {
290 radeon_bo_unref(csg->relocs_bo[i]);
291 csg->relocs_bo[i] = NULL;
295 cs->relocs_total_size = 0;
299 csg->chunks[0].length_dw = 0;
300 csg->chunks[1].length_dw = 0;
/* Advise a flush once the referenced bo memory exceeds 16MB (the sum
 * accumulated in cs_gem_write_reloc). */
304 static int cs_gem_need_flush(struct radeon_cs *cs)
306 return (cs->relocs_total_size > (16*1024*1024));
/* Dispatch table binding the cs_gem_* implementations to the generic
 * radeon_cs interface (initializer entries elided from this view). */
309 static struct radeon_cs_funcs radeon_cs_gem_funcs = {
/* Public constructor: allocate a CS manager for drm file descriptor
 * `fd`, wired to the GEM backend function table.  Returns NULL on
 * allocation failure (NULL check, fd assignment and return are in
 * elided lines — TODO confirm). */
321 struct radeon_cs_manager *radeon_cs_manager_gem(int fd)
323 struct radeon_cs_manager *csm;
325 csm = (struct radeon_cs_manager*)calloc(1,
326 sizeof(struct radeon_cs_manager));
330 csm->funcs = &radeon_cs_gem_funcs;
335 void radeon_cs_manager_gem_shutdown(struct radeon_cs_manager *csm)