/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
28 #include "nouveau_private.h"
30 #define PB_BUFMGR_DWORDS (4096 / 2)
31 #define PB_MIN_USER_DWORDS 2048
/* Compute the 32-bit word to patch into the push buffer for relocation 'r'
 * against buffer 'pbbo', using the kernel's presumed offset/domain for the
 * buffer.
 *
 * NOTE(review): this extract elides the function's return-type line, local
 * declarations, closing braces and return statement — only the flag
 * dispatch below is visible.
 */
34 nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
35 struct drm_nouveau_gem_pushbuf_reloc *r)
/* LOW: low 32 bits of (presumed buffer offset + reloc delta). */
39 if (r->flags & NOUVEAU_GEM_RELOC_LOW)
40 push = (pbbo->presumed_offset + r->data);
/* HIGH: upper 32 bits of the same sum, for >32-bit GPU addresses. */
42 if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
43 push = (pbbo->presumed_offset + r->data) >> 32;
/* OR: OR in a value chosen by the buffer's presumed placement
 * (presumably 'vor' for VRAM vs 'tor' for GART — the branch bodies are
 * elided from this extract; confirm against the full file). */
47 if (r->flags & NOUVEAU_GEM_RELOC_OR) {
48 if (pbbo->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM)
/* Record a relocation for 'bo' at push-buffer position 'ptr' and patch the
 * word there with a provisional value based on the buffer's presumed
 * placement.  The kernel fixes the value up at submit time if the buffer
 * moved.
 *
 * NOTE(review): several lines (return statements, closing braces, the
 * 'domains' local declaration, r->data/vor/tor assignments) are elided from
 * this extract.
 */
58 nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
59 struct nouveau_bo *bo, uint32_t data, uint32_t data2,
60 uint32_t flags, uint32_t vor, uint32_t tor)
62 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
63 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
64 struct drm_nouveau_gem_pushbuf_reloc *r;
65 struct drm_nouveau_gem_pushbuf_bo *pbbo;
/* Hard cap: the kernel interface only accepts NOUVEAU_GEM_MAX_RELOCS
 * relocations per submission. */
68 if (nvpb->nr_relocs >= NOUVEAU_GEM_MAX_RELOCS) {
69 fprintf(stderr, "too many relocs!!\n");
/* User-memory buffers may not be written by the GPU. */
73 if (nvbo->user && (flags & NOUVEAU_BO_WR)) {
74 fprintf(stderr, "write to user buffer!!\n");
/* Ensure the bo is on this submission's validation list; returns its
 * drm_nouveau_gem_pushbuf_bo entry (error path below if it failed). */
78 pbbo = nouveau_bo_emit_buffer(chan, bo);
80 fprintf(stderr, "buffer emit fail :(\n");
/* One pending reference per reloc; dropped again after flush/undo. */
84 nvbo->pending_refcnt++;
/* Translate NOUVEAU_BO_* placement flags into GEM domain bits. */
86 if (flags & NOUVEAU_BO_VRAM)
87 domains |= NOUVEAU_GEM_DOMAIN_VRAM;
88 if (flags & NOUVEAU_BO_GART)
89 domains |= NOUVEAU_GEM_DOMAIN_GART;
/* Each reloc narrows where the buffer may live; if the intersection with
 * previous relocs is empty the submission cannot be satisfied. */
91 if (!(pbbo->valid_domains & domains)) {
92 fprintf(stderr, "no valid domains remain!\n");
95 pbbo->valid_domains &= domains;
/* Caller must declare at least one of read/write access. */
97 assert(flags & NOUVEAU_BO_RDWR);
98 if (flags & NOUVEAU_BO_RD) {
99 pbbo->read_domains |= domains;
101 if (flags & NOUVEAU_BO_WR) {
102 pbbo->write_domains |= domains;
/* Remember the bo has pending GPU writes (used for sync decisions). */
103 nvbo->write_marker = 1;
/* Append the reloc entry: which validation-list slot and which dword of
 * the push buffer it patches. */
106 r = nvpb->relocs + nvpb->nr_relocs++;
107 r->bo_index = pbbo - nvpb->buffers;
108 r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
110 if (flags & NOUVEAU_BO_LOW)
111 r->flags |= NOUVEAU_GEM_RELOC_LOW;
112 if (flags & NOUVEAU_BO_HIGH)
113 r->flags |= NOUVEAU_GEM_RELOC_HIGH;
114 if (flags & NOUVEAU_BO_OR)
115 r->flags |= NOUVEAU_GEM_RELOC_OR;
/* Write a provisional value now; DUMMY relocs get 0 and are fixed up
 * entirely by the kernel. */
120 *(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
121 nouveau_pushbuf_calc_reloc(pbbo, r);
/* Ensure at least 'min' dwords of space when using the CAL-style
 * (kernel "pushbuf call") submission path, rotating through the
 * pre-allocated CALPB_BUFFERS ring of GART buffer objects.
 *
 * NOTE(review): returns, error paths and the nvpb->current advance are on
 * lines elided from this extract.
 */
126 nouveau_pushbuf_space_call(struct nouveau_channel *chan, unsigned min)
128 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
129 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
130 struct nouveau_bo *bo;
/* Always reserve a sane minimum so we do not flush on every few words. */
133 if (min < PB_MIN_USER_DWORDS)
134 min = PB_MIN_USER_DWORDS;
/* '+ 2' keeps room for the cal_suffix0/1 terminator words appended by
 * nouveau_pushbuf_flush(). */
136 nvpb->current_offset = nvpb->base.cur - nvpb->pushbuf;
137 if (nvpb->current_offset + min + 2 <= nvpb->size)
/* Wrap around the ring of call buffers. */
141 if (nvpb->current == CALPB_BUFFERS)
143 bo = nvpb->buffer[nvpb->current];
145 ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
/* Reserve the final 8 bytes (2 dwords) of the bo for the call suffix. */
149 nvpb->size = (bo->size - 8) / 4;
150 nvpb->pushbuf = bo->map;
151 nvpb->current_offset = 0;
153 nvpb->base.channel = chan;
154 nvpb->base.remaining = nvpb->size;
155 nvpb->base.cur = nvpb->pushbuf;
/* CPU pointer was captured above; drop the mapping handle. */
157 nouveau_bo_unmap(bo);
/* Ensure at least 'min' dwords of push-buffer space, dispatching to the
 * CAL path when available, otherwise (re)allocating a user-space staging
 * buffer that gets copied in via the PUSHBUF ioctl at flush time.
 *
 * NOTE(review): the condition selecting the CAL path, the free() of the
 * previous staging buffer and the return statements are on elided lines.
 */
162 nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
164 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
165 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* Fast path: channel supports kernel pushbuf calls. */
168 return nouveau_pushbuf_space_call(chan, min);
172 nvpb->pushbuf = NULL;
/* Fallback: malloc'd staging area, at least PB_MIN_USER_DWORDS long.
 * NOTE(review): the malloc result is not visibly checked here — verify
 * against the elided lines. */
175 nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
176 nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
178 nvpb->base.channel = chan;
179 nvpb->base.remaining = nvpb->size;
180 nvpb->base.cur = nvpb->pushbuf;
/* Tear down the CAL submission path: drop every ring buffer object and
 * clear the CPU push-buffer pointer (it aliased a bo mapping, so it must
 * not be free()d).
 *
 * NOTE(review): the declaration of 'i' and any flag reset are on elided
 * lines.
 */
186 nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
188 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
189 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* nouveau_bo_ref(NULL, ...) releases the reference and NULLs the slot. */
192 for (i = 0; i < CALPB_BUFFERS; i++)
193 nouveau_bo_ref(NULL, &nvpb->buffer[i]);
195 nvpb->pushbuf = NULL;
/* Probe the kernel for pushbuf-call support on this channel and, if
 * present, allocate the ring of GART call buffers and record the initial
 * call suffix words handed back by the kernel.
 *
 * NOTE(review): error returns, the 'i'/'ret' declarations and the branch
 * structure between the two ioctl attempts are on elided lines.
 */
199 nouveau_pushbuf_init_call(struct nouveau_channel *chan)
201 struct drm_nouveau_gem_pushbuf_call req;
202 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
203 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
204 struct nouveau_device *dev = chan->device;
207 req.channel = chan->id;
/* First attempt: CALL2 (reports vram/gart aperture sizes). */
209 ret = drmCommandWriteRead(nouveau_device(dev)->fd,
210 DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
/* Fallback attempt for older kernels.
 * NOTE(review): both visible attempts pass DRM_NOUVEAU_GEM_PUSHBUF_CALL2;
 * the fallback presumably intended DRM_NOUVEAU_GEM_PUSHBUF_CALL — verify
 * against the full file before changing. */
213 ret = drmCommandWriteRead(nouveau_device(dev)->fd,
214 DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
/* Old ioctl cannot report aperture sizes, so skip those updates later. */
219 nvpb->no_aper_update = 1;
/* Allocate the fixed ring of mappable GART buffers used for calls. */
222 for (i = 0; i < CALPB_BUFFERS; i++) {
223 ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
224 0, CALPB_BUFSZ, &nvpb->buffer[i]);
/* On allocation failure, release whatever was already allocated. */
226 nouveau_pushbuf_fini_call(chan);
/* Suffix words the kernel wants appended to every call submission. */
232 nvpb->cal_suffix0 = req.suffix0;
233 nvpb->cal_suffix1 = req.suffix1;
/* One-time push-buffer setup for a channel: try the CAL path, obtain
 * initial space (falling back to the user-space path if that fails),
 * allocate the validation-list and relocation arrays, and publish the
 * pushbuf on the channel.
 *
 * NOTE(review): return statements, error checks between the two space()
 * attempts, and the 'ret' declaration are on elided lines.  The calloc
 * results are not visibly checked — verify against the full file.
 */
237 nouveau_pushbuf_init(struct nouveau_channel *chan)
239 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
240 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
243 nouveau_pushbuf_init_call(chan);
245 ret = nouveau_pushbuf_space(chan, 0);
/* If the CAL path could not provide space, dismantle it and retry via
 * the user-space staging path. */
248 nouveau_pushbuf_fini_call(chan);
249 ret = nouveau_pushbuf_space(chan, 0);
/* Kernel-interface-sized arrays for buffers and relocations. */
256 nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
257 sizeof(struct drm_nouveau_gem_pushbuf_bo));
258 nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
259 sizeof(struct drm_nouveau_gem_pushbuf_reloc));
261 chan->pushbuf = &nvpb->base;
/* Submit everything accumulated in the push buffer to the kernel, update
 * buffer bookkeeping from the kernel's reply, and reserve 'min' dwords of
 * fresh space for the caller.  Chooses between the CAL ioctl path and the
 * legacy copy-in PUSHBUF ioctl.
 *
 * NOTE(review): return statements, ioctl error handling, the branch
 * structure selecting CAL vs legacy, and several declarations are on
 * elided lines in this extract.
 */
266 nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
268 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
269 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
270 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* Nothing emitted since the last flush — nothing to do. */
274 if (nvpb->base.remaining == nvpb->size)
278 struct drm_nouveau_gem_pushbuf_call req;
/* CAL path: terminate the stream with the kernel-provided suffix pair.
 * space_call() reserved 2 dwords for exactly this. */
280 *(nvpb->base.cur++) = nvpb->cal_suffix0;
281 *(nvpb->base.cur++) = nvpb->cal_suffix1;
282 if (nvpb->base.remaining > 2) /* space() will fixup if not */
283 nvpb->base.remaining -= 2;
286 req.channel = chan->id;
287 req.handle = nvpb->buffer[nvpb->current]->handle;
288 req.offset = nvpb->current_offset * 4;
289 req.nr_buffers = nvpb->nr_buffers;
290 req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
291 req.nr_relocs = nvpb->nr_relocs;
292 req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
293 req.nr_dwords = (nvpb->base.cur - nvpb->pushbuf) -
294 nvpb->current_offset;
295 req.suffix0 = nvpb->cal_suffix0;
296 req.suffix1 = nvpb->cal_suffix1;
/* Use the older CALL ioctl when the kernel cannot report aperture
 * sizes (see nouveau_pushbuf_init_call). */
297 ret = drmCommandWriteRead(nvdev->fd, nvpb->no_aper_update ?
298 DRM_NOUVEAU_GEM_PUSHBUF_CALL :
299 DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
/* The kernel may hand back new suffix words for the next submission. */
303 nvpb->cal_suffix0 = req.suffix0;
304 nvpb->cal_suffix1 = req.suffix1;
305 if (!nvpb->no_aper_update) {
306 nvdev->base.vm_vram_size = req.vram_available;
307 nvdev->base.vm_gart_size = req.gart_available;
/* Legacy path: the kernel copies the dwords out of our malloc'd
 * staging buffer. */
310 struct drm_nouveau_gem_pushbuf req;
313 req.channel = chan->id;
314 req.nr_dwords = nvpb->size - nvpb->base.remaining;
315 req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
316 req.nr_buffers = nvpb->nr_buffers;
317 req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
318 req.nr_relocs = nvpb->nr_relocs;
319 req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
320 ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
327 /* Update presumed offset/domain for any buffers that moved.
328 * Dereference all buffers on validate list
330 for (i = 0; i < nvpb->nr_relocs; i++) {
331 struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
332 struct drm_nouveau_gem_pushbuf_bo *pbbo =
333 &nvpb->buffers[r->bo_index];
334 struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
335 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* A bo with several relocs appears once per reloc; only the final
 * decrement (refcnt hits 0) does the per-buffer cleanup below. */
337 if (--nvbo->pending_refcnt)
/* Kernel says the presumed placement was wrong — cache the real one
 * so future relocs are patched correctly up front. */
340 if (pbbo->presumed_ok == 0) {
341 nvbo->domain = pbbo->presumed_domain;
342 nvbo->offset = pbbo->presumed_offset;
/* Buffer is no longer on a pending validation list; drop the list's
 * reference. */
345 nvbo->pending = NULL;
346 nouveau_bo_ref(NULL, &bo);
349 nvpb->nr_buffers = 0;
352 /* Allocate space for next push buffer */
353 assert(!nouveau_pushbuf_space(chan, min));
/* Let users (e.g. state trackers) re-emit state invalidated by flush. */
355 if (chan->flush_notify)
356 chan->flush_notify(chan);
/* Record a rollback point ("marker") in the push buffer, first flushing if
 * fewer than 'wait_dwords' of ring space or 'wait_relocs' reloc slots
 * remain, so everything emitted after the marker can be undone atomically
 * by nouveau_pushbuf_marker_undo().
 *
 * NOTE(review): the return type/value and closing lines are elided from
 * this extract.
 */
363 nouveau_pushbuf_marker_emit(struct nouveau_channel *chan,
364 unsigned wait_dwords, unsigned wait_relocs)
366 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
/* Flush early so the upcoming dwords/relocs cannot trigger a flush in
 * the middle of the span the marker is meant to protect. */
368 if (AVAIL_RING(chan) < wait_dwords)
369 return nouveau_pushbuf_flush(chan, wait_dwords);
371 if (nvpb->nr_relocs + wait_relocs >= NOUVEAU_GEM_MAX_RELOCS)
372 return nouveau_pushbuf_flush(chan, wait_dwords);
/* Snapshot the current write position and reloc count. */
374 nvpb->marker = nvpb->base.cur - nvpb->pushbuf;
375 nvpb->marker_relocs = nvpb->nr_relocs;
/* Roll the push buffer back to the last marker recorded by
 * nouveau_pushbuf_marker_emit(): drop every reloc (and its buffer
 * reference) added since the marker, then reset the write cursor.
 *
 * NOTE(review): the declaration of 'i', an early-out if no marker is set,
 * and 'continue' bodies are on lines elided from this extract.
 */
380 nouveau_pushbuf_marker_undo(struct nouveau_channel *chan)
382 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
388 /* undo any relocs/buffers added to the list since last marker */
389 for (i = nvpb->marker_relocs; i < nvpb->nr_relocs; i++) {
390 struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
391 struct drm_nouveau_gem_pushbuf_bo *pbbo =
392 &nvpb->buffers[r->bo_index];
393 struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
394 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* Mirrors the flush-path cleanup: only the last reloc referencing a
 * buffer (refcnt reaching 0) drops it from the pending list. */
396 if (--nvbo->pending_refcnt)
399 nvbo->pending = NULL;
400 nouveau_bo_ref(NULL, &bo);
403 nvpb->nr_relocs = nvpb->marker_relocs;
405 /* reset pushbuf back to last marker */
406 nvpb->base.cur = nvpb->pushbuf + nvpb->marker;
407 nvpb->base.remaining = nvpb->size - nvpb->marker;