2 #include "util/u_inlines.h"
3 #include "util/u_memory.h"
4 #include "util/u_math.h"
6 #include "nouveau_screen.h"
7 #include "nouveau_context.h"
8 #include "nouveau_winsys.h"
9 #include "nouveau_fence.h"
10 #include "nouveau_buffer.h"
11 #include "nouveau_mm.h"
struct nouveau_transfer {
   struct pipe_transfer base;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

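/* Allocate storage for a buffer in the requested memory domain.
 * A VRAM allocation that fails falls back to GART; any domain other than
 * GART also keeps a system memory copy (the only copy for domain 0, a
 * shadow for VRAM).
 */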
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, buf->base.width0,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, buf->base.width0,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   return TRUE;
}

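/* Return a suballocation to its cache, deferred until the given fence
 * signals, since the GPU may still be using the memory until then.
 */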
static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

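/* Drop a buffer's GPU storage: the BO reference is released now, the
 * suballocation once the buffer's fence has passed.
 */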
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);
   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);
   buf->domain = 0;
}

static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}

static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   FREE(res);
}

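/* Read back a VRAM buffer range into its system memory shadow: the GPU
 * copies the data into a GART bounce buffer, which the CPU then maps and
 * memcpy()s into buf->data.
 */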
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data + start, bounce->map, size);
   nouveau_bo_unmap(bounce);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);

   return TRUE;
}

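/* Write a range from the system memory copy back into the buffer's GPU
 * storage, either directly through the push buffer or, for larger ranges,
 * via a GART bounce buffer and a GPU copy.
 */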
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   /* Small updates are cheaper to send through the push buffer directly. */
   if (size <= 192) {
      nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                    size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map_range(bounce, offset, size,
                        NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   return TRUE;
}

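/* Begin a transfer. For VRAM buffers with pending GPU writes, a read
 * transfer triggers a download into the system memory shadow first, so
 * that transfer_map can hand out a plain CPU pointer.
 */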
static struct pipe_transfer *
nouveau_buffer_transfer_get(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}

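/* End a transfer. Write transfers to VRAM buffers are flushed back from
 * the shadow copy here; vertex/index buffers are flagged so their state
 * gets revalidated.
 */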
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      /* writing is worse */
      nouveau_buffer_adjust_score(nv, buf, -5000);

      if (buf->domain == NOUVEAU_BO_VRAM)
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nouveau_context(pipe)->vbo_dirty = TRUE;
   }

   FREE(xfr);
}

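/* Wait for a buffer to become idle: readers only need the last write
 * fence, writers must wait for the last access fence.
 */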
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;
      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}

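/* Non-blocking check of the same condition nouveau_buffer_sync waits on. */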
static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

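/* Map a buffer range for CPU access. Buffers not living in GART are
 * served from the system memory copy; GART buffers are mapped, the
 * pointer saved, and the BO unmapped again right away (see the comment
 * in the function body), followed by fence synchronization according to
 * the transfer usage flags.
 */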
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}

static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nv04_resource *res = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */

   /* XXX: maybe need to upload for VRAM buffers here */

   nouveau_screen_bo_map_flush_range(pipe->screen, bo, offset, box->width);
}

static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   /* we've called nouveau_bo_unmap right after map */
}

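/* Dispatch table for the u_resource helpers. A usage sketch, assuming the
 * vtbl wrappers from util/u_transfer.h (hypothetical wiring, not part of
 * this file):
 *
 *    pscreen->resource_destroy = u_resource_destroy_vtbl;
 *    pipe->get_transfer        = u_get_transfer_vtbl;
 *    pipe->transfer_map        = u_transfer_map_vtbl;
 *
 * Each wrapper looks up nv04_resource::vtbl and calls the matching entry
 * below.
 */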
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_get,          /* get_transfer */
   nouveau_buffer_transfer_destroy,      /* transfer_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};

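/* Create a buffer resource. Buffers matching the screen's sysmem_bindings
 * mask are kept in system memory (domain 0); everything else starts out
 * in GART and may be migrated to VRAM later.
 */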
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if ((buffer->base.bind & screen->sysmem_bindings) == screen->sysmem_bindings)
      ret = nouveau_buffer_allocate(screen, buffer, 0);
   else
      ret = nouveau_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

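/* Wrap user memory in a pipe_resource without copying it: the user
 * pointer is referenced directly until GPU storage is actually needed
 * (see nouveau_user_buffer_upload).
 */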
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
                          unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data, bo->map, size);
   nouveau_bo_unmap(bo);

   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE;
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                              NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   if (ret)
      return FALSE;
   memcpy(buf->bo->map, buf->data + base, size);
   nouveau_bo_unmap(buf->bo);

   return TRUE;
}