/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers need hardware backing.  Constant buffers
 * do not.  No other types of buffers are currently supported.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
}
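
/* Note: constant buffer contents reach the host through the command
 * stream (shader-constant commands) rather than through a DMA'd
 * surface, which is presumably why they need no hardware backing.
 */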


static unsigned int
svga_buffer_is_referenced( struct pipe_context *pipe,
                           struct pipe_resource *buf,
                           unsigned face, unsigned level)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(buf);

   /*
    * The screen may cache buffer writes, but when we map, we map out
    * of those cached writes, so we don't need to set a
    * PIPE_REFERENCED_FOR_WRITE flag for cached buffers.
    */
   if (!sbuf->handle || ss->sws->surface_is_flushed(ss->sws, sbuf->handle))
      return PIPE_UNREFERENCED;

   /*
    * sws->surface_is_flushed() does not distinguish between read and
    * write references.  So assume a reference is both a read and a
    * write reference, but make an exception for index and vertex
    * buffers, to avoid a flush in st_bufferobj_get_subdata() during
    * display list replay.
    */
   if (sbuf->b.b.bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
      return PIPE_REFERENCED_FOR_READ;

   return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
}
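
/**
 * Map a buffer for read/write access.
 *
 * A buffer may be backed by a winsys hardware buffer (hwbuf), by a
 * malloc'ed or user-supplied buffer (swbuf), or by neither, in which
 * case hardware storage is created on demand, with a malloc fallback
 * if that fails.  Note that the returned pointer addresses the start
 * of the buffer, not the start of the requested range.
 */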
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_resource *buf,
                       unsigned offset,
                       unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         debug_printf("%s: failed to allocate %u KB of DMA, splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->b.b.width0 + 1023)/1024);

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      map = sws->buffer_map(sws, sbuf->hwbuf, usage);
   }
   else {
      map = NULL;
   }

   if (map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_TRANSFER_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}
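
/**
 * Record a modified sub-range of a mapped buffer.  Ranges are
 * accumulated with svga_buffer_add_range() so that uploads to the
 * host can be limited to the dirty regions.
 */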
static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_resource *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if (sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      svga_buffer_add_range(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}
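
/**
 * Unmap a buffer.  If the map was for writing but no range was
 * flushed explicitly, the whole buffer is conservatively marked dirty.
 */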
static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_resource *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count)
      --sbuf->map.count;

   if (sbuf->hwbuf)
      sws->buffer_unmap(sws, sbuf->hwbuf);

   if (sbuf->map.writing) {
      if (!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
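
/**
 * Release all storage attached to a buffer: the host surface, any
 * uploaded copy, the hardware buffer, and (unless it aliases user
 * memory) the malloc'ed backing store.
 */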
static void
svga_buffer_destroy( struct pipe_screen *screen,
                     struct pipe_resource *buf )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if (sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if (sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if (sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if (sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}


/* Keep the original code more or less intact and implement transfers
 * in terms of the old functions.
 */
static void *
svga_buffer_transfer_map( struct pipe_context *pipe,
                          struct pipe_transfer *transfer )
{
   uint8_t *map = svga_buffer_map_range( pipe->screen,
                                         transfer->resource,
                                         transfer->box.x,
                                         transfer->box.width,
                                         transfer->usage );
   if (map == NULL)
      return NULL;

   /* map_buffer() returned a pointer to the beginning of the buffer,
    * but transfers are expected to return a pointer to just the
    * region specified in the box.
    */
   return map + transfer->box.x;
}


static void svga_buffer_transfer_flush_region( struct pipe_context *pipe,
                                               struct pipe_transfer *transfer,
                                               const struct pipe_box *box)
{
   assert(box->x + box->width <= transfer->box.width);

   svga_buffer_flush_mapped_range(pipe->screen,
                                  transfer->resource,
                                  transfer->box.x + box->x,
                                  box->width);
}


static void svga_buffer_transfer_unmap( struct pipe_context *pipe,
                                        struct pipe_transfer *transfer )
{
   svga_buffer_unmap(pipe->screen,
                     transfer->resource);
}
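
/**
 * Resource vtbl for buffers.  It is plugged into each buffer's
 * u_resource wrapper (sbuf->b.vtbl below) so that the generic
 * u_resource/u_transfer helpers can dispatch screen and context
 * entrypoints to the functions above.
 */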
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,       /* get_handle */
   svga_buffer_destroy,                 /* resource_destroy */
   svga_buffer_is_referenced,           /* is_resource_referenced */
   u_default_get_transfer,              /* get_transfer */
   u_default_transfer_destroy,          /* transfer_destroy */
   svga_buffer_transfer_map,            /* transfer_map */
   svga_buffer_transfer_flush_region,   /* transfer_flush_region */
   svga_buffer_transfer_unmap,          /* transfer_unmap */
   u_default_transfer_inline_write      /* transfer_inline_write */
};


struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto error1;

   sbuf->b.b = *template;
   sbuf->b.vtbl = &svga_buffer_vtbl;
   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.b.screen = screen;

   if (svga_buffer_needs_hw_storage(template->bind)) {
      if (svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(template->width0, 64);
      if (!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->b.b;

error2:
   FREE(sbuf);
error1:
   return NULL;
}
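
/**
 * Create a buffer that wraps application-owned memory.  No copy is
 * made: the resource aliases the caller's pointer, so the memory must
 * remain valid for the buffer's lifetime (svga_buffer_destroy does
 * not free user memory).
 */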
struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto no_sbuf;

   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.vtbl = &svga_buffer_vtbl;
   sbuf->b.b.screen = screen;
   sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.b._usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.b.bind = bind;
   sbuf->b.b.width0 = bytes;
   sbuf->b.b.height0 = 1;
   sbuf->b.b.depth0 = 1;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   return &sbuf->b.b;

no_sbuf:
   return NULL;
}