/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "draw/draw_vbuf.h"
#include "draw/draw_context.h"
#include "draw/draw_vertex.h"

#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_state.h"
#include "svga_swtnl.h"

#include "svga_types.h"
#include "svga_reg.h"
#include "svga3d_reg.h"
#include "svga_draw.h"
#include "svga_swtnl_private.h"
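

/*
 * vbuf_render backend for the svga driver's software TNL (swtnl) fallback.
 * The draw module hands us post-transformed vertices and ushort indices
 * through the callbacks below; we stream them into SVGA vertex/index
 * buffers and issue the actual draws through the hwtnl module.
 */


/* vbuf_render::get_vertex_info callback: make sure the swtnl vertex layout
 * is up to date before handing it back to the draw module.
 */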
static const struct vertex_info *
svga_vbuf_render_get_vertex_info( struct vbuf_render *render )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;

   svga_swtnl_update_vdecl(svga);

   return &svga_render->vertex_info;
}
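

/* vbuf_render::allocate_vertices callback: reserve room for nr_vertices
 * vertices of vertex_size bytes each, (re)creating the vertex buffer when
 * the pending data would no longer fit.
 */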
static boolean
svga_vbuf_render_allocate_vertices( struct vbuf_render *render,
                                    ushort vertex_size,
                                    ushort nr_vertices )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   struct pipe_screen *screen = svga->pipe.screen;
   size_t size = (size_t)nr_vertices * (size_t)vertex_size;
   boolean new_vbuf = FALSE;
   boolean new_ibuf = FALSE;

   if (svga_render->vertex_size != vertex_size)
      svga->swtnl.new_vdecl = TRUE;
   svga_render->vertex_size = (size_t)vertex_size;

   if (svga->swtnl.new_vbuf)
      new_ibuf = new_vbuf = TRUE;
   svga->swtnl.new_vbuf = FALSE;

   /* Need a new vertex buffer if the incoming vertices won't fit. */
   if (svga_render->vbuf_size < svga_render->vbuf_offset + svga_render->vbuf_used + size)
      new_vbuf = TRUE;

   if (new_vbuf)
      pipe_resource_reference(&svga_render->vbuf, NULL);
   if (new_ibuf)
      pipe_resource_reference(&svga_render->ibuf, NULL);

   if (!svga_render->vbuf) {
      svga_render->vbuf_size = MAX2(size, svga_render->vbuf_alloc_size);
      svga_render->vbuf = pipe_buffer_create(screen,
                                             PIPE_BIND_VERTEX_BUFFER,
                                             svga_render->vbuf_size);
      if (!svga_render->vbuf) {
         /* The allocation can fail while commands are pending; flush and
          * retry once.
          */
         svga_context_flush(svga, NULL);
         svga_render->vbuf = pipe_buffer_create(screen,
                                                PIPE_BIND_VERTEX_BUFFER,
                                                svga_render->vbuf_size);
         assert(svga_render->vbuf);
      }

      svga->swtnl.new_vdecl = TRUE;
      svga_render->vbuf_offset = 0;
   } else {
      svga_render->vbuf_offset += svga_render->vbuf_used;
   }

   svga_render->vbuf_used = 0;

   if (svga->swtnl.new_vdecl)
      svga_render->vdecl_offset = svga_render->vbuf_offset;

   return TRUE;
}
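

/* vbuf_render::map_vertices callback: map the vertex buffer for writing and
 * return a pointer to this batch's portion of it.  The map is unsynchronized
 * because allocate_vertices hands out non-overlapping ranges.
 */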
static void *
svga_vbuf_render_map_vertices( struct vbuf_render *render )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;

   char *ptr = (char*)pipe_buffer_map(&svga->pipe,
                                      svga_render->vbuf,
                                      PIPE_TRANSFER_WRITE |
                                      PIPE_TRANSFER_FLUSH_EXPLICIT |
                                      PIPE_TRANSFER_DISCARD |
                                      PIPE_TRANSFER_UNSYNCHRONIZED,
                                      &svga_render->vbuf_transfer);
   return ptr + svga_render->vbuf_offset;
}
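

/* vbuf_render::unmap_vertices callback: flush only the byte range that was
 * actually written ([min_index, max_index]) and record how much of the
 * buffer this batch used.
 */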
static void
svga_vbuf_render_unmap_vertices( struct vbuf_render *render,
                                 ushort min_index,
                                 ushort max_index )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   unsigned offset, length;
   size_t used = svga_render->vertex_size * ((size_t)max_index + 1);

   offset = svga_render->vbuf_offset + svga_render->vertex_size * min_index;
   length = svga_render->vertex_size * (max_index + 1 - min_index);
   pipe_buffer_flush_mapped_range(&svga->pipe,
                                  svga_render->vbuf_transfer,
                                  offset, length);
   pipe_buffer_unmap(&svga->pipe, svga_render->vbuf, svga_render->vbuf_transfer);
   svga_render->min_index = min_index;
   svga_render->max_index = max_index;
   svga_render->vbuf_used = MAX2(svga_render->vbuf_used, used);
}


static boolean
svga_vbuf_render_set_primitive( struct vbuf_render *render,
                                unsigned prim )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   svga_render->prim = prim;

   return TRUE;
}
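

/* Push the current vertex declarations and vertex buffer binding down to
 * the hwtnl module.  Called from the draw callbacks below; does nothing
 * unless something changed (svga->swtnl.new_vdecl is set).
 */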
static void
svga_vbuf_submit_state( struct svga_vbuf_render *svga_render )
{
   struct svga_context *svga = svga_render->svga;
   SVGA3dVertexDecl vdecl[PIPE_MAX_ATTRIBS];
   enum pipe_error ret;
   unsigned i;

   /* if the vdecl or vbuf hasn't changed do nothing */
   if (!svga->swtnl.new_vdecl)
      return;

   memcpy(vdecl, svga_render->vdecl, sizeof(vdecl));

   /* flush the hw state */
   ret = svga_hwtnl_flush(svga->hwtnl);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
      /* if we hit this path we might become synced with hw */
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   svga_hwtnl_reset_vdecl(svga->hwtnl, svga_render->vdecl_count);

   /* Rebase the element offsets onto this batch's position in the vbuf. */
   for (i = 0; i < svga_render->vdecl_count; i++) {
      vdecl[i].array.offset += svga_render->vdecl_offset;

      svga_hwtnl_vdecl( svga->hwtnl,
                        i,
                        &vdecl[i],
                        svga_render->vbuf );
   }

   /* We have already taken care of flatshading, so let the hwtnl
    * module use whatever is most convenient:
    */
   if (svga->state.sw.need_pipeline) {
      svga_hwtnl_set_flatshade(svga->hwtnl, FALSE, FALSE);
      svga_hwtnl_set_unfilled(svga->hwtnl, PIPE_POLYGON_MODE_FILL);
   }
   else {
      svga_hwtnl_set_flatshade( svga->hwtnl,
                                svga->curr.rast->templ.flatshade,
                                svga->curr.rast->templ.flatshade_first );

      svga_hwtnl_set_unfilled( svga->hwtnl,
                               svga->curr.rast->hw_unfilled );
   }

   svga->swtnl.new_vdecl = FALSE;
}
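

/* vbuf_render::draw_arrays callback: emit a non-indexed draw of vertices
 * previously written through map/unmap_vertices.  'bias' translates the
 * draw module's batch-relative 'start' into an offset from the point where
 * the vertex declarations were set.
 */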
static void
svga_vbuf_render_draw_arrays( struct vbuf_render *render,
                              unsigned start,
                              uint nr )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   unsigned bias = (svga_render->vbuf_offset - svga_render->vdecl_offset) / svga_render->vertex_size;
   enum pipe_error ret = 0;

   svga_vbuf_submit_state(svga_render);

   /* Need to call update_state() again as the draw module may have
    * altered some of our state behind our backs.  Testcase:
    * redbook/polys.c
    */
   svga_update_state_retry( svga, SVGA_STATE_HW_DRAW );

   ret = svga_hwtnl_draw_arrays(svga->hwtnl, svga_render->prim, start + bias, nr);
   if (ret != PIPE_OK) {
      /* Retry once after flushing; the command buffer may have been full. */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_draw_arrays(svga->hwtnl, svga_render->prim, start + bias, nr);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }
}
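

/* vbuf_render::draw callback: indexed drawing.  The ushort indices are
 * streamed into a recycled index buffer and submitted as a range-elements
 * draw through the hwtnl module.
 */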
static void
svga_vbuf_render_draw( struct vbuf_render *render,
                       const ushort *indices,
                       uint nr_indices )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   struct pipe_screen *screen = svga->pipe.screen;
   unsigned bias = (svga_render->vbuf_offset - svga_render->vdecl_offset) / svga_render->vertex_size;
   enum pipe_error ret;
   size_t size = 2 * nr_indices;

   assert((svga_render->vbuf_offset - svga_render->vdecl_offset) % svga_render->vertex_size == 0);

   /* Allocate a new index buffer if the indices won't fit in the current one. */
   if (svga_render->ibuf_size < svga_render->ibuf_offset + size)
      pipe_resource_reference(&svga_render->ibuf, NULL);

   if (!svga_render->ibuf) {
      svga_render->ibuf_size = MAX2(size, svga_render->ibuf_alloc_size);
      svga_render->ibuf = pipe_buffer_create(screen,
                                             PIPE_BIND_INDEX_BUFFER,
                                             svga_render->ibuf_size);
      svga_render->ibuf_offset = 0;
   }

   pipe_buffer_write_nooverlap(&svga->pipe, svga_render->ibuf,
                               svga_render->ibuf_offset, 2 * nr_indices,
                               indices);

   /* off to hardware */
   svga_vbuf_submit_state(svga_render);

   /* Need to call update_state() again as the draw module may have
    * altered some of our state behind our backs.  Testcase:
    * redbook/polys.c
    */
   svga_update_state_retry( svga, SVGA_STATE_HW_DRAW );

   ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                        svga_render->ibuf,
                                        2, /* index size in bytes */
                                        svga_render->min_index,
                                        svga_render->max_index,
                                        svga_render->prim,
                                        svga_render->ibuf_offset / 2, nr_indices, bias);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                           svga_render->ibuf,
                                           2,
                                           svga_render->min_index,
                                           svga_render->max_index,
                                           svga_render->prim,
                                           svga_render->ibuf_offset / 2, nr_indices, bias);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   svga_render->ibuf_offset += size;
}


static void
svga_vbuf_render_release_vertices( struct vbuf_render *render )
{
   /* Nothing to do: the vertex buffer space is recycled by
    * svga_vbuf_render_allocate_vertices().
    */
}


static void
svga_vbuf_render_destroy( struct vbuf_render *render )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);

   pipe_resource_reference(&svga_render->vbuf, NULL);
   pipe_resource_reference(&svga_render->ibuf, NULL);

   FREE(svga_render);
}


/**
 * Create a new primitive render.
 */
struct vbuf_render *
svga_vbuf_render_create( struct svga_context *svga )
{
   struct svga_vbuf_render *svga_render = CALLOC_STRUCT(svga_vbuf_render);

   svga_render->svga = svga;
   svga_render->ibuf_size = 0;
   svga_render->vbuf_size = 0;
   svga_render->ibuf_alloc_size = 4*1024;
   svga_render->vbuf_alloc_size = 64*1024;
   svga_render->base.max_vertex_buffer_bytes = 64*1024/10;
   svga_render->base.max_indices = 65536;
   svga_render->base.get_vertex_info = svga_vbuf_render_get_vertex_info;
   svga_render->base.allocate_vertices = svga_vbuf_render_allocate_vertices;
   svga_render->base.map_vertices = svga_vbuf_render_map_vertices;
   svga_render->base.unmap_vertices = svga_vbuf_render_unmap_vertices;
   svga_render->base.set_primitive = svga_vbuf_render_set_primitive;
   svga_render->base.draw = svga_vbuf_render_draw;
   svga_render->base.draw_arrays = svga_vbuf_render_draw_arrays;
   svga_render->base.release_vertices = svga_vbuf_render_release_vertices;
   svga_render->base.destroy = svga_vbuf_render_destroy;

   return &svga_render->base;
}
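
/*
 * Typical usage (a sketch only; the actual wiring is done by the svga
 * swtnl setup code, not here):
 *
 *    struct vbuf_render *backend = svga_vbuf_render_create(svga);
 *    struct draw_stage *stage = draw_vbuf_stage(draw, backend);
 *    draw_set_rasterize_stage(draw, stage);
 *    draw_set_render(draw, backend);
 *
 * After that the draw module calls the vbuf_render callbacks above whenever
 * the software TNL path has primitives to emit.
 */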