2 #include "pipe/p_context.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5 #include "util/u_format.h"
6 #include "translate/translate.h"
8 #include "nvc0_context.h"
9 #include "nvc0_resource.h"
11 #include "nvc0_3d.xml.h"
14 struct nouveau_channel *chan;
18 uint32_t vertex_words;
19 uint32_t packet_vertex_limit;
21 struct translate *translate;
23 boolean primitive_restart;
24 boolean need_vertex_id;
26 uint32_t restart_index;
39 init_push_context(struct nvc0_context *nvc0, struct push_context *ctx)
41 struct pipe_vertex_element *ve;
43 ctx->chan = nvc0->screen->base.channel;
44 ctx->translate = nvc0->vertex->translate;
46 if (likely(nvc0->vertex->num_elements < 32))
47 ctx->need_vertex_id = nvc0->vertprog->vp.need_vertex_id;
49 ctx->need_vertex_id = FALSE;
51 ctx->edgeflag.buffer = -1;
52 ctx->edgeflag.value = 0.5f;
54 if (unlikely(nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS)) {
55 ve = &nvc0->vertex->element[nvc0->vertprog->vp.edgeflag].pipe;
56 ctx->edgeflag.buffer = ve->vertex_buffer_index;
57 ctx->edgeflag.offset = ve->src_offset;
58 ctx->packet_vertex_limit = 1;
60 ctx->packet_vertex_limit = nvc0->vertex->vtx_per_packet_max;
61 if (unlikely(ctx->need_vertex_id))
62 ctx->packet_vertex_limit = 1;
65 ctx->vertex_words = nvc0->vertex->vtx_size;
69 set_edgeflag(struct push_context *ctx, unsigned vtx_id)
71 float f = *(float *)(ctx->edgeflag.data + vtx_id * ctx->edgeflag.stride);
73 if (ctx->edgeflag.value != f) {
74 ctx->edgeflag.value = f;
75 IMMED_RING(ctx->chan, RING_3D(EDGEFLAG_ENABLE), f ? 1 : 0);
80 set_vertexid(struct push_context *ctx, uint32_t vtx_id)
83 BEGIN_RING(ctx->chan, RING_3D(VERTEX_ID), 1); /* broken on nvc0 */
85 BEGIN_RING(ctx->chan, RING_3D(VERTEX_DATA), 1); /* as last attribute */
87 OUT_RING (ctx->chan, vtx_id);
90 static INLINE unsigned
91 prim_restart_search_i08(uint8_t *elts, unsigned push, uint8_t index)
94 for (i = 0; i < push; ++i)
100 static INLINE unsigned
101 prim_restart_search_i16(uint16_t *elts, unsigned push, uint16_t index)
104 for (i = 0; i < push; ++i)
105 if (elts[i] == index)
110 static INLINE unsigned
111 prim_restart_search_i32(uint32_t *elts, unsigned push, uint32_t index)
114 for (i = 0; i < push; ++i)
115 if (elts[i] == index)
121 emit_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
123 uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
126 unsigned push = MIN2(count, ctx->packet_vertex_limit);
130 if (ctx->primitive_restart)
131 nr = prim_restart_search_i08(elts, push, ctx->restart_index);
133 if (unlikely(ctx->edgeflag.buffer >= 0) && likely(nr))
134 set_edgeflag(ctx, elts[0]);
136 size = ctx->vertex_words * nr;
138 BEGIN_RING_NI(ctx->chan, RING_3D(VERTEX_DATA), size);
140 ctx->translate->run_elts8(ctx->translate, elts, nr, ctx->instance_id,
142 ctx->chan->cur += size;
144 if (unlikely(ctx->need_vertex_id) && likely(size))
145 set_vertexid(ctx, elts[0]);
153 BEGIN_RING(ctx->chan, RING_3D(VERTEX_END_GL), 2);
154 OUT_RING (ctx->chan, 0);
155 OUT_RING (ctx->chan, NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_CONT |
156 (ctx->prim & ~NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT));
162 emit_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
164 uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
167 unsigned push = MIN2(count, ctx->packet_vertex_limit);
171 if (ctx->primitive_restart)
172 nr = prim_restart_search_i16(elts, push, ctx->restart_index);
174 if (unlikely(ctx->edgeflag.buffer >= 0) && likely(nr))
175 set_edgeflag(ctx, elts[0]);
177 size = ctx->vertex_words * nr;
179 BEGIN_RING_NI(ctx->chan, RING_3D(VERTEX_DATA), size);
181 ctx->translate->run_elts16(ctx->translate, elts, nr, ctx->instance_id,
183 ctx->chan->cur += size;
185 if (unlikely(ctx->need_vertex_id))
186 set_vertexid(ctx, elts[0]);
194 BEGIN_RING(ctx->chan, RING_3D(VERTEX_END_GL), 2);
195 OUT_RING (ctx->chan, 0);
196 OUT_RING (ctx->chan, NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_CONT |
197 (ctx->prim & ~NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT));
203 emit_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
205 uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
208 unsigned push = MIN2(count, ctx->packet_vertex_limit);
212 if (ctx->primitive_restart)
213 nr = prim_restart_search_i32(elts, push, ctx->restart_index);
215 if (unlikely(ctx->edgeflag.buffer >= 0) && likely(nr))
216 set_edgeflag(ctx, elts[0]);
218 size = ctx->vertex_words * nr;
220 BEGIN_RING_NI(ctx->chan, RING_3D(VERTEX_DATA), size);
222 ctx->translate->run_elts(ctx->translate, elts, nr, ctx->instance_id,
224 ctx->chan->cur += size;
226 if (unlikely(ctx->need_vertex_id))
227 set_vertexid(ctx, elts[0]);
235 BEGIN_RING(ctx->chan, RING_3D(VERTEX_END_GL), 2);
236 OUT_RING (ctx->chan, 0);
237 OUT_RING (ctx->chan, NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_CONT |
238 (ctx->prim & ~NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT));
244 emit_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
247 unsigned push = MIN2(count, ctx->packet_vertex_limit);
248 unsigned size = ctx->vertex_words * push;
250 if (unlikely(ctx->edgeflag.buffer >= 0))
251 set_edgeflag(ctx, start);
253 BEGIN_RING_NI(ctx->chan, RING_3D(VERTEX_DATA), size);
255 ctx->translate->run(ctx->translate, start, push, ctx->instance_id,
257 ctx->chan->cur += size;
259 if (unlikely(ctx->need_vertex_id))
260 set_vertexid(ctx, start);
/* Map a PIPE_PRIM_x enum value to its VERTEX_BEGIN_GL hardware encoding. */
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
271 static INLINE unsigned
272 nvc0_prim_gl(unsigned prim)
275 NVC0_PRIM_GL_CASE(POINTS);
276 NVC0_PRIM_GL_CASE(LINES);
277 NVC0_PRIM_GL_CASE(LINE_LOOP);
278 NVC0_PRIM_GL_CASE(LINE_STRIP);
279 NVC0_PRIM_GL_CASE(TRIANGLES);
280 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
281 NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
282 NVC0_PRIM_GL_CASE(QUADS);
283 NVC0_PRIM_GL_CASE(QUAD_STRIP);
284 NVC0_PRIM_GL_CASE(POLYGON);
285 NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
286 NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
287 NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
288 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
290 NVC0_PRIM_GL_CASE(PATCHES); */
292 return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
298 nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
300 struct push_context ctx;
301 unsigned i, index_size;
302 unsigned inst_count = info->instance_count;
303 unsigned vert_count = info->count;
304 boolean apply_bias = info->indexed && info->index_bias;
306 init_push_context(nvc0, &ctx);
308 for (i = 0; i < nvc0->num_vtxbufs; ++i) {
310 struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];
311 struct nv04_resource *res = nv04_resource(vb->buffer);
313 data = nouveau_resource_map_offset(&nvc0->base, res,
314 vb->buffer_offset, NOUVEAU_BO_RD);
316 if (apply_bias && likely(!(nvc0->vertex->instance_bufs & (1 << i))))
317 data += info->index_bias * vb->stride;
319 ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
321 if (unlikely(i == ctx.edgeflag.buffer)) {
322 ctx.edgeflag.data = data + ctx.edgeflag.offset;
323 ctx.edgeflag.stride = vb->stride;
329 nouveau_resource_map_offset(&nvc0->base,
330 nv04_resource(nvc0->idxbuf.buffer),
331 nvc0->idxbuf.offset, NOUVEAU_BO_RD);
334 index_size = nvc0->idxbuf.index_size;
335 ctx.primitive_restart = info->primitive_restart;
336 ctx.restart_index = info->restart_index;
340 ctx.primitive_restart = FALSE;
341 ctx.restart_index = 0;
343 if (info->count_from_stream_output) {
344 struct pipe_context *pipe = &nvc0->base.pipe;
345 struct nvc0_so_target *targ;
346 targ = nvc0_so_target(info->count_from_stream_output);
347 pipe->get_query_result(pipe, targ->pq, TRUE, &vert_count);
348 vert_count /= targ->stride;
352 ctx.instance_id = info->start_instance;
353 ctx.prim = nvc0_prim_gl(info->mode);
355 if (unlikely(ctx.need_vertex_id)) {
356 const unsigned a = nvc0->vertex->num_elements;
357 BEGIN_RING(ctx.chan, RING_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
358 OUT_RING (ctx.chan, (a << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
359 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
360 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
361 BEGIN_RING(ctx.chan, RING_3D(VERTEX_ID_REPLACE), 1);
362 OUT_RING (ctx.chan, (((0x80 + a * 0x10) / 4) << 4) | 1);
365 while (inst_count--) {
366 BEGIN_RING(ctx.chan, RING_3D(VERTEX_BEGIN_GL), 1);
367 OUT_RING (ctx.chan, ctx.prim);
368 switch (index_size) {
370 emit_vertices_seq(&ctx, info->start, vert_count);
373 emit_vertices_i08(&ctx, info->start, vert_count);
376 emit_vertices_i16(&ctx, info->start, vert_count);
379 emit_vertices_i32(&ctx, info->start, vert_count);
385 IMMED_RING(ctx.chan, RING_3D(VERTEX_END_GL), 0);
388 ctx.prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
391 if (unlikely(ctx.edgeflag.value == 0.0f))
392 IMMED_RING(ctx.chan, RING_3D(EDGEFLAG_ENABLE), 1);
394 if (unlikely(ctx.need_vertex_id)) {
395 const unsigned a = nvc0->vertex->num_elements;
396 IMMED_RING(ctx.chan, RING_3D(VERTEX_ID_REPLACE), 0);
397 BEGIN_RING(ctx.chan, RING_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
399 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
400 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
401 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
405 nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
407 for (i = 0; i < nvc0->num_vtxbufs; ++i)
408 nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));