2 * Copyright 2011 Nouveau Project
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * Authors: Christoph Bumiller
25 #include "nvc0_context.h"
26 #include "nouveau/nv_object.xml.h"
/* NOTE(review): fragment of the nvc0_query struct definition -- the struct
 * header, several members, and the closing brace are missing from this
 * listing (gaps in the embedded line numbering). Fields kept byte-identical. */
33 struct nouveau_bo *bo;
35 uint32_t offset; /* base + i * rotate */
40 int nesting; /* only used for occlusion queries */
41 struct nouveau_mm_allocation *mm; /* sub-allocation backing this query's data */
/* Size in bytes reserved per query in the GART buffer (see nvc0_query_allocate
 * and the rotation check in nvc0_query_rotate). */
44 #define NVC0_QUERY_ALLOC_SPACE 256
/* Cast the opaque gallium pipe_query handle to the driver-private query type. */
46 static INLINE struct nvc0_query *
47 nvc0_query(struct pipe_query *pipe)
49 return (struct nvc0_query *)pipe;
/* (Re)allocate the GART-backed storage of a query.
 *
 * Releases any previous storage first: the bo reference is dropped, and the
 * mm sub-allocation is either freed immediately or deferred through
 * nouveau_fence_work on the current fence (presumably to avoid freeing
 * memory the GPU may still write -- the branch condition is not visible in
 * this listing; confirm against the full file). With size != 0 a new
 * sub-allocation is taken from the screen's GART pool and mapped read-only;
 * on map failure the function frees again by recursing with size 0.
 * NOTE(review): several lines (braces, declarations, returns) are missing
 * from this listing; code kept byte-identical. */
53 nvc0_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q, int size)
55 struct nvc0_screen *screen = nvc0->screen;
59 nouveau_bo_ref(NULL, &q->bo); /* drop reference to the old bo, if any */
62 nouveau_mm_free(q->mm);
/* Defer the free until the fence signals so in-flight GPU writes finish. */
64 nouveau_fence_work(screen->base.fence.current,
65 nouveau_mm_free_work, q->mm);
69 q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
74 ret = nouveau_bo_map_range(q->bo, q->base, size, NOUVEAU_BO_RD |
/* Map failed: release what we just allocated. */
77 nvc0_query_allocate(nvc0, q, 0);
81 nouveau_bo_unmap(q->bo);
/* pipe_context::destroy_query -- free the query's storage (allocate with
 * size 0 releases everything) and, presumably, the struct itself (the FREE
 * call is not visible in this listing). */
87 nvc0_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
89 nvc0_query_allocate(nvc0_context(pipe), nvc0_query(pq), 0);
/* pipe_context::create_query -- allocate a query object of the given type
 * and its result buffer.
 *
 * The switch selects the per-query buffer space; the visible default for
 * `space` is NVC0_QUERY_ALLOC_SPACE. Occlusion queries are rotated inside
 * that space (see nvc0_query_rotate), hence the pre-decrement of
 * offset/data before the first begin.
 * NOTE(review): case bodies, `break`s and the error paths are partly
 * missing from this listing; code kept byte-identical. */
93 static struct pipe_query *
94 nvc0_query_create(struct pipe_context *pipe, unsigned type)
96 struct nvc0_context *nvc0 = nvc0_context(pipe);
98 unsigned space = NVC0_QUERY_ALLOC_SPACE;
100 q = CALLOC_STRUCT(nvc0_query);
105 case PIPE_QUERY_OCCLUSION_COUNTER:
106 case PIPE_QUERY_OCCLUSION_PREDICATE:
108 space = NVC0_QUERY_ALLOC_SPACE;
110 case PIPE_QUERY_PIPELINE_STATISTICS:
114 case PIPE_QUERY_SO_STATISTICS:
115 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
119 case PIPE_QUERY_TIME_ELAPSED:
120 case PIPE_QUERY_TIMESTAMP:
121 case PIPE_QUERY_TIMESTAMP_DISJOINT:
122 case PIPE_QUERY_GPU_FINISHED:
123 case PIPE_QUERY_PRIMITIVES_GENERATED:
124 case PIPE_QUERY_PRIMITIVES_EMITTED:
127 case NVC0_QUERY_TFB_BUFFER_OFFSET:
134 if (!nvc0_query_allocate(nvc0, q, space)) {
142 /* we advance before query_begin ! */
143 q->offset -= q->rotate;
144 q->data -= q->rotate / sizeof(*q->data);
147 q->data[0] = 0; /* initialize sequence */
149 return (struct pipe_query *)q;
/* Emit a QUERY_ADDRESS/SEQUENCE/GET command: the GPU writes the query report
 * selected by `get` (plus q->sequence) to q->bo at the given offset.
 * NOTE(review): the offset is presumably relative to q->offset -- the
 * addition is on a line missing from this listing; confirm in the full file. */
153 nvc0_query_get(struct nouveau_channel *chan, struct nvc0_query *q,
154 unsigned offset, uint32_t get)
158 MARK_RING (chan, 5, 2); /* 5 dwords, 2 relocs */
159 BEGIN_RING(chan, RING_3D(QUERY_ADDRESS_HIGH), 4);
160 OUT_RELOCh(chan, q->bo, offset, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
161 OUT_RELOCl(chan, q->bo, offset, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
162 OUT_RING (chan, q->sequence);
163 OUT_RING (chan, get); /* query type / report selector */
/* Advance the query to the next slot inside its buffer; when the end of the
 * NVC0_QUERY_ALLOC_SPACE window is reached, reallocate fresh storage
 * (used so a new occlusion query does not overwrite data a still-pending
 * render condition reads -- see the comment in nvc0_query_begin). */
167 nvc0_query_rotate(struct nvc0_context *nvc0, struct nvc0_query *q)
169 q->offset += q->rotate;
170 q->data += q->rotate / sizeof(*q->data);
171 if (q->offset - q->base == NVC0_QUERY_ALLOC_SPACE)
172 nvc0_query_allocate(nvc0, q, NVC0_QUERY_ALLOC_SPACE);
/* pipe_context::begin_query -- initialize the query's CPU-visible data and
 * emit the "begin" GPU reports for the query type. The magic 0x...002
 * values are hardware `get` selectors passed to nvc0_query_get; the inline
 * comments name the counters. NOTE(review): `break`s and some case bodies
 * are missing from this listing; code kept byte-identical. */
176 nvc0_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
178 struct nvc0_context *nvc0 = nvc0_context(pipe);
179 struct nouveau_channel *chan = nvc0->screen->base.channel;
180 struct nvc0_query *q = nvc0_query(pq);
182 /* For occlusion queries we have to change the storage, because a previous
183 * query might set the initial render condition to FALSE even *after* we re-
184 * initialized it to TRUE.
187 nvc0_query_rotate(nvc0, q);
189 /* XXX: can we do this with the GPU, and sync with respect to a previous
192 q->data[0] = q->sequence; /* initialize sequence */
193 q->data[1] = 1; /* initial render condition = TRUE */
194 q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
200 case PIPE_QUERY_OCCLUSION_COUNTER:
201 case PIPE_QUERY_OCCLUSION_PREDICATE:
/* Track nesting so render conditions know whether RES_NON_ZERO is usable. */
202 q->nesting = nvc0->screen->num_occlusion_queries_active++;
204 nvc0_query_get(chan, q, 0x10, 0x0100f002);
206 BEGIN_RING(chan, RING_3D(COUNTER_RESET), 1);
207 OUT_RING (chan, NVC0_3D_COUNTER_RESET_SAMPLECNT);
208 IMMED_RING(chan, RING_3D(SAMPLECNT_ENABLE), 1);
211 case PIPE_QUERY_PRIMITIVES_GENERATED:
212 nvc0_query_get(chan, q, 0x10, 0x06805002 | (q->index << 5));
214 case PIPE_QUERY_PRIMITIVES_EMITTED:
215 nvc0_query_get(chan, q, 0x10, 0x05805002 | (q->index << 5));
217 case PIPE_QUERY_SO_STATISTICS:
218 nvc0_query_get(chan, q, 0x20, 0x05805002 | (q->index << 5));
219 nvc0_query_get(chan, q, 0x30, 0x06805002 | (q->index << 5));
221 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
222 nvc0_query_get(chan, q, 0x10, 0x03005002 | (q->index << 5));
224 case PIPE_QUERY_TIMESTAMP_DISJOINT:
225 case PIPE_QUERY_TIME_ELAPSED:
226 nvc0_query_get(chan, q, 0x10, 0x00005002);
228 case PIPE_QUERY_PIPELINE_STATISTICS:
/* "begin" snapshots of all 10 pipeline counters, stored at +0xc0 so the
 * "end" snapshots at +0x00 can be subtracted in nvc0_query_result. */
229 nvc0_query_get(chan, q, 0xc0 + 0x00, 0x00801002); /* VFETCH, VERTICES */
230 nvc0_query_get(chan, q, 0xc0 + 0x10, 0x01801002); /* VFETCH, PRIMS */
231 nvc0_query_get(chan, q, 0xc0 + 0x20, 0x02802002); /* VP, LAUNCHES */
232 nvc0_query_get(chan, q, 0xc0 + 0x30, 0x03806002); /* GP, LAUNCHES */
233 nvc0_query_get(chan, q, 0xc0 + 0x40, 0x04806002); /* GP, PRIMS_OUT */
234 nvc0_query_get(chan, q, 0xc0 + 0x50, 0x07804002); /* RAST, PRIMS_IN */
235 nvc0_query_get(chan, q, 0xc0 + 0x60, 0x08804002); /* RAST, PRIMS_OUT */
236 nvc0_query_get(chan, q, 0xc0 + 0x70, 0x0980a002); /* ROP, PIXELS */
237 nvc0_query_get(chan, q, 0xc0 + 0x80, 0x0d808002); /* TCP, LAUNCHES */
238 nvc0_query_get(chan, q, 0xc0 + 0x90, 0x0e809002); /* TEP, LAUNCHES */
/* pipe_context::end_query -- emit the "end" GPU reports; results are the
 * difference between these and the "begin" snapshots taken in
 * nvc0_query_begin (see nvc0_query_result). Occlusion queries also disable
 * sample counting when the last active one ends.
 * NOTE(review): `break`s and some lines are missing from this listing;
 * code kept byte-identical. */
248 nvc0_query_end(struct pipe_context *pipe, struct pipe_query *pq)
250 struct nvc0_context *nvc0 = nvc0_context(pipe);
251 struct nouveau_channel *chan = nvc0->screen->base.channel;
252 struct nvc0_query *q = nvc0_query(pq);
255 /* some queries don't require 'begin' to be called (e.g. GPU_FINISHED) */
257 nvc0_query_rotate(nvc0, q);
264 case PIPE_QUERY_OCCLUSION_COUNTER:
265 case PIPE_QUERY_OCCLUSION_PREDICATE:
266 nvc0_query_get(chan, q, 0, 0x0100f002);
267 if (--nvc0->screen->num_occlusion_queries_active == 0)
268 IMMED_RING(chan, RING_3D(SAMPLECNT_ENABLE), 0);
270 case PIPE_QUERY_PRIMITIVES_GENERATED:
271 nvc0_query_get(chan, q, 0, 0x06805002 | (q->index << 5));
273 case PIPE_QUERY_PRIMITIVES_EMITTED:
274 nvc0_query_get(chan, q, 0, 0x05805002 | (q->index << 5));
276 case PIPE_QUERY_SO_STATISTICS:
277 nvc0_query_get(chan, q, 0x00, 0x05805002 | (q->index << 5));
278 nvc0_query_get(chan, q, 0x10, 0x06805002 | (q->index << 5));
280 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
281 /* TODO: How do we sum over all streams for render condition ? */
282 /* PRIMS_DROPPED doesn't write sequence, use a ZERO query to sync on */
283 nvc0_query_get(chan, q, 0x00, 0x03005002 | (q->index << 5));
284 nvc0_query_get(chan, q, 0x20, 0x00005002);
286 case PIPE_QUERY_TIMESTAMP:
287 case PIPE_QUERY_TIMESTAMP_DISJOINT:
288 case PIPE_QUERY_TIME_ELAPSED:
289 nvc0_query_get(chan, q, 0, 0x00005002);
291 case PIPE_QUERY_GPU_FINISHED:
292 nvc0_query_get(chan, q, 0, 0x1000f010);
294 case PIPE_QUERY_PIPELINE_STATISTICS:
295 nvc0_query_get(chan, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
296 nvc0_query_get(chan, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
297 nvc0_query_get(chan, q, 0x20, 0x02802002); /* VP, LAUNCHES */
298 nvc0_query_get(chan, q, 0x30, 0x03806002); /* GP, LAUNCHES */
299 nvc0_query_get(chan, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
300 nvc0_query_get(chan, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
301 nvc0_query_get(chan, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
302 nvc0_query_get(chan, q, 0x70, 0x0980a002); /* ROP, PIXELS */
303 nvc0_query_get(chan, q, 0x80, 0x0d808002); /* TCP, LAUNCHES */
304 nvc0_query_get(chan, q, 0x90, 0x0e809002); /* TEP, LAUNCHES */
306 case NVC0_QUERY_TFB_BUFFER_OFFSET:
307 /* indexed by TFB buffer instead of by vertex stream */
308 nvc0_query_get(chan, q, 0x00, 0x0d005002 | (q->index << 5));
/* Non-blocking readiness check: map the bo without waiting; if the map
 * would block (GPU still writing) report not-ready, otherwise compare the
 * written sequence number against the expected one. */
316 static INLINE boolean
317 nvc0_query_ready(struct nvc0_query *q)
320 if (nouveau_bo_map(q->bo, NOUVEAU_BO_RD | NOUVEAU_BO_NOWAIT))
322 nouveau_bo_unmap(q->bo);
325 return q->data[0] == q->sequence;
/* Blocking wait for the query result: a plain read map stalls until the
 * GPU is done writing the bo. Returns whether the map succeeded
 * (the return statement is not visible in this listing). */
329 static INLINE boolean
330 nvc0_query_wait(struct nvc0_query *q)
332 int ret = nouveau_bo_map(q->bo, NOUVEAU_BO_RD);
335 nouveau_bo_unmap(q->bo);
/* pipe_context::get_query_result -- convert the raw GPU reports in q->data
 * into the gallium result format. Most results are end-minus-begin
 * differences of 64-bit counters; report layout per type is noted inline.
 * NOTE(review): control flow (returns, braces, the flush path) is partly
 * missing from this listing; code kept byte-identical. */
340 nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq,
341 boolean wait, void *result)
343 struct nvc0_query *q = nvc0_query(pq);
/* Aliases of the caller's result buffer at different widths. */
344 uint64_t *res64 = result;
345 uint32_t *res32 = result;
346 boolean *res8 = result;
347 uint64_t *data64 = (uint64_t *)q->data;
350 if (!q->ready) /* update ? */
351 q->ready = nvc0_query_ready(q);
353 struct nouveau_channel *chan = nvc0_context(pipe)->screen->base.channel;
/* Kick pending commands so a busy-wait on the result can complete. */
355 if (nouveau_bo_pending(q->bo) & NOUVEAU_BO_WR) /* for daft apps */
359 if (!nvc0_query_wait(q))
365 case PIPE_QUERY_GPU_FINISHED:
368 case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
369 res64[0] = q->data[1] - q->data[5]; /* end count - begin count */
371 case PIPE_QUERY_OCCLUSION_PREDICATE:
372 res8[0] = q->data[1] != q->data[5];
374 case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
375 case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
376 res64[0] = data64[0] - data64[2];
378 case PIPE_QUERY_SO_STATISTICS:
379 res64[0] = data64[0] - data64[4];
380 res64[1] = data64[2] - data64[6];
382 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
383 res8[0] = data64[0] != data64[2];
385 case PIPE_QUERY_TIMESTAMP:
386 res64[0] = data64[1];
388 case PIPE_QUERY_TIMESTAMP_DISJOINT: /* u32 sequence, u32 0, u64 time */
389 res64[0] = 1000000000; /* reported timestamp frequency in Hz */
/* res8[8]: the `disjoint` boolean lives at byte 8, after the u64 above. */
390 res8[8] = (data64[1] == data64[3]) ? FALSE : TRUE;
392 case PIPE_QUERY_TIME_ELAPSED:
393 res64[0] = data64[1] - data64[3];
395 case PIPE_QUERY_PIPELINE_STATISTICS:
/* 10 counters: end values at data64[0..], begin values +0xc0 = data64[24..]. */
396 for (i = 0; i < 10; ++i)
397 res64[i] = data64[i * 2] - data64[24 + i * 2];
399 case NVC0_QUERY_TFB_BUFFER_OFFSET:
400 res32[0] = q->data[1];
/* Make the GPU FIFO wait (semaphore ACQUIRE_EQUAL) until the query's
 * sequence value has been written -- used before render-condition setup
 * when the caller requested waiting. SO_OVERFLOW uses the ZERO query
 * written at +0x20 because PRIMS_DROPPED does not write a sequence
 * (see nvc0_query_end). */
410 nvc0_query_fifo_wait(struct nouveau_channel *chan, struct pipe_query *pq)
412 struct nvc0_query *q = nvc0_query(pq);
413 unsigned offset = q->offset;
415 if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE) offset += 0x20;
417 MARK_RING (chan, 5, 2);
418 BEGIN_RING(chan, RING_3D_(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
419 OUT_RELOCh(chan, q->bo, offset, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
420 OUT_RELOCl(chan, q->bo, offset, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
421 OUT_RING (chan, q->sequence);
422 OUT_RING (chan, (1 << 12) |
423 NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
/* pipe_context::render_condition -- program the hardware COND_MODE so
 * subsequent rendering is predicated on the query result. A NULL query
 * (condition disabled) presumably takes the COND_MODE_ALWAYS path at the
 * top; the guard is not visible in this listing. */
427 nvc0_render_condition(struct pipe_context *pipe,
428 struct pipe_query *pq, uint mode)
430 struct nvc0_context *nvc0 = nvc0_context(pipe);
431 struct nouveau_channel *chan = nvc0->screen->base.channel;
432 struct nvc0_query *q;
434 boolean negated = FALSE;
/* wait = TRUE unless the caller allowed a no-wait (possibly stale) check. */
436 mode != PIPE_RENDER_COND_NO_WAIT &&
437 mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
440 IMMED_RING(chan, RING_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
445 /* NOTE: comparison of 2 queries only works if both have completed */
447 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
448 cond = negated ? NVC0_3D_COND_MODE_EQUAL :
449 NVC0_3D_COND_MODE_NOT_EQUAL;
452 case PIPE_QUERY_OCCLUSION_COUNTER:
453 case PIPE_QUERY_OCCLUSION_PREDICATE:
454 if (likely(!negated)) {
/* Nested occlusion queries share the sample counter, so RES_NON_ZERO
 * would see outer-query samples; fall back to sequence comparison. */
455 if (unlikely(q->nesting))
456 cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
457 NVC0_3D_COND_MODE_ALWAYS;
459 cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
461 cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
465 assert(!"render condition query not a predicate");
/* NOTE(review): assigning `mode` (the pipe-level parameter) to a hardware
 * COND_MODE constant looks wrong -- the value written to the ring below is
 * `cond`, so this probably should be `cond = ...`; otherwise `cond` may be
 * used uninitialized on this path. TODO confirm against upstream. */
466 mode = NVC0_3D_COND_MODE_ALWAYS;
471 nvc0_query_fifo_wait(chan, pq);
473 MARK_RING (chan, 4, 2);
474 BEGIN_RING(chan, RING_3D(COND_ADDRESS_HIGH), 3);
475 OUT_RELOCh(chan, q->bo, q->offset, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
476 OUT_RELOCl(chan, q->bo, q->offset, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
477 OUT_RING (chan, cond);
/* Submit 4 bytes of the query's result buffer as an indirect pushbuffer
 * entry (NO_PREFETCH so the GPU reads the data only when it reaches the
 * entry, i.e. after the query result has been written). */
481 nvc0_query_pushbuf_submit(struct nouveau_channel *chan,
482 struct pipe_query *pq, unsigned result_offset)
484 struct nvc0_query *q = nvc0_query(pq);
/* Bit 8 of IB entry word 1 (stored from the top): disable prefetching. */
486 #define NVC0_IB_ENTRY_1_NO_PREFETCH (1 << (31 - 8))
488 nouveau_pushbuf_submit(chan, q->bo, q->offset + result_offset, 4 |
489 NVC0_IB_ENTRY_1_NO_PREFETCH);
/* Record the current transform-feedback buffer offset for a stream-output
 * target by running its internal NVC0_QUERY_TFB_BUFFER_OFFSET query
 * (ended here; the offset becomes readable via the query result). When
 * *serialize is set, emit a SERIALIZE first (the flag is cleared on a line
 * not visible in this listing -- confirm in the full file). */
493 nvc0_so_target_save_offset(struct pipe_context *pipe,
494 struct pipe_stream_output_target *ptarg,
495 unsigned index, boolean *serialize)
497 struct nvc0_so_target *targ = nvc0_so_target(ptarg);
500 struct nouveau_channel *chan = nvc0_context(pipe)->screen->base.channel;
502 IMMED_RING(chan, RING_3D(SERIALIZE), 0);
/* The TFB-offset query is indexed by buffer, not by vertex stream. */
505 nvc0_query(targ->pq)->index = index;
507 nvc0_query_end(pipe, targ->pq);
/* Hook the query entry points into the context's pipe_context vtable. */
511 nvc0_init_query_functions(struct nvc0_context *nvc0)
513 struct pipe_context *pipe = &nvc0->base.pipe;
515 pipe->create_query = nvc0_query_create;
516 pipe->destroy_query = nvc0_query_destroy;
517 pipe->begin_query = nvc0_query_begin;
518 pipe->end_query = nvc0_query_end;
519 pipe->get_query_result = nvc0_query_result;
520 pipe->render_condition = nvc0_render_condition;