struct fd_ringbuffer *ring, enum fd_render_stage stage)
{
struct fd_context *ctx = batch->ctx;
+
+ /* special case: internal blits (like mipmap level generation)
+ * go through the normal draw path (via util_blitter_blit()).. but
+ * we need to ignore FD_STAGE_DRAW, which will be set, so that we
+ * don't enable queries that should be paused during internal
+ * blits:
+ */
+ if ((batch->stage == FD_STAGE_BLIT) &&
+ (stage != FD_STAGE_NULL))
+ return;
+
if (ctx->query_set_stage)
ctx->query_set_stage(batch, ring, stage);
+
+ batch->stage = stage;
}
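/* For context, not part of this change: batch->stage can be FD_STAGE_BLIT
 * here because internal blits bracket util_blitter_blit() with the blit
 * stage.  A minimal sketch of that bracketing, assuming the function above
 * is fd_batch_set_stage() and using illustrative helper names
 * (fd_blitter_pipe_begin/end):
 */
static void
fd_blitter_pipe_begin(struct fd_context *ctx)
{
	/* ... util_blitter_save_*() calls to save current state ... */
	fd_batch_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_BLIT);
}

static void
fd_blitter_pipe_end(struct fd_context *ctx)
{
	/* back to "no stage" once the internal blit is done: */
	fd_batch_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
}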
void fd_context_setup_common_vbos(struct fd_context *ctx);
fd_begin_query(struct pipe_context *pctx, struct pipe_query *pq)
{
struct fd_query *q = fd_query(pq);
- return q->funcs->begin_query(fd_context(pctx), q);
+ boolean ret;
+
+ if (q->active)
+ return false;
+
+ ret = q->funcs->begin_query(fd_context(pctx), q);
+ q->active = ret;
+
+ return ret;
}
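/* For reference: the q->active flag consulted above is assumed to live in
 * the common query struct, so active-ness is tracked once in core code
 * rather than by each hw/sw backend (field layout is illustrative):
 */
struct fd_query {
	const struct fd_query_funcs *funcs;
	bool active;
	int type;
};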
static bool
fd_end_query(struct pipe_context *pctx, struct pipe_query *pq)
{
struct fd_query *q = fd_query(pq);
+
+ /* there are a couple of special cases that don't have
+ * a matching ->begin_query():
+ */
+ if (skip_begin_query(q->type) && !q->active)
+ fd_begin_query(pctx, pq);
+
+ if (!q->active)
+ return false;
+
q->funcs->end_query(fd_context(pctx), q);
+ q->active = false;
+
return true;
}
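/* A sketch of the skip_begin_query() helper referenced above: query types
 * which the state tracker is allowed to end without a matching begin (the
 * exact set listed here is an assumption):
 */
static inline bool
skip_begin_query(int type)
{
	switch (type) {
	case PIPE_QUERY_TIMESTAMP:
	case PIPE_QUERY_GPU_FINISHED:
		return true;
	default:
		return false;
	}
}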
boolean wait, union pipe_query_result *result)
{
struct fd_query *q = fd_query(pq);
+
+ if (q->active)
+ return false;
+
+ util_query_clear_result(result, q->type);
+
return q->funcs->get_query_result(fd_context(pctx), q, wait, result);
}
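/* Taken together, begin/end/get_query_result now enforce a small state
 * machine in the common code: inactive -> begin -> active -> end ->
 * inactive -> read results.  A hedged caller-side sketch using the gallium
 * interface from p_context.h:
 */
static void
example_occlusion_query(struct pipe_context *pctx)
{
	union pipe_query_result result;
	struct pipe_query *pq =
		pctx->create_query(pctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);

	pctx->begin_query(pctx, pq);     /* returns false if already active */
	/* ... emit draws ... */
	pctx->end_query(pctx, pq);       /* clears q->active */
	pctx->get_query_result(pctx, pq, true, &result);  /* wait=true */
	pctx->destroy_query(pctx, pq);
}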
DBG("%p: active=%d", q, q->active);
- if (q->active)
- return false;
-
/* begin_query() should clear previous results: */
destroy_periods(ctx, hq);
if (batch && is_active(hq, batch->stage))
resume_query(batch, hq, batch->draw);
- q->active = true;
-
/* add to active list: */
assert(list_empty(&hq->list));
list_addtail(&hq->list, &ctx->active_queries);
struct fd_batch *batch = ctx->batch;
struct fd_hw_query *hq = fd_hw_query(q);
- /* there are a couple special cases, which don't have
- * a matching ->begin_query():
- */
- if (skip_begin_query(q->type) && !q->active) {
- fd_hw_begin_query(ctx, q);
- }
-
DBG("%p: active=%d", q, q->active);
- if (!q->active)
- return;
-
if (batch && is_active(hq, batch->stage))
pause_query(batch, hq, batch->draw);
- q->active = false;
/* remove from active list: */
list_delinit(&hq->list);
}
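/* is_active() above is assumed to test the query provider's stage bitmask
 * against the given render stage, roughly:
 */
static bool
is_active(struct fd_hw_query *hq, enum fd_render_stage stage)
{
	return !!(hq->provider->active & stage);
}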
DBG("%p: wait=%d, active=%d", q, wait, q->active);
- if (q->active)
- return false;
-
- util_query_clear_result(result, q->type);
-
if (LIST_IS_EMPTY(&hq->periods))
return true;
fd_hw_query_set_stage(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum fd_render_stage stage)
{
- /* special case: internal blits (like mipmap level generation)
- * go through normal draw path (via util_blitter_blit()).. but
- * we need to ignore the FD_STAGE_DRAW which will be set, so we
- * don't enable queries which should be paused during internal
- * blits:
- */
- if ((batch->stage == FD_STAGE_BLIT) &&
- (stage != FD_STAGE_NULL))
- return;
-
if (stage != batch->stage) {
struct fd_hw_query *hq;
LIST_FOR_EACH_ENTRY(hq, &batch->ctx->active_queries, list) {
}
}
clear_sample_cache(batch);
- batch->stage = stage;
}
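/* Sketch of the pause/resume loop elided above: when the render stage
 * changes, hw queries are resumed or paused depending on whether their
 * provider is active in the new stage.  The helper name
 * update_active_queries() is hypothetical:
 */
static void
update_active_queries(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum fd_render_stage stage)
{
	struct fd_hw_query *hq;

	LIST_FOR_EACH_ENTRY(hq, &batch->ctx->active_queries, list) {
		bool was_active = is_active(hq, batch->stage);
		bool now_active = is_active(hq, stage);

		if (now_active && !was_active)
			resume_query(batch, hq, ring);
		else if (was_active && !now_active)
			pause_query(batch, hq, ring);
	}
}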
/* call the provider->enable() for all the hw queries that were active
fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
struct fd_sw_query *sq = fd_sw_query(q);
- q->active = true;
sq->begin_value = read_counter(ctx, q->type);
if (is_rate_query(q))
sq->begin_time = os_time_get();
fd_sw_end_query(struct fd_context *ctx, struct fd_query *q)
{
struct fd_sw_query *sq = fd_sw_query(q);
- q->active = false;
sq->end_value = read_counter(ctx, q->type);
if (is_rate_query(q))
sq->end_time = os_time_get();
{
struct fd_sw_query *sq = fd_sw_query(q);
- if (q->active)
- return false;
-
- util_query_clear_result(result, q->type);
-
result->u64 = sq->end_value - sq->begin_value;
if (is_rate_query(q)) {