}
/* Count in queries_suspend. */
- num_dw += ctx->num_cs_dw_queries_suspend;
+ num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
+ num_dw += ctx->num_cs_dw_timer_queries_suspend;
/* Count in streamout_end at the end of CS. */
num_dw += ctx->num_cs_dw_streamout_end;
{
struct radeon_winsys_cs *cs = ctx->cs;
struct r600_block *enable_block = NULL;
- bool queries_suspended = false;
+ bool timer_queries_suspended = false;
+ bool nontimer_queries_suspended = false;
bool streamout_suspended = false;
if (cs->cdw == ctx->atom_start_cs.atom.num_dw)
return;
/* suspend queries */
- if (ctx->num_cs_dw_queries_suspend) {
- r600_suspend_queries(ctx);
- queries_suspended = true;
+ if (ctx->num_cs_dw_timer_queries_suspend) {
+ r600_suspend_timer_queries(ctx);
+ timer_queries_suspended = true;
+ }
+ if (ctx->num_cs_dw_nontimer_queries_suspend) {
+ r600_suspend_nontimer_queries(ctx);
+ nontimer_queries_suspended = true;
}
if (ctx->num_cs_dw_streamout_end) {
}
/* resume queries */
- if (queries_suspended) {
- r600_resume_queries(ctx);
+ if (timer_queries_suspended) {
+ r600_resume_timer_queries(ctx);
+ }
+ if (nontimer_queries_suspended) {
+ r600_resume_nontimer_queries(ctx);
}
/* set all valid group as dirty so they get reemited on
/* The list of active queries. Only one query of each type can be active. */
int num_occlusion_queries;
- struct list_head active_query_list;
- unsigned num_cs_dw_queries_suspend;
+
+ /* Manage queries in two separate groups:
+ * The timer ones and the others (streamout, occlusion).
+ *
+ * We do this because we should only suspend non-timer queries for u_blitter,
+ * and later if the non-timer queries are suspended, the context flush should
+ * only suspend and resume the timer queries. */
+ struct list_head active_timer_queries;
+ unsigned num_cs_dw_timer_queries_suspend;
+ struct list_head active_nontimer_queries;
+ unsigned num_cs_dw_nontimer_queries_suspend;
+
unsigned num_cs_dw_streamout_end;
unsigned backend_mask;
/* r600_query.c */
void r600_init_query_functions(struct r600_context *rctx);
-void r600_suspend_queries(struct r600_context *ctx);
-void r600_resume_queries(struct r600_context *ctx);
+void r600_suspend_nontimer_queries(struct r600_context *ctx);
+void r600_resume_nontimer_queries(struct r600_context *ctx);
+void r600_suspend_timer_queries(struct r600_context *ctx);
+void r600_resume_timer_queries(struct r600_context *ctx);
/* r600_resource.c */
void r600_init_context_resource_functions(struct r600_context *r600);
#include "util/u_memory.h"
#include "r600_hw_context_priv.h"
+/* Classify a query type: timer queries (time-elapsed / timestamp) are
+ * tracked separately from the nontimer (occlusion, streamout) ones, so
+ * each group can be suspended and resumed independently. */
+static bool r600_is_timer_query(unsigned type)
+{
+ return type == PIPE_QUERY_TIME_ELAPSED ||
+ type == PIPE_QUERY_TIMESTAMP ||
+ type == PIPE_QUERY_TIMESTAMP_DISJOINT;
+}
+
static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
unsigned j, i, num_results, buf_size = 4096;
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);
- ctx->num_cs_dw_queries_suspend += query->num_cs_dw;
+ if (r600_is_timer_query(query->type)) {
+ ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
+ } else {
+ ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
+ }
}
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);
query->buffer.results_end += query->result_size;
- ctx->num_cs_dw_queries_suspend -= query->num_cs_dw;
+
+ if (r600_is_timer_query(query->type)) {
+ ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
+ } else {
+ ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
+ }
}
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
r600_update_occlusion_query_state(rctx, rquery->type, 1);
r600_emit_query_begin(rctx, rquery);
- LIST_ADDTAIL(&rquery->list, &rctx->active_query_list);
+
+ if (r600_is_timer_query(rquery->type)) {
+ LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
+ } else {
+ LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
+ }
}
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
}
}
+/* Emit the "end" packets for every active nontimer query (occlusion,
+ * streamout) so their results stay valid across a CS flush or u_blitter
+ * use.  The per-query CS-dword counter must drop back to zero once all
+ * of them have been ended. */
-void r600_suspend_queries(struct r600_context *ctx)
+void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
struct r600_query *query;
- LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
r600_emit_query_end(ctx, query);
}
- assert(ctx->num_cs_dw_queries_suspend == 0);
+ assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+}
+
+/* Re-emit the "begin" packets for every nontimer query previously
+ * stopped by r600_suspend_nontimer_queries().  All of them must have
+ * been ended first, hence the counter is expected to be zero here. */
+void r600_resume_nontimer_queries(struct r600_context *ctx)
+{
+ struct r600_query *query;
+
+ assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
+ r600_emit_query_begin(ctx, query);
+ }
+}
+
+/* Emit the "end" packets for every active timer query; called at CS
+ * flush time (timer queries are not suspended for u_blitter, per the
+ * comment on the active_timer_queries list). */
+void r600_suspend_timer_queries(struct r600_context *ctx)
+{
+ struct r600_query *query;
+
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
+ r600_emit_query_end(ctx, query);
+ }
+
+ assert(ctx->num_cs_dw_timer_queries_suspend == 0);
}
+/* Re-emit the "begin" packets for every timer query previously stopped
+ * by r600_suspend_timer_queries(); all must have been ended first, so
+ * the suspend counter is expected to be zero on entry. */
-void r600_resume_queries(struct r600_context *ctx)
+void r600_resume_timer_queries(struct r600_context *ctx)
{
struct r600_query *query;
- assert(ctx->num_cs_dw_queries_suspend == 0);
+ assert(ctx->num_cs_dw_timer_queries_suspend == 0);
- LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
r600_emit_query_begin(ctx, query);
}
}