#include "ilo_state.h"
#include "ilo_3d.h"
+static void
+process_query_for_occlusion_counter(struct ilo_3d *hw3d,
+ struct ilo_query *q)
+{
+ uint64_t *vals, depth_count = 0;
+ int i;
+
+   /* in pairs: one value at query begin/resume and one at pause/end */
+ assert(q->reg_read % 2 == 0);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+ for (i = 1; i < q->reg_read; i += 2)
+ depth_count += vals[i] - vals[i - 1];
+ q->bo->unmap(q->bo);
+
+   /* accumulate so that the query can be resumed if desired */
+ q->data.u64 += depth_count;
+ q->reg_read = 0;
+}
+
+static uint64_t
+timestamp_to_ns(uint64_t timestamp)
+{
+ /* see ilo_get_timestamp() */
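+   /* the counter is assumed to tick once every 80 ns, hence the scale;
+    * masking to the low 32 bits presumably avoids overflowing the
+    * multiply */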
+ return (timestamp & 0xffffffff) * 80;
+}
+
+static void
+process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
+{
+ uint64_t *vals, timestamp;
+
+ assert(q->reg_read == 1);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+ timestamp = vals[0];
+ q->bo->unmap(q->bo);
+
+ q->data.u64 = timestamp_to_ns(timestamp);
+ q->reg_read = 0;
+}
+
+static void
+process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
+{
+ uint64_t *vals, elapsed = 0;
+ int i;
+
+   /* in pairs: one value at query begin/resume and one at pause/end */
+ assert(q->reg_read % 2 == 0);
+
+ q->bo->map(q->bo, false);
+ vals = q->bo->get_virtual(q->bo);
+
+ for (i = 1; i < q->reg_read; i += 2)
+ elapsed += vals[i] - vals[i - 1];
+
+ q->bo->unmap(q->bo);
+
+   /* accumulate so that the query can be resumed if desired */
+ q->data.u64 += timestamp_to_ns(elapsed);
+ q->reg_read = 0;
+}
+
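+/*
+ * A query bo holds at most reg_total register snapshots.  When resuming a
+ * query would overflow its bo, the completed pairs are folded into
+ * q->data.u64 first (see the process_query_for_*() helpers above) and the
+ * bo is refilled from slot 0.
+ */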
+static void
+ilo_3d_resume_queries(struct ilo_3d *hw3d)
+{
+ struct ilo_query *q;
+
+ /* resume occlusion queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
+      /* accumulate the result if the bo is already full */
+ if (q->reg_read >= q->reg_total)
+ process_query_for_occlusion_counter(hw3d, q);
+
+ ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+
+ /* resume timer queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
+      /* accumulate the result if the bo is already full */
+ if (q->reg_read >= q->reg_total)
+ process_query_for_time_elapsed(hw3d, q);
+
+ ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+}
+
+static void
+ilo_3d_pause_queries(struct ilo_3d *hw3d)
+{
+ struct ilo_query *q;
+
+ /* pause occlusion queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
+ assert(q->reg_read < q->reg_total);
+ ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+
+ /* pause timer queries */
+ LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
+ assert(q->reg_read < q->reg_total);
+ ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
+ q->bo, q->reg_read++);
+ }
+}
+
+static void
+ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
+{
+ struct ilo_3d *hw3d = data;
+
+ ilo_3d_pause_queries(hw3d);
+}
+
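+/*
+ * Gaining ownership of the render ring resumes the outstanding queries,
+ * while losing it to another owner pauses them through the release
+ * callback above.  owner_reserve keeps enough space in the batch for the
+ * pausing commands to be emitted at any time.
+ */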
+static void
+ilo_3d_own_render_ring(struct ilo_3d *hw3d)
+{
+ ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+
+ if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
+ ilo_3d_resume_queries(hw3d);
+}
+
/**
* Begin a query.
*/
{
struct ilo_3d *hw3d = ilo->hw3d;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
+ hw3d->owner_reserve += q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
+ hw3d->owner_reserve += q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
{
struct ilo_3d *hw3d = ilo->hw3d;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
+ hw3d->owner_reserve -= q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
break;
list_del(&q->list);
assert(q->reg_read < q->reg_total);
- ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
+ hw3d->owner_reserve -= q->reg_cmd_size;
+ ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
break;
}
}
-static void
-process_query_for_occlusion_counter(struct ilo_3d *hw3d,
- struct ilo_query *q)
-{
- uint64_t *vals, depth_count = 0;
- int i;
-
- /* in pairs */
- assert(q->reg_read % 2 == 0);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
- for (i = 1; i < q->reg_read; i += 2)
- depth_count += vals[i] - vals[i - 1];
- q->bo->unmap(q->bo);
-
- /* accumulate so that the query can be resumed if wanted */
- q->data.u64 += depth_count;
- q->reg_read = 0;
-}
-
-static uint64_t
-timestamp_to_ns(uint64_t timestamp)
-{
- /* see ilo_get_timestamp() */
- return (timestamp & 0xffffffff) * 80;
-}
-
-static void
-process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
-{
- uint64_t *vals, timestamp;
-
- assert(q->reg_read == 1);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
- timestamp = vals[0];
- q->bo->unmap(q->bo);
-
- q->data.u64 = timestamp_to_ns(timestamp);
- q->reg_read = 0;
-}
-
-static void
-process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
-{
- uint64_t *vals, elapsed = 0;
- int i;
-
- /* in pairs */
- assert(q->reg_read % 2 == 0);
-
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
-
- for (i = 1; i < q->reg_read; i += 2)
- elapsed += vals[i] - vals[i - 1];
-
- q->bo->unmap(q->bo);
-
- /* accumulate so that the query can be resumed if wanted */
- q->data.u64 += timestamp_to_ns(elapsed);
- q->reg_read = 0;
-}
-
/**
* Process the raw query data.
*/
- * Hook for CP new-batch.
+ * Hook for CP flush.
*/
void
-ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
+ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
- struct ilo_query *q;
-
- hw3d->new_batch = true;
+ if (ilo_debug & ILO_DEBUG_3D)
+ ilo_3d_pipeline_dump(hw3d->pipeline);
/* invalidate the pipeline */
ilo_3d_pipeline_invalidate(hw3d->pipeline,
ILO_3D_PIPELINE_INVALIDATE_HW);
}
- /* resume occlusion queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
- /* accumulate the result if the bo is alreay full */
- if (q->reg_read >= q->reg_total)
- process_query_for_occlusion_counter(hw3d, q);
-
- ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-
- /* resume timer queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
- /* accumulate the result if the bo is alreay full */
- if (q->reg_read >= q->reg_total)
- process_query_for_time_elapsed(hw3d, q);
-
- ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-}
-
-/**
- * Hook for CP pre-flush.
- */
-void
-ilo_3d_pre_cp_flush(struct ilo_3d *hw3d)
-{
- struct ilo_query *q;
-
- /* pause occlusion queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
- assert(q->reg_read < q->reg_total);
- ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-
- /* pause timer queries */
- LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
- assert(q->reg_read < q->reg_total);
- ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
- q->bo, q->reg_read++);
- }
-}
-
-/**
- * Hook for CP post-flush
- */
-void
-ilo_3d_post_cp_flush(struct ilo_3d *hw3d)
-{
- if (ilo_debug & ILO_DEBUG_3D)
- ilo_3d_pipeline_dump(hw3d->pipeline);
+ hw3d->new_batch = true;
}
/**
return NULL;
hw3d->cp = cp;
+ hw3d->owner.release_callback = ilo_3d_release_render_ring;
+ hw3d->owner.release_data = hw3d;
+
hw3d->new_batch = true;
list_inithead(&hw3d->occlusion_queries);
bool need_flush;
int max_len;
- ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
+ ilo_3d_own_render_ring(hw3d);
/*
* Without a better tracking mechanism, when the framebuffer changes, we
static const int ilo_cp_private = 2;
/**
- * Dump the contents of the parser bo. This must be called in a post-flush
- * hook.
+ * Dump the contents of the parser bo. This can only be called in the flush
+ * callback.
*/
void
ilo_cp_dump(struct ilo_cp *cp)
/*
* Recalculate cp->size. This is needed not only because cp->stolen is
- * reset above, but also that we added cp->reserve_for_pre_flush and
- * ilo_cp_private to cp->size in ilo_cp_flush().
+    * reset above, but also because ilo_cp_private is added to cp->size in
+ * ilo_cp_end_buffer().
*/
- cp->size = cp->bo_size - (cp->reserve_for_pre_flush + ilo_cp_private);
+ cp->size = cp->bo_size - ilo_cp_private;
}
/**
{
int err;
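+
+   /* when there is no system staging buffer, the commands were written
+    * directly to the mapped bo; unmapping it is all that is needed */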
+ if (!cp->sys) {
+ cp->bo->unmap(cp->bo);
+ return 0;
+ }
+
err = cp->bo->pwrite(cp->bo, 0, cp->used * 4, cp->ptr);
if (likely(!err && cp->stolen)) {
const int offset = cp->bo_size - cp->stolen;
*/
bo = cp->winsys->alloc_buffer(cp->winsys,
"batch buffer", cp->bo_size * 4, 0);
- if (unlikely(!bo))
- return;
+ if (unlikely(!bo)) {
+ /* reuse the old one */
+ bo = cp->bo;
+ bo->reference(bo);
+ }
if (cp->bo)
cp->bo->unreference(cp->bo);
return err;
}
-static void
-ilo_cp_call_hook(struct ilo_cp *cp, enum ilo_cp_hook hook)
-{
- const bool no_implicit_flush = cp->no_implicit_flush;
-
- if (!cp->hooks[hook].func)
- return;
-
- /* no implicit flush in hooks */
- cp->no_implicit_flush = true;
- cp->hooks[hook].func(cp, cp->hooks[hook].data);
-
- cp->no_implicit_flush = no_implicit_flush;
-}
-
/**
* Flush the command parser and execute the commands. When the parser buffer
- * is empty, the hooks are not invoked.
+ * is empty, the callback is not invoked.
*/
void
ilo_cp_flush(struct ilo_cp *cp)
{
int err;
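+
+   /* release the current owner first: its release callback may still need
+    * to emit commands (e.g., to pause queries) into this batch */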
+ ilo_cp_set_owner(cp, NULL, 0);
+
/* sanity check */
- assert(cp->bo_size == cp->size +
- cp->reserve_for_pre_flush + ilo_cp_private + cp->stolen);
+ assert(cp->bo_size == cp->size + cp->stolen + ilo_cp_private);
if (!cp->used) {
+      /* return the stolen space, etc. */
ilo_cp_clear_buffer(cp);
+
return;
}
- /* make the reserved space available temporarily */
- cp->size += cp->reserve_for_pre_flush;
- ilo_cp_call_hook(cp, ILO_CP_HOOK_PRE_FLUSH);
-
ilo_cp_end_buffer(cp);
- if (cp->sys) {
- err = ilo_cp_upload_buffer(cp);
- if (likely(!err))
- err = ilo_cp_exec_bo(cp);
- }
- else {
- cp->bo->unmap(cp->bo);
+ /* upload and execute */
+ err = ilo_cp_upload_buffer(cp);
+ if (likely(!err))
err = ilo_cp_exec_bo(cp);
- }
- if (likely(!err)) {
- ilo_cp_call_hook(cp, ILO_CP_HOOK_POST_FLUSH);
- ilo_cp_clear_buffer(cp);
- }
- else {
- /* reset first so that post-flush hook knows nothing was executed */
- ilo_cp_clear_buffer(cp);
- ilo_cp_call_hook(cp, ILO_CP_HOOK_POST_FLUSH);
- }
+ if (likely(!err && cp->flush_callback))
+ cp->flush_callback(cp, cp->flush_callback_data);
+ ilo_cp_clear_buffer(cp);
ilo_cp_realloc_bo(cp);
- ilo_cp_call_hook(cp, ILO_CP_HOOK_NEW_BATCH);
}
/**
cp->ring = ILO_CP_RING_RENDER;
cp->no_implicit_flush = false;
- cp->reserve_for_pre_flush = 0;
-
- memset(cp->hooks, 0, sizeof(cp->hooks));
cp->bo_size = 8192;
ILO_CP_RING_COUNT,
};
-enum ilo_cp_hook {
- ILO_CP_HOOK_NEW_BATCH,
- ILO_CP_HOOK_PRE_FLUSH,
- ILO_CP_HOOK_POST_FLUSH,
+typedef void (*ilo_cp_callback)(struct ilo_cp *cp, void *data);
- ILO_CP_HOOK_COUNT,
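+
+/*
+ * The owner of the command parser is notified through release_callback
+ * right before it loses ownership.
+ */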
+struct ilo_cp_owner {
+ ilo_cp_callback release_callback;
+ void *release_data;
};
-typedef void (*ilo_cp_hook_func)(struct ilo_cp *cp, void *data);
-
/**
* Command parser.
*/
struct intel_winsys *winsys;
struct intel_context *render_ctx;
+ ilo_cp_callback flush_callback;
+ void *flush_callback_data;
+
+ const struct ilo_cp_owner *owner;
+ int owner_reserve;
+
enum ilo_cp_ring ring;
bool no_implicit_flush;
- int reserve_for_pre_flush;
unsigned one_off_flags;
- struct {
- ilo_cp_hook_func func;
- void *data;
- } hooks[ILO_CP_HOOK_COUNT];
-
int bo_size;
struct intel_bo *bo;
uint32_t *sys;
}
/**
- * Reserve the given size of space from the parser buffer. The reserved space
- * will be made available temporarily for the pre-flush hook.
- *
- * \param reserve size in dwords to reserve. It may be negative.
+ * Set one-off flags. They will be cleared after flushing.
*/
static inline void
-ilo_cp_reserve_for_pre_flush(struct ilo_cp *cp, int reserve)
+ilo_cp_set_one_off_flags(struct ilo_cp *cp, unsigned flags)
{
- assert(cp->reserve_for_pre_flush + reserve >= 0);
-
- if (cp->used > cp->size - reserve) {
- ilo_cp_implicit_flush(cp);
- assert(cp->used <= cp->size - reserve);
- }
-
- cp->size -= reserve;
- cp->reserve_for_pre_flush += reserve;
+ cp->one_off_flags |= flags;
}
/**
- * Set one-off flags. They will be cleared after flushing.
+ * Set the flush callback. The callback is invoked after the bo has been
+ * successfully executed, and before the bo is reallocated.
*/
static inline void
-ilo_cp_set_one_off_flags(struct ilo_cp *cp, unsigned flags)
+ilo_cp_set_flush_callback(struct ilo_cp *cp, ilo_cp_callback callback,
+ void *data)
{
- cp->one_off_flags |= flags;
+ cp->flush_callback = callback;
+ cp->flush_callback_data = data;
}
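+
+/*
+ * A usage sketch with a hypothetical handler name (the actual registration
+ * happens elsewhere):
+ *
+ *   static void on_cp_flushed(struct ilo_cp *cp, void *data)
+ *   {
+ *      struct ilo_3d *hw3d = data;
+ *      ilo_3d_cp_flushed(hw3d);
+ *   }
+ *
+ *   ilo_cp_set_flush_callback(cp, on_cp_flushed, hw3d);
+ */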
-
/**
- * Set a command parser hook.
+ * Set the parser owner. If this is a new owner, the previous owner is
+ * notified and the space it reserved is reclaimed.
+ *
+ * \return true if this is a new owner
*/
-static inline void
-ilo_cp_set_hook(struct ilo_cp *cp, enum ilo_cp_hook hook,
- ilo_cp_hook_func func, void *data)
+static inline bool
+ilo_cp_set_owner(struct ilo_cp *cp, const struct ilo_cp_owner *owner,
+ int reserve)
{
- cp->hooks[hook].func = func;
- cp->hooks[hook].data = data;
+ const bool new_owner = (cp->owner != owner);
+
+ /* release current owner */
+ if (new_owner && cp->owner) {
+ const bool no_implicit_flush = cp->no_implicit_flush;
+
+ /* reclaim the reserved space */
+ cp->size += cp->owner_reserve;
+ cp->owner_reserve = 0;
+
+ /* invoke the release callback */
+ cp->no_implicit_flush = true;
+ cp->owner->release_callback(cp, cp->owner->release_data);
+ cp->no_implicit_flush = no_implicit_flush;
+
+ cp->owner = NULL;
+ }
+
+ if (cp->owner_reserve != reserve) {
+ const int extra = reserve - cp->owner_reserve;
+
+ if (cp->used > cp->size - extra) {
+ ilo_cp_implicit_flush(cp);
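+
+         /* the flush dropped the previous owner and reclaimed its reserve
+          * (ilo_cp_flush() sets the owner to NULL), so the full reserve is
+          * subtracted below */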
+ assert(cp->used <= cp->size - reserve);
+
+ cp->size -= reserve;
+ cp->owner_reserve = reserve;
+ }
+ else {
+ cp->size -= extra;
+ cp->owner_reserve += extra;
+ }
+ }
+
+ /* set owner last because of the possible flush above */
+ cp->owner = owner;
+
+ return new_owner;
}
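+
+/*
+ * A usage sketch (this is what ilo_3d_own_render_ring() does):
+ *
+ *   if (ilo_cp_set_owner(cp, &hw3d->owner, hw3d->owner_reserve)) {
+ *      /* ownership was just (re)acquired *\/
+ *      ilo_3d_resume_queries(hw3d);
+ *   }
+ */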
/**