/* XXX we should check the aperture size */
if (ilo_cp_space(hw3d->cp) < q->reg_cmd_size * 2) {
- ilo_cp_flush(hw3d->cp, "out of space");
+ ilo_cp_submit(hw3d->cp, "out of space");
assert(ilo_cp_space(hw3d->cp) >= q->reg_cmd_size * 2);
}
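This reserve-then-assert pattern recurs throughout the series: if the batch
cannot hold the worst-case command size, submit it and retry against an empty
batch. For reference, a minimal sketch of what a space query such as
ilo_cp_space() could look like, assuming the builder reports its remaining
dwords and the parser keeps room reserved for the current owner's release():

/* a sketch, not the driver's actual implementation */
static inline int
ilo_cp_space(struct ilo_cp *cp)
{
   /* dwords still unused in the current batch buffer */
   const int space = ilo_builder_batch_space(&cp->builder);

   /* never hand out the room reserved for the owner's release() */
   assert(space >= cp->owner_reserve);

   return space - cp->owner_reserve;
}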
- * Hook for CP new-batch.
+ * Hook for CP submission.
*/
void
-ilo_3d_cp_flushed(struct ilo_3d *hw3d)
+ilo_3d_cp_submitted(struct ilo_3d *hw3d)
{
if (ilo_debug & ILO_DEBUG_3D)
ilo_builder_decode(&hw3d->cp->builder);
}
if (max_len > ilo_cp_space(hw3d->cp)) {
- ilo_cp_flush(hw3d->cp, "out of space");
+ ilo_cp_submit(hw3d->cp, "out of space");
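+ /* need_flush asks for a hardware pipeline flush, not a batch submission
+  * (which is presumably why it keeps its name); the freshly started batch
+  * no longer needs it */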
need_flush = false;
assert(max_len <= ilo_cp_space(hw3d->cp));
}
- /* don't know why */
+ /* it is unclear why Gen7+ requires a batch submission here */
if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
- ilo_cp_flush(hw3d->cp, "texture barrier");
+ ilo_cp_submit(hw3d->cp, "texture barrier");
}
static void
ilo_3d_destroy(struct ilo_3d *hw3d);
void
-ilo_3d_cp_flushed(struct ilo_3d *hw3d);
+ilo_3d_cp_submitted(struct ilo_3d *hw3d);
void
ilo_3d_own_render_ring(struct ilo_3d *hw3d);
- /* flush and try again */
+ /* submit and try again */
if (ilo_builder_batch_used(&p->cp->builder)) {
- ilo_cp_flush(p->cp, "out of aperture");
+ ilo_cp_submit(p->cp, "out of aperture");
continue;
}
ILO_3D_PIPELINE_RECTLIST, blitter);
if (max_len > ilo_cp_space(p->cp))
- ilo_cp_flush(p->cp, "out of space");
+ ilo_cp_submit(p->cp, "out of space");
while (true) {
struct ilo_builder_snapshot snapshot;
- /* flush and try again */
+ /* submit and try again */
if (ilo_builder_batch_used(&p->cp->builder)) {
- ilo_cp_flush(p->cp, "out of aperture");
+ ilo_cp_submit(p->cp, "out of aperture");
continue;
}
}
}
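The hunk above elides the middle of the retry loop. A sketch of the full
pattern, assuming the builder's snapshot/restore helpers and using a
hypothetical emit_rectlist() for whatever commands are being built:

while (true) {
   struct ilo_builder_snapshot snapshot;

   /* remember the batch state so a failed emit can be undone */
   ilo_builder_batch_snapshot(&p->cp->builder, &snapshot);

   emit_rectlist(p, blitter); /* hypothetical emit step */

   /* aperture check; on failure, roll back the partial emit */
   if (!ilo_builder_validate(&p->cp->builder, 0, NULL)) {
      ilo_builder_batch_restore(&p->cp->builder, &snapshot);

      /* submit and try again with an empty batch */
      if (ilo_builder_batch_used(&p->cp->builder)) {
         ilo_cp_submit(p->cp, "out of aperture");
         continue;
      }
   }

   break;
}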
if (!ilo_builder_validate(&ilo->cp->builder, count, aper_check))
- ilo_cp_flush(ilo->cp, "out of aperture");
+ ilo_cp_submit(ilo->cp, "out of aperture");
/* set BCS_SWCTRL */
swctrl = 0x0;
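+ /* presumably an MI_FLUSH_DW (4 dwords) plus an MI_LOAD_REGISTER_IMM
+  * (3 dwords), once to set BCS_SWCTRL and once to restore it */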
max_cmd_size += (4 + 3) * 2;
if (ilo_cp_space(ilo->cp) < max_cmd_size) {
- ilo_cp_flush(ilo->cp, "out of space");
+ ilo_cp_submit(ilo->cp, "out of space");
assert(ilo_cp_space(ilo->cp) >= max_cmd_size);
}
ILO_DEBUG_FS = 1 << 3,
ILO_DEBUG_CS = 1 << 4,
ILO_DEBUG_DRAW = ILO_DEBUG_HOT << 5,
- ILO_DEBUG_FLUSH = 1 << 6,
+ ILO_DEBUG_SUBMIT = 1 << 6,
/* flags that affect the behaviors of the driver */
ILO_DEBUG_NOHW = 1 << 20,
#include "ilo_context.h"
static void
-ilo_context_cp_flushed(struct ilo_cp *cp, void *data)
+ilo_context_cp_submitted(struct ilo_cp *cp, void *data)
{
struct ilo_context *ilo = ilo_context(data);
- ilo_3d_cp_flushed(ilo->hw3d);
+ ilo_3d_cp_submitted(ilo->hw3d);
}
static void
{
struct ilo_context *ilo = ilo_context(pipe);
- ilo_cp_flush(ilo->cp,
+ ilo_cp_submit(ilo->cp,
(flags & PIPE_FLUSH_END_OF_FRAME) ? "frame end" : "user request");
if (f) {
return NULL;
}
- ilo_cp_set_flush_callback(ilo->cp,
- ilo_context_cp_flushed, (void *) ilo);
+ ilo_cp_set_submit_callback(ilo->cp,
+ ilo_context_cp_submitted, (void *) ilo);
ilo->base.screen = screen;
ilo->base.priv = priv;
* Set the parser owner. If this is a new owner or a new ring, the old owner
* is released and the new owner's own() is called.
*
- * The parser may be implicitly flushed if there is a ring change or there is
- * not enough space for the new owner.
+ * The parser may implicitly submit the batch if there is a ring change or
+ * there is not enough space for the new owner.
*/
void
ilo_cp_set_owner(struct ilo_cp *cp, enum intel_ring_type ring,
owner = &ilo_cp_default_owner;
if (cp->ring != ring) {
- ilo_cp_flush(cp, "ring change");
+ ilo_cp_submit(cp, "ring change");
cp->ring = ring;
}
/* multiply by 2 because there are own() and release() */
if (ilo_cp_space(cp) < owner->reserve * 2) {
- ilo_cp_flush(cp, "new owner");
+ ilo_cp_submit(cp, "new owner");
assert(ilo_cp_space(cp) >= owner->reserve * 2);
}
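The own()/release() pair and the reserve value used above suggest an owner
descriptor along these lines (a sketch; the exact field layout is an
assumption):

struct ilo_cp_owner {
   /* called when the owner gains the parser */
   void (*own)(struct ilo_cp *cp, void *data);
   /* called before the batch is submitted or the owner is replaced */
   void (*release)(struct ilo_cp *cp, void *data);
   void *data;

   /* batch space (in dwords) that each of own() and release() may emit;
    * hence the "* 2" in the check above */
   int reserve;
};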
* is empty, the callback is not invoked.
*/
void
-ilo_cp_flush_internal(struct ilo_cp *cp)
+ilo_cp_submit_internal(struct ilo_cp *cp)
{
const bool do_exec = !(ilo_debug & ILO_DEBUG_NOHW);
struct intel_bo *bo;
cp->last_submitted_bo = bo;
intel_bo_reference(cp->last_submitted_bo);
- if (cp->flush_callback)
- cp->flush_callback(cp, cp->flush_callback_data);
+ if (cp->submit_callback)
+ cp->submit_callback(cp, cp->submit_callback_data);
}
ilo_builder_begin(&cp->builder);
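The hunk elides how bo is obtained and executed. The flow is: close the
builder to get the batch bo, execute it unless ILO_DEBUG_NOHW is set, remember
it as last_submitted_bo, invoke the submit callback, then begin a new batch.
A sketch of the elided steps, with hypothetical helper names:

/* close the batch; a NULL bo means it was empty, so restart without
 * invoking the callback */
bo = batch_end(&cp->builder);
if (!bo) {
   ilo_builder_begin(&cp->builder);
   return;
}

if (do_exec)
   batch_exec(cp, bo); /* hand the bo to the kernel via the winsys */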
struct ilo_shader_cache *shader_cache;
struct intel_context *render_ctx;
- ilo_cp_callback flush_callback;
- void *flush_callback_data;
+ ilo_cp_callback submit_callback;
+ void *submit_callback_data;
enum intel_ring_type ring;
const struct ilo_cp_owner *owner;
ilo_cp_destroy(struct ilo_cp *cp);
void
-ilo_cp_flush_internal(struct ilo_cp *cp);
+ilo_cp_submit_internal(struct ilo_cp *cp);
static inline void
-ilo_cp_flush(struct ilo_cp *cp, const char *reason)
+ilo_cp_submit(struct ilo_cp *cp, const char *reason)
{
- if (ilo_debug & ILO_DEBUG_FLUSH) {
- ilo_printf("cp flushed for %s because of %s: ",
- (cp->ring == INTEL_RING_RENDER) ? "render" : "other", reason);
+ if (ilo_debug & ILO_DEBUG_SUBMIT) {
+ ilo_printf("submit batch buffer to %s ring because of %s: ",
+ (cp->ring == INTEL_RING_RENDER) ? "render" : "unknown", reason);
ilo_builder_batch_print_stats(&cp->builder);
}
- ilo_cp_flush_internal(cp);
+ ilo_cp_submit_internal(cp);
}
void
}
/**
- * Set one-off flags. They will be cleared after flushing.
+ * Set one-off flags. They will be cleared after submission.
*/
static inline void
ilo_cp_set_one_off_flags(struct ilo_cp *cp, unsigned flags)
}
/**
- * Set flush callback. The callback is invoked after the bo has been
- * successfully executed, and before the bo is reallocated.
+ * Set the submit callback. The callback is invoked after the bo has been
+ * successfully submitted, and before the bo is reallocated.
*/
static inline void
-ilo_cp_set_flush_callback(struct ilo_cp *cp, ilo_cp_callback callback,
+ilo_cp_set_submit_callback(struct ilo_cp *cp, ilo_cp_callback callback,
void *data)
{
- cp->flush_callback = callback;
- cp->flush_callback_data = data;
+ cp->submit_callback = callback;
+ cp->submit_callback_data = data;
}
#endif /* ILO_CP_H */
if (q->bo) {
if (ilo_builder_has_reloc(&ilo->cp->builder, q->bo))
- ilo_cp_flush(ilo->cp, "syncing for queries");
+ ilo_cp_submit(ilo->cp, "syncing for queries");
if (!wait && intel_bo_is_busy(q->bo))
return false;
{ "fs", ILO_DEBUG_FS, "Dump fragment shaders" },
{ "cs", ILO_DEBUG_CS, "Dump compute shaders" },
{ "draw", ILO_DEBUG_DRAW, "Show draw information" },
- { "flush", ILO_DEBUG_FLUSH, "Show batch buffer flushes" },
+ { "submit", ILO_DEBUG_SUBMIT, "Show batch buffer submissions" },
{ "nohw", ILO_DEBUG_NOHW, "Do not send commands to HW" },
{ "nocache", ILO_DEBUG_NOCACHE, "Always invalidate HW caches" },
{ "nohiz", ILO_DEBUG_NOHIZ, "Disable HiZ" },
}
static bool
-is_bo_busy(struct ilo_context *ilo, struct intel_bo *bo, bool *need_flush)
+is_bo_busy(struct ilo_context *ilo, struct intel_bo *bo, bool *need_submit)
{
const bool referenced = ilo_builder_has_reloc(&ilo->cp->builder, bo);
- if (need_flush)
- *need_flush = referenced;
+ if (need_submit)
+ *need_submit = referenced;
if (referenced)
return true;
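The hunk cuts off before the fall-through; the function presumably ends with a
direct busy query, as intel_bo_is_busy() is used the same way in the query
code above:

/* not referenced by the unsubmitted batch; ask whether the kernel still
 * considers the bo busy */
return intel_bo_is_busy(bo);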
choose_transfer_method(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
struct pipe_resource *res = xfer->base.resource;
- bool need_flush;
+ bool need_submit;
if (!resource_get_transfer_method(res, &xfer->base, &xfer->method))
return false;
/* see if we can avoid blocking */
- if (is_bo_busy(ilo, ilo_resource_get_bo(res), &need_flush)) {
+ if (is_bo_busy(ilo, ilo_resource_get_bo(res), &need_submit)) {
bool resource_renamed;
if (!xfer_unblock(xfer, &resource_renamed)) {
if (xfer->base.usage & PIPE_TRANSFER_DONTBLOCK)
return false;
- /* flush to make bo really busy so that map() correctly blocks */
- if (need_flush)
- ilo_cp_flush(ilo->cp, "syncing for transfers");
+ /* submit to make the bo really busy so that map() correctly blocks */
+ if (need_submit)
+ ilo_cp_submit(ilo->cp, "syncing for transfers");
}
if (resource_renamed)
buf_pwrite(struct ilo_context *ilo, struct ilo_buffer *buf,
unsigned usage, int offset, int size, const void *data)
{
- bool need_flush;
+ bool need_submit;
/* see if we can avoid blocking */
- if (is_bo_busy(ilo, buf->bo, &need_flush)) {
+ if (is_bo_busy(ilo, buf->bo, &need_submit)) {
bool unblocked = false;
if ((usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
}
}
- /* flush to make bo really busy so that pwrite() correctly blocks */
- if (!unblocked && need_flush)
- ilo_cp_flush(ilo->cp, "syncing for pwrites");
+ /* submit to make the bo really busy so that pwrite() correctly blocks */
+ if (!unblocked && need_submit)
+ ilo_cp_submit(ilo->cp, "syncing for pwrites");
}
intel_bo_pwrite(buf->bo, offset, size, data);