Add brw_batch_references() and convert the direct drm_bacon_bo_references(brw->batch.bo, ...) call sites to use it. We'll want to change the implementation of this shortly.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
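The converted call sites all follow the same flush-before-read pattern. A minimal sketch of that pattern as it reads after this patch (illustrative only; the helper itself is added at the bottom of this patch, and intel_batchbuffer_flush() and the types come from the existing i965 code):

    /* Illustrative call pattern, not part of the patch itself. */
    static void
    example_wait_for_bo(struct brw_context *brw, drm_bacon_bo *bo)
    {
       /* If the current batch still references the bo, the commands that
        * write it have not been submitted yet, so flush before mapping or
        * polling the bo.
        */
       if (brw_batch_references(&brw->batch, bo))
          intel_batchbuffer_flush(brw);
    }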
/* If the current batch references our results bo then we need to
* flush first...
*/
- if (drm_bacon_bo_references(brw->batch.bo, bo))
+ if (brw_batch_references(&brw->batch, bo))
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
case OA_COUNTERS:
return (obj->oa.results_accumulated ||
(obj->oa.bo &&
- !drm_bacon_bo_references(brw->batch.bo, obj->oa.bo) &&
+ !brw_batch_references(&brw->batch, obj->oa.bo) &&
!drm_bacon_bo_busy(obj->oa.bo)));
case PIPELINE_STATS:
return (obj->pipeline_stats.bo &&
- !drm_bacon_bo_references(brw->batch.bo, obj->pipeline_stats.bo) &&
+ !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
!drm_bacon_bo_busy(obj->pipeline_stats.bo));
}
* still contributing to it, flush it now so the results will be present
* when mapped.
*/
- if (drm_bacon_bo_references(brw->batch.bo, query->bo))
+ if (brw_batch_references(&brw->batch, query->bo))
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
* not ready yet on the first time it is queried. This ensures that
* the async query will return true in finite time.
*/
- if (query->bo && drm_bacon_bo_references(brw->batch.bo, query->bo))
+ if (query->bo && brw_batch_references(&brw->batch, query->bo))
intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_bacon_bo_busy(query->bo)) {
* (for example, due to being full). Record that it's been flushed.
*/
query->flushed = query->flushed ||
- !drm_bacon_bo_references(brw->batch.bo, query->bo);
+ !brw_batch_references(&brw->batch, query->bo);
if (!query->flushed)
intel_batchbuffer_flush(brw);
/* If the current batch is still contributing to the number of primitives
* generated, flush it now so the results will be present when mapped.
*/
- if (drm_bacon_bo_references(brw->batch.bo, obj->prim_count_bo))
+ if (brw_batch_references(&brw->batch, obj->prim_count_bo))
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug && drm_bacon_bo_busy(obj->prim_count_bo)))
return ret;
}
+bool
+brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+{
+ return drm_bacon_bo_references(batch->bo, bo);
+}
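Keeping the wrapper this thin means call sites no longer reach into batch->bo directly, so a later change can move the bookkeeping inside intel_batchbuffer without touching callers again. One possible future shape, purely as a sketch (exec_bos and exec_count are hypothetical fields, not part of this patch or the current struct):

    bool
    brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
    {
       /* Walk a batch-local list of referenced bos instead of asking libdrm;
        * the fields used here are assumed for illustration only.
        */
       for (int i = 0; i < batch->exec_count; i++) {
          if (batch->exec_bos[i] == bo)
             return true;
       }
       return false;
    }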
/* This is the only way buffers get added to the validate list.
*/
const void *data, GLuint bytes,
enum brw_gpu_ring ring);
+bool brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo);
+
uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
drm_bacon_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
busy =
drm_bacon_bo_busy(intel_obj->buffer) ||
- drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer);
+ brw_batch_references(&brw->batch, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
struct brw_context *brw = brw_context(ctx);
assert(intel_obj);
- if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
+ if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
drm_bacon_bo_get_subdata(intel_obj->buffer, offset, size, data);
* achieve the required synchronization.
*/
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
- if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
+ if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_bacon_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
drm_bacon_bo *bo = mt->bo;
- if (drm_bacon_bo_references(brw->batch.bo, bo))
+ if (brw_batch_references(&brw->batch, bo))
intel_batchbuffer_flush(brw);
/* brw_bo_map() uses a WB mmaping of the buffer's backing storage. It
bo = irb->mt->bo;
- if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+ if (brw_batch_references(&brw->batch, bo)) {
perf_debug("Flushing before mapping a referenced bo.\n");
intel_batchbuffer_flush(brw);
}
bo = image->mt->bo;
- if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+ if (brw_batch_references(&brw->batch, bo)) {
perf_debug("Flushing before mapping a referenced bo.\n");
intel_batchbuffer_flush(brw);
}
bo = image->mt->bo;
- if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+ if (brw_batch_references(&brw->batch, bo)) {
perf_debug("Flushing before mapping a referenced bo.\n");
intel_batchbuffer_flush(brw);
}