mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int i, ret;
if (!remap_info)
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret, i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ringbuffer *ring = params->request->ringbuf;
+ struct intel_ringbuffer *ring = params->request->ring;
ret = intel_ring_begin(params->request, 4);
if (ret)
unsigned entry,
dma_addr_t addr)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
* Note this requires that we are always called in request
* completion order.
*/
- request->ringbuf->last_retired_head = request->postfix;
+ request->ring->last_retired_head = request->postfix;
i915_gem_request_remove_from_client(request);
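/*
 * For reference (not part of this patch): last_retired_head feeds the
 * ring's space accounting on the next emission. A sketch close to the
 * contemporary intel_ring_update_space(); details may differ:
 */
void intel_ring_update_space(struct intel_ringbuffer *ring)
{
	/* Adopt the head of the most recently retired request, making
	 * the space its commands consumed reusable. */
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}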
bool flush_caches)
{
struct intel_engine_cs *engine;
- struct intel_ringbuffer *ringbuf;
+ struct intel_ringbuffer *ring;
u32 request_start;
u32 reserved_tail;
int ret;
return;
engine = request->engine;
- ringbuf = request->ringbuf;
+ ring = request->ring;
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- request_start = intel_ring_get_tail(ringbuf);
+ request_start = intel_ring_get_tail(ring);
reserved_tail = request->reserved_space;
request->reserved_space = 0;
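/*
 * For reference (not part of this patch): the reservation is made at
 * request allocation; the legacy ring path later in this patch does
 *
 *	request->reserved_space += LEGACY_REQUEST_SIZE;
 *	ret = intel_ring_begin(request, 0);
 *
 * so any wait or wrap happens up front and the breadcrumb emission here
 * cannot fail. Zeroing reserved_space above releases that headroom for
 * the final commands of the request.
 */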
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request->postfix = intel_ring_get_tail(ringbuf);
+ request->postfix = intel_ring_get_tail(ring);
if (i915.enable_execlists) {
ret = engine->emit_request(request);
} else {
ret = engine->add_request(request);
- request->tail = intel_ring_get_tail(ringbuf);
+ request->tail = intel_ring_get_tail(ring);
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
/* Sanity check that the reserved size was large enough. */
- ret = intel_ring_get_tail(ringbuf) - request_start;
+ ret = intel_ring_get_tail(ring) - request_start;
if (ret < 0)
- ret += ringbuf->size;
+ ret += ring->size;
WARN_ONCE(ret > reserved_tail,
"Not enough space reserved (%d bytes) "
"for adding the request (%d bytes)\n",
*/
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
- struct intel_ringbuffer *ringbuf;
+ struct intel_ringbuffer *ring;
struct intel_signal_node signaling;
/** GEM sequence number associated with the previous request,
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
- struct intel_ringbuffer *rb;
+ struct intel_ringbuffer *ring;
vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &ggtt->base;
if (HAS_BROKEN_CS_TLB(dev_priv))
ee->wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
- rb = request->ringbuf;
- ee->cpu_ring_head = rb->head;
- ee->cpu_ring_tail = rb->tail;
+ ring = request->ring;
+ ee->cpu_ring_head = ring->head;
+ ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
i915_error_ggtt_object_create(dev_priv,
- rb->obj);
+ ring->obj);
}
ee->hws_page =
i915_error_ggtt_object_create(dev_priv,
engine->status_page.obj);
- if (engine->wa_ctx.obj) {
- ee->wa_ctx =
- i915_error_ggtt_object_create(dev_priv,
- engine->wa_ctx.obj);
- }
+ ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
+ engine->wa_ctx.obj);
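/* Dropping the conditional should be safe here:
 * i915_error_ggtt_object_create() returns NULL when handed a NULL
 * object, so ee->wa_ctx ends up NULL exactly as before when no
 * wa_ctx object exists. */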
i915_gem_record_active_context(engine, error, ee);
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
return ret;
}
- request->ringbuf = ce->ringbuf;
+ request->ring = ce->ringbuf;
if (i915.enable_guc_submission) {
/*
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
+ struct intel_ringbuffer *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
- intel_ring_advance(ringbuf);
- request->tail = ringbuf->tail;
+ intel_ring_advance(ring);
+ request->tail = ring->tail;
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*
* Caller must reserve WA_TAIL_DWORDS for us!
*/
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* We keep the previous context alive until we retire the following
* request. This ensures that the context object is still pinned
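/*
 * For reference (not part of this patch): WA_TAIL_DWORDS covers the two
 * MI_NOOPs above; in intel_lrc.c of this era it is simply
 *
 *	#define WA_TAIL_DWORDS 2
 *
 * request->tail is sampled before the padding, so the hardware never
 * sees a resubmission with HEAD == TAIL, and every emit_request path
 * asks intel_ring_begin() for its own dwords plus WA_TAIL_DWORDS.
 */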
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
+ struct intel_ringbuffer *ring = params->request->ring;
u64 exec_start;
int instp_mode;
u32 instp_mask;
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+ if (instp_mode != 0 && engine->id != RCS) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (ret)
return ret;
- if (engine == &dev_priv->engine[RCS] &&
+ if (engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ringbuf, INSTPM);
- intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
- intel_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
dev_priv->relative_constants_mode = instp_mode;
}
{
int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
if (ret)
return ret;
- intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ringbuf, w->reg[i].addr);
- intel_ring_emit(ringbuf, w->reg[i].value);
+ intel_ring_emit_reg(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
}
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
u32 invalidate_domains,
u32 unused)
{
- struct intel_ringbuffer *ring = request->ringbuf;
- struct intel_engine_cs *engine = ring->engine;
+ struct intel_ringbuffer *ring = request->ring;
uint32_t cmd;
int ret;
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
- if (engine->id == VCS)
+ if (request->engine->id == VCS)
cmd |= MI_INVALIDATE_BSD;
}
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = request->ringbuf;
+ struct intel_ringbuffer *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ring = request->ringbuf;
+ struct intel_ringbuffer *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ring = request->ringbuf;
+ struct intel_ringbuffer *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
if (ret)
return ret;
- intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) {
- intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
- intel_ring_emit(ringbuf, table->table[index].control_value);
+ intel_ring_emit_reg(ring, mocs_register(engine, index));
+ intel_ring_emit(ring, table->table[index].control_value);
}
/*
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
- intel_ring_emit(ringbuf, table->table[0].control_value);
+ intel_ring_emit_reg(ring, mocs_register(engine, index));
+ intel_ring_emit(ring, table->table[0].control_value);
}
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
unsigned int i;
int ret;
if (ret)
return ret;
- intel_ring_emit(ringbuf,
+ intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
for (i = 0; i < table->size/2; i++) {
- intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
i++;
}
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
- intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 0, 0));
}
- intel_ring_emit(ringbuf, MI_NOOP);
- intel_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
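/*
 * For reference (not part of this patch): each GEN9_LNCFCMOCS register
 * holds two 16-bit L3CC entries, which is why the loop above walks the
 * table in pairs. A sketch of the packing helper, assuming the layout
 * used above (the real l3cc_combine() lives in intel_mocs.c):
 */
static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
			       u16 low, u16 high)
{
	/* low entry in bits 15:0, high entry in bits 31:16 */
	return table->table[low].l3cc_value |
	       table->table[high].l3cc_value << 16;
}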
overlay->active = true;
- ring = req->ringbuf;
+ ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
return ret;
}
- ring = req->ringbuf;
+ ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
return ret;
}
- ring = req->ringbuf;
+ ring = req->ring;
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
return ret;
}
- ring = req->ringbuf;
+ ring = req->ring;
intel_ring_emit(ring,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 cmd;
int ret;
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 cmd;
int ret;
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 flags = 0;
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 scratch_addr =
req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 flags = 0;
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 6);
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
int ret, i;
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
- struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+ struct intel_ringbuffer *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
- struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+ struct intel_ringbuffer *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
- struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+ struct intel_ringbuffer *signaller = signaller_req->ring;
struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
gen6_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
if (engine->semaphore.signal)
gen8_render_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
if (engine->semaphore.signal)
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+ struct intel_ringbuffer *waiter = waiter_req->ring;
struct drm_i915_private *dev_priv = waiter_req->i915;
u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
struct i915_hw_ppgtt *ppgtt;
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+ struct intel_ringbuffer *waiter = waiter_req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
u64 offset, u32 length,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
u32 cs_offset = req->engine->scratch.gtt_offset;
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- request->ringbuf = request->engine->buffer;
+ request->ring = request->engine->buffer;
ret = intel_ring_begin(request, 0);
if (ret)
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *target;
- intel_ring_update_space(ringbuf);
- if (ringbuf->space >= bytes)
+ intel_ring_update_space(ring);
+ if (ring->space >= bytes)
return 0;
/*
* from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering.
*/
- if (target->ringbuf != ringbuf)
+ if (target->ring != ring)
continue;
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
+ space = __intel_ring_space(target->postfix, ring->tail,
+ ring->size);
if (space >= bytes)
break;
}
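/*
 * For reference (not part of this patch): a sketch close to the
 * contemporary __intel_ring_space(). It treats the ring as circular and
 * keeps a small gap so that a completely full ring is never mistaken
 * for an empty one (head == tail):
 */
int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;

	return space - I915_RING_FREE_SPACE;
}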
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ringbuffer *ring = req->ring;
+ int remain_actual = ring->size - ring->tail;
+ int remain_usable = ring->effective_size - ring->tail;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
wait_bytes = total_bytes;
}
- if (wait_bytes > ringbuf->space) {
+ if (wait_bytes > ring->space) {
int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
- intel_ring_update_space(ringbuf);
- if (unlikely(ringbuf->space < wait_bytes))
+ intel_ring_update_space(ring);
+ if (unlikely(ring->space < wait_bytes))
return -EAGAIN;
}
if (unlikely(need_wrap)) {
- GEM_BUG_ON(remain_actual > ringbuf->space);
- GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+ GEM_BUG_ON(remain_actual > ring->space);
+ GEM_BUG_ON(ring->tail + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
- ringbuf->tail = 0;
- ringbuf->space -= remain_actual;
+ memset(ring->vaddr + ring->tail, 0, remain_actual);
+ ring->tail = 0;
+ ring->space -= remain_actual;
}
- ringbuf->space -= bytes;
- GEM_BUG_ON(ringbuf->space < 0);
+ ring->space -= bytes;
+ GEM_BUG_ON(ring->space < 0);
return 0;
}
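/*
 * Worked example of the wrap handling above (not part of this patch,
 * hypothetical numbers, ignoring any reserved space): suppose
 * size == 4096, effective_size == 4064 and tail == 4032. A 64-byte
 * emission exceeds remain_usable (32), so need_wrap is set: the final
 * remain_actual == 64 bytes are NOOP-filled, tail resets to 0, and the
 * request waits for remain_actual + 64 == 128 bytes of space.
 */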
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int num_dwords =
(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
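/*
 * Worked example (not part of this patch): CACHELINE_BYTES == 64, i.e.
 * 16 dwords per cacheline. If ring->tail & 63 == 8, num_dwords starts
 * at 2 and the (elided) body pads with 16 - 2 == 14 MI_NOOPs so the
 * next command begins on a fresh cacheline; an already-aligned tail
 * emits nothing.
 */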
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
uint32_t cmd;
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ringbuffer *ring = req->ring;
uint32_t cmd;
int ret;