i915_gem.o \
i915_gem_object.o \
i915_gem_render_state.o \
- i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_request.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
return 0;
}
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
{
return i915_gem_context_force_single_submission(req->ctx);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct i915_request *req = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
- workload->req = i915_gem_request_get(rq);
+ workload->req = i915_request_get(rq);
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request(workload->req);
+ i915_request_add(workload->req);
workload->dispatched = true;
}
workload->status = 0;
}
- i915_gem_request_put(fetch_and_zero(&workload->req));
+ i915_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
- i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
- struct drm_i915_gem_request *req;
+ struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadowed;
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct task_struct *task;
mutex_lock(&dev->struct_mutex);
* Therefore, we need to protect this ->comm access using RCU.
*/
request = list_first_entry_or_null(&file_priv->mm.request_list,
- struct drm_i915_gem_request,
+ struct i915_request,
client_link);
rcu_read_lock();
task = pid_task(request && request->ctx->pid ?
I915_WAIT_LOCKED);
if (val & DROP_RETIRE)
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
/*
* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
- * by the GPU. i915_gem_retire_requests() is called directly when we
+ * by the GPU. i915_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
intel_gpu_reset(i915, ALL_ENGINES);
goto finish;
}
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
int ret;
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_request.h"
#include "i915_vma.h"
#include "intel_gvt.h"
*
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_gem_request_alloc(), this bit is checked and the sequence
+ * i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
*/
unsigned long flags;
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
return READ_ONCE(error->reset_engine_count[engine->id]);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
}
static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 seqno;
/* Note that the engine may have wrapped around the seqno, and
* this by kicking all the waiters before resetting the seqno
* in hardware, and also signal the fence.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
return true;
/* The request was dequeued before we were awoken. We check after
* the request execution are sufficient to ensure that a check
* after reading the value from hw matches this request.
*/
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
wake_up_process(b->irq_wait->tsk);
spin_unlock_irq(&b->irq_lock);
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
}
long timeout,
struct intel_rps_client *rps_client)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
timeout);
rq = to_request(fence);
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
goto out;
/*
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps_client && !i915_gem_request_started(rq)) {
+ if (rps_client && !i915_request_started(rq)) {
if (INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq, rps_client);
}
- timeout = i915_wait_request(rq, flags, timeout);
+ timeout = i915_request_wait(rq, flags, timeout);
out:
- if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
- i915_gem_request_retire_upto(rq);
+ if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
+ i915_request_retire_upto(rq);
return timeout;
}
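/*
 * Illustrative sketch (not part of this patch): with the rename applied,
 * the object-wait path above reads roughly as follows (INTEL_GEN check
 * elided). i915_request_started() gates the one-shot waitboost, and
 * i915_request_wait() returns the remaining timeout (or a negative errno):
 *
 *	rq = to_request(fence);
 *	if (!i915_request_completed(rq)) {
 *		if (rps_client && !i915_request_started(rq))
 *			gen6_rps_boost(rq, rps_client);
 *		timeout = i915_request_wait(rq, flags, timeout);
 *	}
 */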
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
atomic_inc(&ctx->active_count);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *active = NULL;
+ struct i915_request *request, *active = NULL;
unsigned long flags;
/* We are called by the error capture and reset at a random
*/
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link) {
- if (__i915_gem_request_completed(request,
- request->global_seqno))
+ if (__i915_request_completed(request, request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
* Ensure irq handler finishes, and not run again.
* Also return the active request so that we only search for it once.
*/
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request = NULL;
+ struct i915_request *request = NULL;
/*
* During the reset sequence, we must prevent the engine from
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
enum intel_engine_id id;
int err = 0;
return err;
}
-static void skip_request(struct drm_i915_gem_request *request)
+static void skip_request(struct i915_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
dma_fence_set_error(&request->fence, -EIO);
}
-static void engine_skip_context(struct drm_i915_gem_request *request)
+static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->ctx;
}
/* Returns the request if it was guilty of the hang */
-static struct drm_i915_gem_request *
+static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* The guilty request will get skipped on a hung engine.
*
}
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/*
* Make sure this write is visible before we re-enable the interrupt
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
* empty request appears sufficient to paper over the glitch.
*/
if (intel_engine_is_idle(engine)) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
- rq = i915_gem_request_alloc(engine,
- dev_priv->kernel_context);
+ rq = i915_request_alloc(engine,
+ dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
}
}
}
}
-static void nop_submit_request(struct drm_i915_gem_request *request)
+static void nop_submit_request(struct i915_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
}
-static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+static void nop_complete_submit_request(struct i915_request *request)
{
unsigned long flags;
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
*/
list_for_each_entry(tl, &i915->gt.timelines, link) {
for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = i915_gem_active_peek(&tl->engine[i].last_request,
&i915->drm.struct_mutex);
/* Come back later if the device is busy... */
if (mutex_trylock(&dev->struct_mutex)) {
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
if (ret)
return ret;
}
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
ret = wait_for_engines(i915);
} else {
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct drm_i915_gem_request *request, *target = NULL;
+ struct i915_request *request, *target = NULL;
long ret;
/* ABI: return -EIO if already wedged */
target = request;
}
if (target)
- i915_gem_request_get(target);
+ i915_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
- ret = i915_wait_request(target,
+ ret = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(target);
+ i915_request_put(target);
return ret < 0 ? ret : 0;
}
__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
/* We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct drm_i915_gem_request, fence);
- if (i915_gem_request_completed(rq))
+ rq = container_of(fence, struct i915_request, fence);
+ if (i915_request_completed(rq))
return 0;
return flag(rq->engine->uabi_id);
}
static void
-frontbuffer_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
return PTR_ERR(ctx);
for_each_engine(engine, i915, id) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ctx;
if (engine->init_context)
err = engine->init_context(rq);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
if (err)
goto err_active;
}
if (!dev_priv->luts)
goto err_vmas;
- dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ dev_priv->requests = KMEM_CACHE(i915_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
if (!reservation_object_test_signaled_rcu(resv, true))
break;
- i915_gem_retire_requests(pool->engine->i915);
+ i915_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
/*
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
if (engine_has_idle_kernel_context(engine))
continue;
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
}
* but an extra layer of paranoia before we declare the system
* idle (on suspend etc) is advisable!
*/
- __i915_add_request(req, true);
+ __i915_request_add(rq, true);
}
return 0;
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
-int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_release(struct kref *ctx_ref);
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
else
phases[1] = NULL;
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(vm->i915);
+ i915_retire_requests(vm->i915);
check_color = vm->mm.color_adjust;
if (check_color) {
struct i915_gem_context *ctx; /** context for building the request */
struct i915_address_space *vm; /** GTT and vma for the request */
- struct drm_i915_gem_request *request; /** our request to build */
+ struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
/** actual size of execobj[] as we may extend it for the cmdparser */
bool has_fence : 1;
bool needs_unfenced : 1;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
} reloc_cache;
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_add_request(cache->rq, true);
+ __i915_request_add(cache->rq, true);
cache->rq = NULL;
}
{
struct reloc_cache *cache = &eb->reloc_cache;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
if (err)
goto err_unmap;
- rq = i915_gem_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_alloc(eb->engine, eb->ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = i915_gem_request_await_object(rq, vma->obj, true);
+ err = i915_request_await_object(rq, vma->obj, true);
if (err)
goto err_request;
return 0;
err_request:
- i915_add_request(rq);
+ i915_request_add(rq);
err_unpin:
i915_vma_unpin(batch);
err_unmap:
}
static void eb_export_fence(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct reservation_object *resv = vma->resv;
*/
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &req->fence);
+ reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
+ reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
}
struct drm_i915_gem_object *obj = vma->obj;
if (flags & EXEC_OBJECT_CAPTURE) {
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
if (unlikely(!capture))
if (flags & EXEC_OBJECT_ASYNC)
continue;
- err = i915_gem_request_await_object
+ err = i915_request_await_object
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
if (err)
return err;
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = req->engine->id;
+ const unsigned int idx = rq->engine->id;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/*
if (!i915_vma_is_active(vma))
obj->active_count++;
i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
+ i915_gem_active_set(&vma->last_read[idx], rq);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
obj->write_domain = 0;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, req);
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
obj->read_domains = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, req);
+ i915_gem_active_set(&vma->last_fence, rq);
}
-static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
+static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
int i;
- if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
+ if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- cs = intel_ring_begin(req, 4 * 2 + 2);
+ cs = intel_ring_begin(rq, 4 * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = 0;
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
}
static void
-add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
+add_to_client(struct i915_request *rq, struct drm_file *file)
{
- req->file_priv = file->driver_priv;
- list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
+ rq->file_priv = file->driver_priv;
+ list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}
static int eb_submit(struct i915_execbuffer *eb)
if (!fence)
return -EINVAL;
- err = i915_gem_request_await_dma_fence(eb->request, fence);
+ err = i915_request_await_dma_fence(eb->request, fence);
dma_fence_put(fence);
if (err < 0)
return err;
GEM_BUG_ON(eb.reloc_cache.rq);
/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_alloc(eb.engine, eb.ctx);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
}
if (in_fence) {
- err = i915_gem_request_await_dma_fence(eb.request, in_fence);
+ err = i915_request_await_dma_fence(eb.request, in_fence);
if (err < 0)
goto err_request;
}
*/
eb.request->batch = eb.batch;
- trace_i915_gem_request_queue(eb.request, eb.batch_flags);
+ trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_add_request(eb.request, err == 0);
+ __i915_request_add(eb.request, err == 0);
add_to_client(eb.request, file);
if (fences)
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
BUG_ON(entry >= 4);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
int i, ret;
for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- ret = gen8_write_pdp(req, i, pd_daddr);
+ ret = gen8_write_pdp(rq, i, pd_daddr);
if (ret)
return ret;
}
}
static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+ return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *dev_priv = rq->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
-#include "i915_gem_request.h"
+
+#include "i915_request.h"
#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE_4K BIT(12)
gen6_pte_t __iomem *pd_addr;
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
#include <drm/i915_drm.h>
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
#undef OUT_BATCH
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
+int i915_gem_render_state_emit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_render_state so = {}; /* keep the compiler happy */
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
-struct drm_i915_gem_request;
+struct i915_request;
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
+int i915_gem_render_state_emit(struct i915_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */
i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
trace_i915_gem_shrink(i915, target, flags);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
shrinker_unlock(i915, unlock);
#include <linux/list.h>
-#include "i915_utils.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_syncmap.h"
+#include "i915_utils.h"
struct i915_gem_timeline;
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->global_seqno : 0;
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->engine->id : -1;
}
}
-static void record_request(struct drm_i915_gem_request *request,
+static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
}
static void engine_record_requests(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *first,
+ struct i915_request *first,
struct drm_i915_error_engine *ee)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int count;
count = 0;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct i915_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
e->active = atomic_read(&ctx->active_count);
}
-static void request_record_user_bo(struct drm_i915_gem_request *request,
+static void request_record_user_bo(struct i915_request *request,
struct drm_i915_error_engine *ee)
{
- struct i915_gem_capture_list *c;
+ struct i915_capture_list *c;
struct drm_i915_error_object **bo;
long count;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
ee->engine_id = -1;
static void notify_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq = NULL;
+ struct i915_request *rq = NULL;
struct intel_wait *wait;
if (!engine->breadcrumbs.irq_armed)
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
wait->seqno)) {
- struct drm_i915_gem_request *waiter = wait->request;
+ struct i915_request *waiter = wait->request;
wakeup = true;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
- rq = i915_gem_request_get(waiter);
+ rq = i915_request_get(waiter);
}
if (wakeup)
if (rq) {
dma_fence_signal(&rq->fence);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
}
trace_intel_engine_notify(engine, wait);
* Same as gen8_update_reg_state_unlocked, only through the batchbuffer. This
* is only used by the kernel context.
*/
-static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
+static int gen8_emit_oa_config(struct i915_request *rq,
const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
u32 *cs;
int i;
- cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
+ cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_gem_timeline *timeline;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- ret = gen8_emit_oa_config(req, oa_config);
+ ret = gen8_emit_oa_config(rq, oa_config);
if (ret) {
- i915_add_request(req);
+ i915_request_add(rq);
return ret;
}
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
GFP_KERNEL);
}
- i915_add_request(req);
+ i915_request_add(rq);
return 0;
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* The timeline struct (as part of the ppgtt underneath a context)
+ /*
+ * The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
* We could extend the life of a context to beyond that of all
* fences, possibly keeping the hw resource around indefinitely,
static bool i915_fence_signaled(struct dma_fence *fence)
{
- return i915_gem_request_completed(to_request(fence));
+ return i915_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
bool interruptible,
signed long timeout)
{
- return i915_wait_request(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
- struct drm_i915_gem_request *req = to_request(fence);
+ struct i915_request *rq = to_request(fence);
- /* The request is put onto a RCU freelist (i.e. the address
+ /*
+ * The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
* Otherwise the debugobjects for the fences are only marked as
* freed when the slab cache itself is freed, and so we would get
* caught trying to reuse dead objects.
*/
- i915_sw_fence_fini(&req->submit);
+ i915_sw_fence_fini(&rq->submit);
- kmem_cache_free(req->i915->requests, req);
+ kmem_cache_free(rq->i915->requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
};
static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+i915_request_remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
- /* HWS page needs to be set less than what we
- * will inject to ring
- */
- return reset_all_global_seqno(dev_priv, seqno - 1);
+ /* HWS page needs to be set less than what we will inject to ring */
+ return reset_all_global_seqno(i915, seqno - 1);
}
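/*
 * Worked example (illustrative, not part of this patch): if debugfs asks
 * for the next seqno to be 0x100, reset_all_global_seqno() writes 0xff
 * into the HWS page, so the first request emitted afterwards completes
 * with seqno 0x100; the stored value always trails what we inject.
 */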
static void mark_busy(struct drm_i915_private *i915)
}
void i915_gem_retire_noop(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* Space left intentionally blank */
}
-static void advance_ring(struct drm_i915_gem_request *request)
+static void advance_ring(struct i915_request *request)
{
unsigned int tail;
- /* We know the GPU must have read the request to have
+ /*
+ * We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
* completion order.
*/
if (list_is_last(&request->ring_link, &request->ring->request_list)) {
- /* We may race here with execlists resubmitting this request
+ /*
+ * We may race here with execlists resubmitting this request
* as we retire it. The resubmission will move the ring->tail
* forwards (to request->wa_tail). We either read the
* current value that was written to hw, or the value that
request->ring->head = tail;
}
-static void free_capture_list(struct drm_i915_gem_request *request)
+static void free_capture_list(struct i915_request *request)
{
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = request->capture_list;
while (capture) {
- struct i915_gem_capture_list *next = capture->next;
+ struct i915_capture_list *next = capture->next;
kfree(capture);
capture = next;
}
}
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+static void i915_request_retire(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!i915_request_completed(request));
GEM_BUG_ON(!request->i915->gt.active_requests);
- trace_i915_gem_request_retire(request);
+ trace_i915_request_retire(request);
spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
free_capture_list(request);
- /* Walk through the active list, calling retire on each. This allows
+ /*
+ * Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
* when their *last* active request is completed (updating state
* tracking lists for eviction, active references for GEM, etc).
* the node after the callback).
*/
list_for_each_entry_safe(active, next, &request->active_list, link) {
- /* In microbenchmarks or focusing upon time inside the kernel,
+ /*
+ * In microbenchmarks or focusing upon time inside the kernel,
* we may spend an inordinate amount of time simply handling
* the retirement of requests and processing their callbacks.
* Of which, this loop itself is particularly hot due to the
active->retire(active, request);
}
- i915_gem_request_remove_from_client(request);
+ i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
atomic_dec_if_positive(&request->ctx->ban_score);
- /* The backing object for the context is done after switching to the
+ /*
+ * The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. However, since we
- * cannot take the required locks at i915_gem_request_submit() we
+ * cannot take the required locks at i915_request_submit() we
* defer the unpinning of the active context to now, retirement of
* the subsequent request.
*/
spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree);
- i915_gem_request_put(request);
+ i915_request_put(request);
}
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *tmp;
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_request *tmp;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_gem_request_completed(req));
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&req->link))
+ if (list_empty(&rq->link))
return;
do {
tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
- i915_gem_request_retire(tmp);
- } while (tmp != req);
+ i915_request_retire(tmp);
+ } while (tmp != rq);
}
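/*
 * Illustrative note (not part of this patch): retirement is strictly
 * in-order along the engine timeline, so completing the loop above for
 * rq3 in a chain
 *
 *	rq1 -> rq2 -> rq3
 *
 * calls i915_request_retire() on rq1, then rq2, then rq3.
 */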
static u32 timeline_get_seqno(struct intel_timeline *tl)
return ++tl->seqno;
}
-void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
- trace_i915_gem_request_execute(request);
+ trace_i915_request_execute(request);
wake_up_all(&request->execute);
}
-void i915_gem_request_submit(struct drm_i915_gem_request *request)
+void i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- /* Only unwind in reverse order, required so that the per-context list
+ /*
+ * Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
GEM_BUG_ON(!request->global_seqno);
list_move(&request->link, &timeline->requests);
spin_unlock(&timeline->lock);
- /* We don't need to wake_up any waiters on request->execute, they
+ /*
+ * We don't need to wake_up any waiters on request->execute, they
* will get woken by any other event or us re-adding this request
- * to the engine timeline (__i915_gem_request_submit()). The waiters
+ * to the engine timeline (__i915_request_submit()). The waiters
* should be quite adept at finding that the request now has a new
* global_seqno compared to the one they went to sleep on.
*/
}
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_unsubmit(request);
+ __i915_request_unsubmit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct drm_i915_gem_request *request =
+ struct i915_request *request =
container_of(fence, typeof(*request), submit);
switch (state) {
case FENCE_COMPLETE:
- trace_i915_gem_request_submit(request);
+ trace_i915_request_submit(request);
/*
- * We need to serialize use of the submit_request() callback with its
- * hotplugging performed during an emergency i915_gem_set_wedged().
- * We use the RCU mechanism to mark the critical section in order to
- * force i915_gem_set_wedged() to wait until the submit_request() is
- * completed before proceeding.
+ * We need to serialize use of the submit_request() callback
+ * with its hotplugging performed during an emergency
+ * i915_gem_set_wedged(). We use the RCU mechanism to mark the
+ * critical section in order to force i915_gem_set_wedged() to
+ * wait until the submit_request() is completed before
+ * proceeding.
*/
rcu_read_lock();
request->engine->submit_request(request);
break;
case FENCE_FREE:
- i915_gem_request_put(request);
+ i915_request_put(request);
break;
}
}
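/*
 * Sketch of the serialization described in the comment above (illustrative
 * and hedged: it assumes the wedge path pairs the read-side section with
 * synchronize_rcu(); not part of this patch):
 *
 *	submit_notify()                  i915_gem_set_wedged()
 *	rcu_read_lock();                 engine->submit_request =
 *	engine->submit_request(rq);              nop_submit_request;
 *	rcu_read_unlock();               synchronize_rcu();
 *
 * synchronize_rcu() cannot return until every submit_request() call that
 * was already inside the read-side critical section has finished.
 */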
/**
- * i915_gem_request_alloc - allocate a request structure
+ * i915_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+struct i915_request *
+i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *req;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq;
struct intel_ring *ring;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/*
* Preempt contexts are reserved for exclusive use to inject a
* preemption context switch. They are never to be used for any trivial
* request!
*/
- GEM_BUG_ON(ctx == dev_priv->preempt_context);
+ GEM_BUG_ON(ctx == i915->preempt_context);
- /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(&i915->gpu_error))
return ERR_PTR(-EIO);
- /* Pinning the contexts may generate requests in order to acquire
+ /*
+ * Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
goto err_unreserve;
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->timeline->requests,
- typeof(*req), link);
- if (req && i915_gem_request_completed(req))
- i915_gem_request_retire(req);
+ rq = list_first_entry_or_null(&engine->timeline->requests,
+ typeof(*rq), link);
+ if (rq && i915_request_completed(rq))
+ i915_request_retire(rq);
- /* Beware: Dragons be flying overhead.
+ /*
+ * Beware: Dragons be flying overhead.
*
* We use RCU to look up requests in flight. The lookups may
* race with the request being allocated from the slab freelist.
*
* Do not use kmem_cache_zalloc() here!
*/
- req = kmem_cache_alloc(dev_priv->requests,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(!req)) {
+ rq = kmem_cache_alloc(i915->requests,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(!rq)) {
/* Ratelimit ourselves to prevent oom from malicious clients */
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
I915_WAIT_INTERRUPTIBLE);
if (ret)
* Having already penalized the client to stall, we spend
* a little extra time to re-optimise page allocation.
*/
- kmem_cache_shrink(dev_priv->requests);
+ kmem_cache_shrink(i915->requests);
rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
- req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req) {
+ rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
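/*
 * Illustrative sketch of the RCU lookup that the dragons comment above
 * warns about (not part of this patch; it shows the generic
 * SLAB_TYPESAFE_BY_RCU pattern rather than a specific call site): a
 * request may be freed and reused while a reader is inside
 * rcu_read_lock(), so the reader must acquire a reference and then
 * re-check that it still holds the request it expected:
 *
 *	rcu_read_lock();
 *	rq = READ_ONCE(active->request);
 *	if (rq && !dma_fence_get_rcu(&rq->fence))
 *		rq = NULL;		// concurrently freed, retry or bail
 *	rcu_read_unlock();
 *	// re-check rq against the expected slot before trusting it
 */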
- req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
- GEM_BUG_ON(req->timeline == engine->timeline);
+ rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(rq->timeline == engine->timeline);
- spin_lock_init(&req->lock);
- dma_fence_init(&req->fence,
+ spin_lock_init(&rq->lock);
+ dma_fence_init(&rq->fence,
&i915_fence_ops,
- &req->lock,
- req->timeline->fence_context,
- timeline_get_seqno(req->timeline));
+ &rq->lock,
+ rq->timeline->fence_context,
+ timeline_get_seqno(rq->timeline));
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
- init_waitqueue_head(&req->execute);
+ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ init_waitqueue_head(&rq->execute);
- i915_priotree_init(&req->priotree);
+ i915_priotree_init(&rq->priotree);
- INIT_LIST_HEAD(&req->active_list);
- req->i915 = dev_priv;
- req->engine = engine;
- req->ctx = ctx;
- req->ring = ring;
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->i915 = i915;
+ rq->engine = engine;
+ rq->ctx = ctx;
+ rq->ring = ring;
/* No zalloc, must clear what we need by hand */
- req->global_seqno = 0;
- req->signaling.wait.seqno = 0;
- req->file_priv = NULL;
- req->batch = NULL;
- req->capture_list = NULL;
- req->waitboost = false;
+ rq->global_seqno = 0;
+ rq->signaling.wait.seqno = 0;
+ rq->file_priv = NULL;
+ rq->batch = NULL;
+ rq->capture_list = NULL;
+ rq->waitboost = false;
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
- * i915_add_request() call can't fail. Note that the reserve may need
+ * i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
- req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
/*
* Record the position of the start of the request so that
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->emit;
+ rq->head = rq->ring->emit;
/* Unconditionally invalidate GPU caches and TLBs. */
- ret = engine->emit_flush(req, EMIT_INVALIDATE);
+ ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
goto err_unwind;
- ret = engine->request_alloc(req);
+ ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
/* Check that we didn't interrupt ourselves with a new request */
- GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
- return req;
+ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ return rq;
err_unwind:
- req->ring->emit = req->head;
+ rq->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&req->active_list));
- GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
- GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+ GEM_BUG_ON(!list_empty(&rq->active_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
- kmem_cache_free(dev_priv->requests, req);
+ kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_engine(engine);
err_unpin:
}
static int
-i915_gem_request_await_request(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from)
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_gem_request_completed(from))
+ if (i915_request_completed(from))
return 0;
if (to->engine->schedule) {
GEM_BUG_ON(!from->engine->semaphore.signal);
- seqno = i915_gem_request_global_seqno(from);
+ seqno = i915_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
}
int
-i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence)
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
struct dma_fence **child = &fence;
unsigned int nchild = 1;
int ret;
- /* Note that if the fence-array was created in signal-on-any mode,
+ /*
+ * Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
* we don't currently store which mode the fence-array is operating
* in. Fortunately, the only user of signal-on-any is private to
/*
* Requests on the same timeline are explicitly ordered, along
- * with their dependencies, by i915_add_request() which ensures
+ * with their dependencies, by i915_request_add() which ensures
* that requests are submitted in-order through each ring.
*/
- if (fence->context == req->fence.context)
+ if (fence->context == rq->fence.context)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != req->i915->mm.unordered_timeline &&
- intel_timeline_sync_is_later(req->timeline, fence))
+ if (fence->context != rq->i915->mm.unordered_timeline &&
+ intel_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
- ret = i915_gem_request_await_request(req,
- to_request(fence));
+ ret = i915_request_await_request(rq, to_request(fence));
else
- ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
+ ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != req->i915->mm.unordered_timeline)
- intel_timeline_sync_set(req->timeline, fence);
+ if (fence->context != rq->i915->mm.unordered_timeline)
+ intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
}
/**
- * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ * i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
* @obj: object which may be in use on another ring.
* @write: whether the wait is on behalf of a writer
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
- struct drm_i915_gem_object *obj,
- bool write)
+i915_request_await_object(struct i915_request *to,
+ struct drm_i915_gem_object *obj,
+ bool write)
{
struct dma_fence *excl;
int ret = 0;
return ret;
for (i = 0; i < count; i++) {
- ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ ret = i915_request_await_dma_fence(to, shared[i]);
if (ret)
break;
if (excl) {
if (ret == 0)
- ret = i915_gem_request_await_dma_fence(to, excl);
+ ret = i915_request_await_dma_fence(to, excl);
dma_fence_put(excl);
}
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+void __i915_request_add(struct i915_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
- trace_i915_gem_request_add(request);
+ trace_i915_request_add(request);
/*
* Make sure that no request gazumped us - if it was allocated after
- * our i915_gem_request_alloc() and called __i915_add_request() before
+ * our i915_request_alloc() and called __i915_request_add() before
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev && !i915_gem_request_completed(prev)) {
+ if (prev && !i915_request_completed(prev)) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
if (engine->schedule)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_gem_request_completed(prev))
- i915_gem_request_retire_upto(prev);
+ if (prev && i915_request_completed(prev))
+ i915_request_retire_upto(prev);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
- /* Cheaply and approximately convert from nanoseconds to microseconds.
+ /*
+ * Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+static bool __i915_spin_request(const struct i915_request *rq,
u32 seqno, int state, unsigned long timeout_us)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
unsigned int irq, cpu;
GEM_BUG_ON(!seqno);
if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
return false;
- /* When waiting for high frequency requests, e.g. during synchronous
+ /*
+ * When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
- return seqno == i915_gem_request_global_seqno(req);
+ return seqno == i915_request_global_seqno(rq);
- /* Seqno are meant to be ordered *before* the interrupt. If
+ /*
+ * Seqno are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
return false;
}
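/*
 * Illustrative note (not part of this patch): the spin budget is split
 * between the two call sites in i915_request_wait() below: a 5us
 * optimistic spin before arming the interrupt, and a 2us spin once we
 * know the GPU is actively processing this request, trading a little
 * CPU time against interrupt setup latency.
 */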
-static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
+static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
return false;
/**
- * i915_wait_request - wait until execution of request has finished
+ * i915_request_wait - wait until execution of request has finished
- * @req: the request to wait upon
+ * @rq: the request to wait upon
* @flags: how to wait
* @timeout: how long to wait in jiffies
*
* May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
*/
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+ wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
DEFINE_WAIT_FUNC(reset, default_wake_function);
DEFINE_WAIT_FUNC(exec, default_wake_function);
struct intel_wait wait;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
GEM_BUG_ON(timeout < 0);
- if (i915_gem_request_completed(req))
+ if (i915_request_completed(rq))
return timeout;
if (!timeout)
return -ETIME;
- trace_i915_gem_request_wait_begin(req, flags);
+ trace_i915_request_wait_begin(rq, flags);
- add_wait_queue(&req->execute, &exec);
+ add_wait_queue(&rq->execute, &exec);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, req);
+ intel_wait_init(&wait, rq);
restart:
do {
set_current_state(state);
- if (intel_wait_update_request(&wait, req))
+ if (intel_wait_update_request(&wait, rq))
break;
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
if (signal_pending_state(state, current)) {
} while (1);
GEM_BUG_ON(!intel_wait_has_seqno(&wait));
- GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
/* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(req, wait.seqno, state, 5))
+ if (__i915_spin_request(rq, wait.seqno, state, 5))
goto complete;
set_current_state(state);
- if (intel_engine_add_wait(req->engine, &wait))
- /* In order to check that we haven't missed the interrupt
+ if (intel_engine_add_wait(rq->engine, &wait))
+ /*
+ * In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
* coherent check on the seqno before we sleep.
*/
goto wakeup;
if (flags & I915_WAIT_LOCKED)
- __i915_wait_request_check_and_reset(req);
+ __i915_wait_request_check_and_reset(rq);
for (;;) {
if (signal_pending_state(state, current)) {
timeout = io_schedule_timeout(timeout);
if (intel_wait_complete(&wait) &&
- intel_wait_check_request(&wait, req))
+ intel_wait_check_request(&wait, rq))
break;
set_current_state(state);
wakeup:
- /* Carefully check if the request is complete, giving time
+ /*
+ * Carefully check if the request is complete, giving time
* for the seqno to be visible following the interrupt.
* We also have to check in case we are kicked by the GPU
* reset in order to drop the struct_mutex.
*/
- if (__i915_request_irq_complete(req))
+ if (__i915_request_irq_complete(rq))
break;
- /* If the GPU is hung, and we hold the lock, reset the GPU
+ /*
+ * If the GPU is hung, and we hold the lock, reset the GPU
* and then check for completion. On a full reset, the engine's
 * HW seqno will be advanced past us and we are complete.
* If we do a partial reset, we have to wait for the GPU to
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
/* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(req, wait.seqno, state, 2))
+ if (__i915_spin_request(rq, wait.seqno, state, 2))
break;
- if (!intel_wait_check_request(&wait, req)) {
- intel_engine_remove_wait(req->engine, &wait);
+ if (!intel_wait_check_request(&wait, rq)) {
+ intel_engine_remove_wait(rq->engine, &wait);
goto restart;
}
}
- intel_engine_remove_wait(req->engine, &wait);
+ intel_engine_remove_wait(rq->engine, &wait);
complete:
__set_current_state(TASK_RUNNING);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(errq, &reset);
- remove_wait_queue(&req->execute, &exec);
- trace_i915_gem_request_wait_end(req);
+ remove_wait_queue(&rq->execute, &exec);
+ trace_i915_request_wait_end(rq);
return timeout;
}
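A hedged usage sketch of the renamed wait (the caller and the 100ms budget are hypothetical): the return value is the remaining timeout on completion, or a negative error code otherwise.

	long remaining;

	/* Bounded, interruptible wait; struct_mutex deliberately not held. */
	remaining = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				      msecs_to_jiffies(100));
	if (remaining < 0)
		return remaining; /* interrupted by a signal, or timed out */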
static void engine_retire_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *next;
+ struct i915_request *request, *next;
u32 seqno = intel_engine_get_seqno(engine);
LIST_HEAD(retire);
spin_unlock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next, &retire, link)
- i915_gem_request_retire(request);
+ i915_request_retire(request);
}
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
- if (!dev_priv->gt.active_requests)
+ if (!i915->gt.active_requests)
return;
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, i915, id)
engine_retire_requests(engine);
}
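As the lockdep assertion above requires, retirement runs under struct_mutex; a minimal sketch of a hypothetical call site:

	mutex_lock(&i915->drm.struct_mutex);
	i915_retire_requests(i915); /* reap every completed request */
	mutex_unlock(&i915->drm.struct_mutex);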
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
-#include "selftests/i915_gem_request.c"
+#include "selftests/i915_request.c"
#endif
/*
- * Copyright © 2008-2015 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
*
*/
-#ifndef I915_GEM_REQUEST_H
-#define I915_GEM_REQUEST_H
+#ifndef I915_REQUEST_H
+#define I915_REQUEST_H
#include <linux/dma-fence.h>
struct drm_file;
struct drm_i915_gem_object;
-struct drm_i915_gem_request;
+struct i915_request;
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
u32 seqno;
};
#define I915_DEPENDENCY_ALLOC BIT(0)
};
-/* Requests exist in a complex web of interdependencies. Each request
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
I915_PRIORITY_INVALID = INT_MIN
};
-struct i915_gem_capture_list {
- struct i915_gem_capture_list *next;
+struct i915_capture_list {
+ struct i915_capture_list *next;
struct i915_vma *vma;
};
*
* The requests are reference counted.
*/
-struct drm_i915_gem_request {
+struct i915_request {
struct dma_fence fence;
spinlock_t lock;
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
- * i915_gem_request_free() will then decrement the refcount on the
+ * i915_request_free() will then decrement the refcount on the
* context.
*/
struct i915_gem_context *ctx;
struct intel_timeline *timeline;
struct intel_signal_node signaling;
- /* Fences for the various phases in the request's lifetime.
+ /*
+ * Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
* dependencies. When it is signaled, the request is ready to run.
wait_queue_entry_t submitq;
wait_queue_head_t execute;
- /* A list of everyone we wait upon, and everyone who waits upon us.
+ /*
+ * A list of everyone we wait upon, and everyone who waits upon us.
* Even though we will not be submitted to the hardware before the
* submit fence is signaled (it waits for all external events as well
* as our own requests), the scheduler still needs to know the
struct i915_priotree priotree;
struct i915_dependency dep;
- /** GEM sequence number associated with this request on the
+ /**
+ * GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
* error state dump only).
*/
struct i915_vma *batch;
- /** Additional buffers requested by userspace to be captured upon
+ /**
+ * Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
* active reference - all objects on this list must also be
* on the active_list (of their final request).
*/
- struct i915_gem_capture_list *capture_list;
+ struct i915_capture_list *capture_list;
struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
return fence->ops == &i915_fence_ops;
}
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+struct i915_request * __must_check
+i915_request_alloc(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+void i915_request_retire_upto(struct i915_request *rq);
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+ BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
- return container_of(fence, struct drm_i915_gem_request, fence);
+ return container_of(fence, struct i915_request, fence);
}
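Because the dma_fence is the first member, the conversion runs both ways; a sketch of recovering a request from a generic fence (the fence variable is hypothetical):

	if (dma_fence_is_i915(fence)) {
		struct i915_request *rq = to_request(fence);

		/* e.g. inspect rq->engine or rq->global_seqno */
	}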
-static inline struct drm_i915_gem_request *
-i915_gem_request_get(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get(struct i915_request *rq)
{
- return to_request(dma_fence_get(&req->fence));
+ return to_request(dma_fence_get(&rq->fence));
}
-static inline struct drm_i915_gem_request *
-i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get_rcu(struct i915_request *rq)
{
- return to_request(dma_fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&rq->fence));
}
static inline void
-i915_gem_request_put(struct drm_i915_gem_request *req)
+i915_request_put(struct i915_request *rq)
{
- dma_fence_put(&req->fence);
+ dma_fence_put(&rq->fence);
}
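A sketch of the reference helpers keeping a request alive across a wait, mirroring i915_gem_active_wait() later in this header (rq is assumed to come from a tracker or similar):

	rq = i915_request_get(rq); /* hold a reference across the wait */
	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq); /* may free the request on last reference */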
/**
- * i915_gem_request_global_seqno - report the current global seqno
+ * i915_request_global_seqno - report the current global seqno
 * @request: the request
*
* A request is assigned a global seqno only when it is on the hardware
* after the read, it is indeed complete).
*/
static u32
-i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
+i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
}
-int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
+int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
-int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence);
-
-void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
-#define i915_add_request(req) \
- __i915_add_request(req, false)
+int i915_request_await_dma_fence(struct i915_request *rq,
+ struct dma_fence *fence);
-void __i915_gem_request_submit(struct drm_i915_gem_request *request);
-void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_request_add(struct i915_request *rq, bool flush_caches);
+#define i915_request_add(rq) \
+ __i915_request_add(rq, false)
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void __i915_request_submit(struct i915_request *request);
+void i915_request_submit(struct i915_request *request);
-struct intel_rps_client;
-#define NO_WAITBOOST ERR_PTR(-1)
-#define IS_RPS_CLIENT(p) (!IS_ERR(p))
-#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+void __i915_request_unsubmit(struct i915_request *request);
+void i915_request_unsubmit(struct i915_request *request);
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
__attribute__((nonnull(1)));
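Taken together, the declarations above form the request lifecycle; a minimal sketch under the new names (engine, ctx, fence and emit_payload() are hypothetical, and error unwinding is elided):

	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Order this request after any external fence. */
	err = i915_request_await_dma_fence(rq, fence);
	if (err == 0)
		err = emit_payload(rq); /* hypothetical command emission */

	/* Point of no return: an allocated request must always be added. */
	i915_request_add(rq);
	return err;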
}
static inline bool
-__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
+__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
- seqno == i915_gem_request_global_seqno(req);
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+ seqno == i915_request_global_seqno(rq);
}
-static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+static inline bool i915_request_completed(const struct i915_request *rq)
{
u32 seqno;
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
- return __i915_gem_request_completed(req, seqno);
+ return __i915_request_completed(rq, seqno);
}
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+static inline bool i915_request_started(const struct i915_request *rq)
{
u32 seqno;
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
seqno - 1);
}
static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
{
- const struct drm_i915_gem_request *rq =
- container_of(pt, const struct drm_i915_gem_request, priotree);
+ const struct i915_request *rq =
+ container_of(pt, const struct i915_request, priotree);
- return i915_gem_request_completed(rq);
+ return i915_request_completed(rq);
}
-/* We treat requests as fences. This is not be to confused with our
+void i915_retire_requests(struct drm_i915_private *i915);
+
+/*
+ * We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
* GPU, for example, we should not rewrite an object's PTE whilst the GPU
struct i915_gem_active;
typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
- struct drm_i915_gem_request *);
+ struct i915_request *);
struct i915_gem_active {
- struct drm_i915_gem_request __rcu *request;
+ struct i915_request __rcu *request;
struct list_head link;
i915_gem_retire_fn retire;
};
void i915_gem_retire_noop(struct i915_gem_active *,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
/**
* init_request_active - prepares the activity tracker for use
*/
static inline void
i915_gem_active_set(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
list_move(&active->link, &request->active_list);
rcu_assign_pointer(active->request, request);
active->retire = fn ?: i915_gem_retire_noop;
}
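A sketch of wiring up a tracker, following the overlay usage further below (retire_fn is hypothetical; the list manipulation requires struct_mutex):

	init_request_active(&overlay->last_flip, retire_fn);

	/* Later, under struct_mutex: record rq as the most recent user. */
	i915_gem_active_set(&overlay->last_flip, rq);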
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
- /* Inside the error capture (running with the driver in an unknown
+ /*
+ * Inside the error capture (running with the driver in an unknown
* state), we want to bend the rules slightly (a lot).
*
* Work is in progress to make it safer, in the meantime this keeps
* It does not obtain a reference on the request for the caller, so the caller
* must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
return rcu_dereference_protected(active->request,
* still active, or NULL. It does not obtain a reference on the request
* for the caller, so the caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = i915_gem_active_raw(active, mutex);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
return request;
* i915_gem_active_get() returns a reference to the active request, or NULL
* if the active tracker is idle. The caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
- return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+ return i915_request_get(i915_gem_active_peek(active, mutex));
}
/**
* if the active tracker is idle. The caller must hold the RCU read lock, but
* the returned pointer is safe to use outside of RCU.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
- /* Performing a lockless retrieval of the active request is super
+ /*
+ * Performing a lockless retrieval of the active request is super
* tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
* slab of request objects will not be freed whilst we hold the
* RCU read lock. It does not guarantee that the request itself
*
* Thread A Thread B
*
- * req = active.request
- * retire(req) -> free(req);
- * (req is now first on the slab freelist)
+ * rq = active.request
+ * retire(rq) -> free(rq);
+ * (rq is now first on the slab freelist)
* active.request = NULL
*
- * req = new submission on a new object
- * ref(req)
+ * rq = new submission on a new object
+ * ref(rq)
*
* To prevent the request from being reused whilst the caller
* uses it, we take a reference like normal. Whilst acquiring
*
* It is then imperative that we do not zero the request on
* reallocation, so that we can chase the dangling pointers!
- * See i915_gem_request_alloc().
+ * See i915_request_alloc().
*/
do {
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
- /* An especially silly compiler could decide to recompute the
- * result of i915_gem_request_completed, more specifically
+ /*
+ * An especially silly compiler could decide to recompute the
+ * result of i915_request_completed, more specifically
* re-emit the load for request->fence.seqno. A race would catch
* a later seqno value, which could flip the result from true to
* false. Which means part of the instructions below might not
* be executed, while later on instructions are executed. Due to
* barriers within the refcounting the inconsistency can't reach
- * past the call to i915_gem_request_get_rcu, but not executing
- * that while still executing i915_gem_request_put() creates
+ * past the call to i915_request_get_rcu, but not executing
+ * that while still executing i915_request_put() creates
* havoc enough. Prevent this with a compiler barrier.
*/
barrier();
- request = i915_gem_request_get_rcu(request);
+ request = i915_request_get_rcu(request);
- /* What stops the following rcu_access_pointer() from occurring
- * before the above i915_gem_request_get_rcu()? If we were
+ /*
+ * What stops the following rcu_access_pointer() from occurring
+ * before the above i915_request_get_rcu()? If we were
* to read the value before pausing to get the reference to
* the request, we may not notice a change in the active
* tracker.
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
+ * i915_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_gem_request_get_rcu()
+ * when successful. That is, if i915_request_get_rcu()
* returns the request (and so with the reference counted
* incremented) then the following read for rcu_access_pointer()
* must occur after the atomic operation and so confirm
if (!request || request == rcu_access_pointer(active->request))
return rcu_pointer_handoff(request);
- i915_gem_request_put(request);
+ i915_request_put(request);
} while (1);
}
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_gem_request_put().
+ * The reference should be freed with i915_request_put().
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
rcu_read_lock();
request = __i915_gem_active_get_rcu(active);
* can then wait upon the request, and afterwards release our reference,
* free of any locking.
*
- * This function wraps i915_wait_request(), see it for the full details on
+ * This function wraps i915_request_wait(), see it for the full details on
* the arguments.
*
* Returns 0 if successful, or a negative error code.
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(request);
+ ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
}
return ret < 0 ? ret : 0;
i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
return 0;
- ret = i915_wait_request(request,
+ ret = i915_request_wait(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
-#endif /* I915_GEM_REQUEST_H */
+#endif /* I915_REQUEST_H */
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from),
+ TP_PROTO(struct i915_request *to, struct i915_request *from),
TP_ARGS(to, from),
TP_STRUCT__entry(
__entry->seqno)
);
-TRACE_EVENT(i915_gem_request_queue,
- TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_queue,
+ TP_PROTO(struct i915_request *rq, u32 flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
__entry->seqno, __entry->flags)
);
-DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req),
+DECLARE_EVENT_CLASS(i915_request,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
__entry->seqno, __entry->global)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_add,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
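Note that the event names visible to userspace change with this rename; assuming a standard tracefs layout, the renamed event is reached via:

/*
 * echo 1 > /sys/kernel/debug/tracing/events/i915/i915_request_add/enable
 * (the old i915_gem_request_add path disappears along with the old name)
 */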
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
-DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_submit,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_execute,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DECLARE_EVENT_CLASS(i915_gem_request_hw,
- TP_PROTO(struct drm_i915_gem_request *req,
- unsigned int port),
- TP_ARGS(req, port),
+DECLARE_EVENT_CLASS(i915_request_hw,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global_seqno = req->global_seqno;
- __entry->port = port;
- ),
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global_seqno = rq->global_seqno;
+ __entry->port = port;
+ ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
__entry->dev, __entry->hw_id, __entry->ring,
__entry->global_seqno, __entry->port)
);
-DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
- TP_ARGS(req, port)
+DEFINE_EVENT(i915_request_hw, i915_request_in,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_out,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
-trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
+trace_i915_request_submit(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
+trace_i915_request_execute(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
+trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}
static inline void
-trace_i915_gem_request_out(struct drm_i915_gem_request *req)
+trace_i915_request_out(struct i915_request *rq)
{
}
#endif
__entry->waiters)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_retire,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_wait_begin,
+ TP_PROTO(struct i915_request *rq, unsigned int flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
* less desirable.
*/
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->hw_id = req->ctx->hw_id;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
__entry->flags = flags;
),
!!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_wait_end,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
TRACE_EVENT(i915_flip_request,
#include <drm/drm_gem.h>
static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
+i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
enum i915_cache_level;
spin_unlock_irq(&b->rb_lock);
}
-static bool signal_complete(const struct drm_i915_gem_request *request)
+static bool signal_complete(const struct i915_request *request)
{
if (!request)
return false;
return __i915_request_irq_complete(request);
}
-static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+static struct i915_request *to_signaler(struct rb_node *rb)
{
- return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
+ return rb_entry(rb, struct i915_request, signaling.node);
}
static void signaler_set_rtpriority(void)
}
static void __intel_engine_remove_signal(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
}
}
-static struct drm_i915_gem_request *
+static struct i915_request *
get_first_signal_rcu(struct intel_breadcrumbs *b)
{
/*
* the required memory barriers.
*/
do {
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = rcu_dereference(b->first_signal);
if (request)
- request = i915_gem_request_get_rcu(request);
+ request = i915_request_get_rcu(request);
barrier();
if (!request || request == rcu_access_pointer(b->first_signal))
return rcu_pointer_handoff(request);
- i915_gem_request_put(request);
+ i915_request_put(request);
} while (1);
}
{
struct intel_engine_cs *engine = arg;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
/* Install ourselves with high priority to reduce signalling latency */
signaler_set_rtpriority();
&request->fence.flags)) {
local_bh_disable();
dma_fence_signal(&request->fence);
- GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!i915_request_completed(request));
local_bh_enable(); /* kick start the tasklets */
}
*/
do_schedule = need_resched();
}
- i915_gem_request_put(request);
+ i915_request_put(request);
if (unlikely(do_schedule)) {
if (kthread_should_park())
return 0;
}
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup)
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- seqno = i915_gem_request_global_seqno(request);
+ seqno = i915_request_global_seqno(request);
if (!seqno)
return;
*/
wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
- if (!__i915_gem_request_completed(request, seqno)) {
+ if (!__i915_request_completed(request, seqno)) {
struct rb_node *parent, **p;
bool first;
wake_up_process(b->signaler);
}
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+void intel_engine_cancel_signaling(struct i915_request *request)
{
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
struct wait_queue_entry wait;
struct drm_crtc *crtc;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
};
static int do_rps_boost(struct wait_queue_entry *_wait,
unsigned mode, int sync, void *key)
{
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct drm_i915_gem_request *rq = wait->request;
+ struct i915_request *rq = wait->request;
/*
* If we missed the vblank, but the request is already running it
* is reasonable to assume that it will complete before the next
* vblank without our intervention, so leave RPS alone.
*/
- if (!i915_gem_request_started(rq))
+ if (!i915_request_started(rq))
gen6_rps_boost(rq, NULL);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
- struct intel_rps_client *rps);
+void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
return 0;
}
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+int intel_ring_workarounds_emit(struct i915_request *rq)
{
- struct i915_workarounds *w = &req->i915->workarounds;
+ struct i915_workarounds *w = &rq->i915->workarounds;
u32 *cs;
int ret, i;
if (w->count == 0)
return 0;
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(req, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, w->count * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
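The begin/advance pairing seen here is the emission contract used throughout: reserve a dword count, write exactly that many dwords, then advance. A minimal sketch assuming a live rq:

	u32 *cs;

	cs = intel_ring_begin(rq, 4); /* reserve exactly four dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs); /* cs must match the reserved count */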
{
const struct i915_gem_context * const kernel_context =
engine->i915->kernel_context;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
}
static void print_request(struct drm_printer *m,
- struct drm_i915_gem_request *rq,
+ struct i915_request *rq,
const char *prefix)
{
drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno,
- i915_gem_request_completed(rq) ? "!" : "",
+ i915_request_completed(rq) ? "!" : "",
rq->ctx->hw_id, rq->fence.seqno,
rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&execlists->port[idx], &count);
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct rb_node *rb;
if (header) {
drm_printf(m, "\tRequests:\n");
rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
+ struct i915_request, link);
if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tfirst ");
rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
+ struct i915_request, link);
if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tlast ");
GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
-static void guc_add_request(struct intel_guc *guc,
- struct drm_i915_gem_request *rq)
+static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&port[n], &count);
}
}
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(port_isset(port));
- port_set(port, i915_gem_request_get(rq));
+ port_set(port, i915_request_get(rq));
}
static void guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *last = NULL;
+ struct i915_request *last = NULL;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
bool submit = false;
do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
INIT_LIST_HEAD(&rq->priotree.link);
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq,
- port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ while (rq && i915_request_completed(rq)) {
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
execlists_port_complete(execlists, port);
return ptr_pack_bits(p, first, 1);
}
-static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
assert_ring_tail_valid(rq->ring, rq->tail);
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
int last_prio = I915_PRIORITY_INVALID;
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline->requests,
link) {
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
return;
- __i915_gem_request_unsubmit(rq);
+ __i915_request_unsubmit(rq);
unwind_wa_tail(rq);
GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
}
static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
- unsigned long status)
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
/*
* Only used when GVT-g is enabled now. When GVT-g is disabled,
}
static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct i915_request *rq)
{
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
}
static inline void
-execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+execlists_context_schedule_out(struct i915_request *rq)
{
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
-static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
struct i915_hw_ppgtt *ppgtt =
unsigned int n;
for (n = execlists_num_ports(&engine->execlists); n--; ) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int count;
u64 desc;
return true;
}
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
if (port_isset(port))
- i915_gem_request_put(port_request(port));
+ i915_request_put(port_request(port));
- port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+ port_set(port, port_pack(i915_request_get(rq), port_count(port)));
}
static void inject_preempt_context(struct intel_engine_cs *engine)
struct execlist_port *port = execlists->port;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct i915_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent
- * ring:HEAD == req:TAIL as we resubmit the
+ * ring:HEAD == rq:TAIL as we resubmit the
* request. See gen8_emit_breadcrumb() for
* where we prepare the padding after the
* end of the request.
do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
/*
}
INIT_LIST_HEAD(&rq->priotree.link);
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
unsigned int num_ports = execlists_num_ports(execlists);
while (num_ports-- && port_isset(port)) {
- struct drm_i915_gem_request *rq = port_request(port);
+ struct i915_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
memset(port, 0, sizeof(*port));
port++;
static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_gem_request_completed(rq))
+ if (!i915_request_completed(rq))
dma_fence_set_error(&rq->fence, -EIO);
}
INIT_LIST_HEAD(&rq->priotree.link);
dma_fence_set_error(&rq->fence, -EIO);
- __i915_gem_request_submit(rq);
+ __i915_request_submit(rq);
}
rb = rb_next(rb);
tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
while (head != tail) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int status;
unsigned int count;
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!i915_gem_request_completed(rq));
+ GEM_BUG_ON(!i915_request_completed(rq));
execlists_context_schedule_out(rq);
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
execlists_port_complete(execlists, port);
} else {
tasklet_hi_schedule(&engine->execlists.tasklet);
}
-static void execlists_submit_request(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *pt_to_request(struct i915_priotree *pt)
{
- return container_of(pt, struct drm_i915_gem_request, priotree);
+ return container_of(pt, struct i915_request, priotree);
}
static struct intel_engine_cs *
return engine;
}
-static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+static void execlists_schedule(struct i915_request *request, int prio)
{
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (i915_gem_request_completed(request))
+ if (i915_request_completed(request))
return;
if (prio <= READ_ONCE(request->priotree.priority))
i915_gem_context_put(ctx);
}
-static int execlists_request_alloc(struct drm_i915_gem_request *request)
+static int execlists_request_alloc(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
}
static void reset_common_ring(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
unwind_wa_tail(request);
}
-static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *engine = req->engine;
+ struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
int i;
- cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+ cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit. */
- if (req->ctx->ppgtt &&
- (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
- !intel_vgpu_active(req->i915)) {
- ret = intel_logical_ring_emit_pdps(req);
+ if (rq->ctx->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ !intel_vgpu_active(rq->i915)) {
+ ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
+ rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
-static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
+static int gen8_emit_flush(struct i915_request *request, u32 mode)
{
u32 cmd, *cs;
return 0;
}
-static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
struct intel_engine_cs *engine = request->engine;
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
/* Ensure there's always at least one preemption point per-request. */
*cs++ = MI_ARB_CHECK;
request->wa_tail = intel_ring_offset(request, cs);
}
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
-static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
- u32 *cs)
+static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
}
static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
-static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret)
return ret;
- ret = intel_rcs_context_init_mocs(req);
+ ret = intel_rcs_context_init_mocs(rq);
/*
	 * Failing to program the MOCS is non-fatal. The system will not
* run at peak performance. So generate an error and carry on.
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_emit(req);
+ return i915_gem_render_state_emit(rq);
}
/**
/**
* emit_mocs_control_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+static int emit_mocs_control_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
- enum intel_engine_id engine = req->engine->id;
+ enum intel_engine_id engine = rq->engine->id;
unsigned int index;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
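The elided body between begin and advance is a register/value stream, roughly as follows (mocs_register() is assumed to be the file's per-engine register lookup, and the entry layout an assumption about the elided table):

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
	for (index = 0; index < table->size; index++) {
		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
		*cs++ = table->table[index].control_value;
	}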
/**
* emit_mocs_l3cc_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
unsigned int i;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
/**
* intel_rcs_context_init_mocs() - program the MOCS register.
- * @req: Request to set up the MOCS tables for.
+ * @rq: Request to set up the MOCS tables for.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
*
* Return: 0 on success, otherwise the error status.
*/
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+int intel_rcs_context_init_mocs(struct i915_request *rq)
{
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(req->i915, &t)) {
+ if (get_mocs_settings(rq->i915, &t)) {
/* Program the RCS control registers */
- ret = emit_mocs_control_table(req, &t);
+ ret = emit_mocs_control_table(rq, &t);
if (ret)
return ret;
/* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(req, &t);
+ ret = emit_mocs_l3cc_table(rq, &t);
if (ret)
return ret;
}
#include <drm/drmP.h>
#include "i915_drv.h"
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
int intel_mocs_init_engine(struct intel_engine_cs *engine);
}
static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
&overlay->i915->drm.struct_mutex);
- i915_gem_active_set(&overlay->last_flip, req);
- i915_add_request(req);
+ i915_gem_active_set(&overlay->last_flip, rq);
+ i915_request_add(rq);
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
- intel_overlay_submit_request(overlay, req, retire);
+ intel_overlay_submit_request(overlay, rq, retire);
return i915_gem_active_retire(&overlay->last_flip,
&overlay->i915->drm.struct_mutex);
}
-static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
- return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_alloc(engine, dev_priv->kernel_context);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs;
WARN_ON(overlay->active);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = overlay->flip_addr | OFC_UPDATE;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, req, NULL);
+ return intel_overlay_do_wait_request(overlay, rq, NULL);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
- intel_overlay_submit_request(overlay, req, NULL);
+ intel_overlay_submit_request(overlay, rq, NULL);
return 0;
}
}
static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
}
static void intel_overlay_off_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
- return intel_overlay_do_wait_request(overlay, req,
+ return intel_overlay_do_wait_request(overlay, rq,
intel_overlay_off_tail);
}
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, req,
+ ret = intel_overlay_do_wait_request(overlay, rq,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
+void gen6_rps_boost(struct i915_request *rq,
struct intel_rps_client *rps_client)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
return;
- /* Serializes with i915_gem_request_retire() */
+ /* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
}
static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+ if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
cmd |= MI_INVALIDATE_ISP;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
+intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs;
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(req);
+ ret = intel_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
+gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
u32 *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
- gen7_render_ring_cs_stall_wa(req);
+ gen7_render_ring_cs_stall_wa(rq);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
}
static void reset_ring_common(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/*
* RC6 must be prevented until the reset is complete and the engine
}
}
-static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
+static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret != 0)
return ret;
- ret = i915_gem_render_state_emit(req);
+ ret = i915_gem_render_state_emit(rq);
if (ret)
return ret;
return init_workarounds_ring(engine);
}
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
+static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int num_rings = 0;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
continue;
- mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
+ mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(mbox_reg);
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
num_rings++;
}
}
static void cancel_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline->requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_gem_request_completed(request))
+ if (!i915_request_completed(request))
dma_fence_set_error(&request->fence, -EIO);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
+static void i9xx_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- i915_gem_request_submit(request);
+ i915_request_submit(request);
I915_WRITE_TAIL(request->engine,
intel_ring_set_tail(request->ring, request->tail));
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
*cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req, cs);
- assert_ring_tail_valid(req->ring, req->tail);
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
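i9xx_emit_breadcrumb() writes exactly i9xx_emit_breadcrumb_sz dwords (store the global seqno into the status page, then raise a user interrupt), and the ring reserves exactly that many; if the two drift apart the reservation is wrong. A self-contained sketch of how such an emitter/size invariant can be checked; the opcode values below are stand-ins, not the real MI_* encodings:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in opcodes; the real MI_* values live in the driver headers. */
#define MI_STORE_DWORD_INDEX	0x21
#define MI_USER_INTERRUPT	0x02
#define HWS_INDEX		0x30

/* Writes the 4-dword breadcrumb: store the seqno to the status page,
 * then raise a user interrupt. Returns the advanced cursor. */
static uint32_t *emit_breadcrumb(uint32_t *cs, uint32_t seqno)
{
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = HWS_INDEX;
	*cs++ = seqno;
	*cs++ = MI_USER_INTERRUPT;
	return cs;
}

static const int emit_breadcrumb_sz = 4;	/* must match the emitter */

int main(void)
{
	uint32_t ring[16];
	uint32_t *end = emit_breadcrumb(ring, 42);

	/* The invariant the driver relies on when reserving space. */
	assert(end - ring == emit_breadcrumb_sz);
	printf("breadcrumb uses %td dwords\n", end - ring);
	return 0;
}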
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, cs));
+ return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
}
static int
-gen6_ring_sync_to(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
{
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
+ u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = signal->global_seqno - 1;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
}
static int
-bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
}
static int
-i965_emit_bb_start(struct drm_i915_gem_request *req,
+i965_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_emit_bb_start(struct drm_i915_gem_request *req,
+i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
+ u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cs_offset;
*cs++ = 0xdeadbeef;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- cs = intel_ring_begin(req, 6 + 2);
+ cs = intel_ring_begin(rq, 6 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
/* ... and execute it. */
offset = cs_offset;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-i915_emit_bb_start(struct drm_i915_gem_request *req,
+i915_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
intel_ring_reset(engine->buffer, 0);
}
-static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
return 0;
}
-static int remap_l3(struct drm_i915_gem_request *rq, int slice)
+static int remap_l3(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
return 0;
}
-static int switch_context(struct drm_i915_gem_request *rq)
+static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *to_ctx = rq->ctx;
return ret;
}
-static int ring_request_alloc(struct drm_i915_gem_request *request)
+static int ring_request_alloc(struct i915_request *request)
{
int ret;
static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
- struct drm_i915_gem_request *target;
+ struct i915_request *target;
long timeout;
lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- timeout = i915_wait_request(target,
+ timeout = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
- i915_gem_request_retire_upto(target);
+ i915_request_retire_upto(target);
intel_ring_update_space(ring);
GEM_BUG_ON(ring->space < bytes);
return 0;
}
-u32 *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int num_dwords)
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
- struct intel_ring *ring = req->ring;
+ struct intel_ring *ring = rq->ring;
const unsigned int remain_usable = ring->effective_size - ring->emit;
const unsigned int bytes = num_dwords * sizeof(u32);
unsigned int need_wrap = 0;
/* Packets must be qword aligned. */
GEM_BUG_ON(num_dwords & 1);
- total_bytes = bytes + req->reserved_space;
+ total_bytes = bytes + rq->reserved_space;
GEM_BUG_ON(total_bytes > ring->effective_size);
if (unlikely(total_bytes > remain_usable)) {
* wrap and only need to effectively wait for the
* reserved size from the start of ringbuffer.
*/
- total_bytes = req->reserved_space + remain_actual;
+ total_bytes = rq->reserved_space + remain_actual;
}
}
* overallocation and the assumption is that then we never need
* to wait (which has the risk of failing with EINTR).
*
- * See also i915_gem_request_alloc() and i915_add_request().
+ * See also i915_request_alloc() and i915_request_add().
*/
- GEM_BUG_ON(!req->reserved_space);
+ GEM_BUG_ON(!rq->reserved_space);
ret = wait_for_space(ring, total_bytes);
if (unlikely(ret))
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+int intel_ring_cacheline_align(struct i915_request *rq)
{
- int num_dwords =
- (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
u32 *cs;
if (num_dwords == 0)
return 0;
- num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- cs = intel_ring_begin(req, num_dwords);
+ num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+ cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
while (num_dwords--)
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
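intel_ring_cacheline_align() is pure arithmetic on ring->emit: if the tail sits mid-cacheline, pad up to the boundary with MI_NOOPs. The dword-count calculation in isolation, assuming the 64-byte cacheline that CACHELINE_BYTES denotes here:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES	64

/* How many u32 NOOPs are needed to bring a byte offset 'emit' up to
 * the next cacheline boundary (0 if already aligned). */
static unsigned int pad_dwords(unsigned int emit)
{
	unsigned int rem = (emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	return rem ? CACHELINE_BYTES / sizeof(uint32_t) - rem : 0;
}

int main(void)
{
	assert(pad_dwords(0) == 0);	/* already aligned */
	assert(pad_dwords(64) == 0);
	assert(pad_dwords(4) == 15);	/* 4 bytes in: 15 dwords to go */
	assert(pad_dwords(60) == 1);
	printf("all padding cases check out\n");
	return 0;
}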
-static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
+static void gen6_bsd_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-hsw_emit_bb_start(struct drm_i915_gem_request *req,
+hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_emit_bb_start(struct drm_i915_gem_request *req,
+gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+
#include "i915_gem_batch_pool.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+
#include "i915_pmu.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_printer;
unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
bool stalled;
};
struct i915_vma *vma;
};
-struct drm_i915_gem_request;
+struct i915_request;
/*
* Engine IDs definitions.
/**
* @request_count: combined request and submission count
*/
- struct drm_i915_gem_request *request_count;
+ struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
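port_request() and port_count() pack a 2-bit submission count into the low bits of the request pointer, bits that allocator alignment guarantees are zero. The kernel's ptr_mask_bits()/ptr_unmask_bits() helpers live in i915_utils.h; the following is an illustrative userspace reimplementation of the same packing, not the kernel's code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_BITS 2

/* Recover the pointer by clearing the low COUNT_BITS. */
static void *ptr_mask_bits(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~(uintptr_t)((1u << COUNT_BITS) - 1));
}

/* Recover the packed count from the low COUNT_BITS. */
static unsigned int ptr_unmask_bits(void *ptr)
{
	return (uintptr_t)ptr & ((1u << COUNT_BITS) - 1);
}

/* Pack a small count into a sufficiently aligned pointer. */
static void *ptr_pack_bits(void *ptr, unsigned int bits)
{
	assert(((uintptr_t)ptr & ((1u << COUNT_BITS) - 1)) == 0);
	assert(bits < (1u << COUNT_BITS));
	return (void *)((uintptr_t)ptr | bits);
}

int main(void)
{
	static int request;	/* stand-in for a struct i915_request */
	void *port = ptr_pack_bits(&request, 3);

	assert(ptr_mask_bits(port) == &request);
	assert(ptr_unmask_bits(port) == 3);
	printf("count=%u ptr=%p\n", ptr_unmask_bits(port), ptr_mask_bits(port));
	return 0;
}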
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct task_struct *signaler; /* used for fence signalling */
- struct drm_i915_gem_request __rcu *first_signal;
+ struct i915_request __rcu *first_signal;
struct timer_list fake_irq; /* used after a missed interrupt */
struct timer_list hangcheck; /* detect missed interrupts */
int (*init_hw)(struct intel_engine_cs *engine);
void (*reset_hw)(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
- int (*request_alloc)(struct drm_i915_gem_request *req);
- int (*init_context)(struct drm_i915_gem_request *req);
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
- int (*emit_flush)(struct drm_i915_gem_request *request,
- u32 mode);
+ int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct drm_i915_gem_request *req,
+ int (*emit_bb_start)(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
- u32 *cs);
+ void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* This is called from an atomic context with irqs disabled; must
* be irq safe.
*/
- void (*submit_request)(struct drm_i915_gem_request *req);
+ void (*submit_request)(struct i915_request *rq);
/* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
*
* Called under the struct_mutex.
*/
- void (*schedule)(struct drm_i915_gem_request *request,
- int priority);
+ void (*schedule)(struct i915_request *request, int priority);
/*
* Cancel all requests on the hardware, or queued for execution.
} mbox;
/* AKA wait() */
- int (*sync_to)(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal);
- u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
+ int (*sync_to)(struct i915_request *rq,
+ struct i915_request *signal);
+ u32 *(*signal)(struct i915_request *rq, u32 *cs);
} semaphore;
struct intel_engine_execlists execlists;
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+int __must_check intel_ring_cacheline_align(struct i915_request *rq);
int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
-u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int n);
+u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-static inline void
-intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
/* Dummy function.
*
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
-static inline u32
-intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
return pos & (ring->size - 1);
}
-static inline u32
-intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - req->ring->vaddr;
- GEM_BUG_ON(offset > req->ring->size);
- return intel_ring_wrap(req->ring, offset);
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
}
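intel_ring_wrap() depends on the ring size being a power of two so that a mask replaces a modulo, and intel_ring_offset() wraps an offset equal to ring->size back to 0, since (per the comment above) writing ring->size as a tail hangs some GPUs. A quick standalone model under that power-of-two assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4096u		/* must be a power of two */

static uint32_t ring_wrap(uint32_t pos)
{
	return pos & (RING_SIZE - 1);	/* cheap modulo */
}

int main(void)
{
	assert(ring_wrap(0) == 0);
	assert(ring_wrap(100) == 100);
	/* An offset of exactly RING_SIZE is the same slot as 0; storing
	 * the wrapped value means ring->size is never written as a tail. */
	assert(ring_wrap(RING_SIZE) == 0);
	assert(ring_wrap(RING_SIZE + 8) == 8);
	printf("wrap arithmetic ok\n");
	return 0;
}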
static inline void
{
/* Whilst writes to the tail are strictly ordered, there is no
* serialisation between readers and the writers. The tail may be
- * read by i915_gem_request_retire() just as it is being updated
+ * read by i915_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
}
int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
+int intel_ring_workarounds_emit(struct i915_request *rq);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
wait->tsk = current;
wait->request = rq;
static inline bool
intel_wait_update_request(struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool
static inline bool
intel_wait_check_request(const struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup);
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+void intel_engine_cancel_signaling(struct i915_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
u32 dword,
u32 value)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
int flags = 0;
int err;
if (err)
return err;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq))
return PTR_ERR(rq);
reservation_object_unlock(vma->resv);
err_request:
- __i915_add_request(rq, err == 0);
+ __i915_request_add(rq, err == 0);
return err;
}
u32 v)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
}
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
unsigned int flags;
goto err_vma;
}
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
err_request:
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
err_batch:
i915_vma_unpin(batch);
err_vma:
mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
igt_evict_ctl.fail_if_busy = false;
if (IS_ERR(rq)) {
if (err < 0)
break;
- i915_add_request(rq);
+ i915_request_add(rq);
count++;
err = 0;
} while (1);
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
int err;
if (err)
return err;
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
}
i915_vma_move_to_active(vma, rq, 0);
- i915_add_request(rq);
+ i915_request_add(rq);
i915_gem_object_set_active_reference(obj);
i915_vma_unpin(vma);
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
-selftest(requests, i915_gem_request_live_selftests)
+selftest(requests, i915_request_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
selftest(timelines, i915_gem_timeline_mock_selftests)
-selftest(requests, i915_gem_request_mock_selftests)
+selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
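These selftest(name, func) entries, like the live list before them, are x-macro lists: each consumer defines selftest() to taste (an enum, a dispatch table, a module parameter) before including the header, so a single list drives all three. A compact single-file illustration of the idiom; the test names and stubs below are invented for the demo:

#include <stdio.h>

/* The list lives in one place; think i915_mock_selftests.h. */
#define SELFTESTS(x) \
	x(sanitycheck) \
	x(requests) \
	x(objects)

/* First expansion: generate the test functions (stubs here). */
#define DEFINE_STUB(name) \
	static int name##_selftest(void) { printf("run %s\n", #name); return 0; }
SELFTESTS(DEFINE_STUB)

/* Second expansion: generate the dispatch table. */
struct subtest { const char *name; int (*func)(void); };
#define TABLE_ENTRY(name) { #name, name##_selftest },
static const struct subtest tests[] = { SELFTESTS(TABLE_ENTRY) };

int main(void)
{
	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		tests[i].func();
	return 0;
}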
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
if (!request)
goto out_unlock;
- i915_add_request(request);
+ i915_request_add(request);
err = 0;
out_unlock:
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, then wait upon it */
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
goto out_unlock;
}
- i915_add_request(request);
+ i915_request_add(request);
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
- if (!i915_gem_request_completed(request)) {
+ if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, treat it as a fence and wait upon it */
}
mutex_lock(&i915->drm.struct_mutex);
- i915_add_request(request);
+ i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
static int igt_request_rewind(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request, *vip;
+ struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
int err = -EINVAL;
goto err_context_0;
}
- i915_gem_request_get(request);
- i915_add_request(request);
+ i915_request_get(request);
+ i915_request_add(request);
ctx[1] = mock_context(i915, "B");
vip = mock_request(i915->engine[RCS], ctx[1], 0);
/* Simulate preemption by manual reordering */
if (!mock_cancel_request(request)) {
pr_err("failed to cancel request (already executed)!\n");
- i915_add_request(vip);
+ i915_request_add(vip);
goto err_context_1;
}
- i915_gem_request_get(vip);
- i915_add_request(vip);
+ i915_request_get(vip);
+ i915_request_add(vip);
rcu_read_lock();
request->engine->submit_request(request);
rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
- if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+ if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
goto err;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("low priority request already completed\n");
goto err;
}
err = 0;
err:
- i915_gem_request_put(vip);
+ i915_request_put(vip);
mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
- i915_gem_request_put(request);
+ i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
return err;
}
-int i915_gem_request_mock_selftests(void)
+int i915_request_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_add_request),
{
struct drm_i915_private *i915 = t->i915;
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- request = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request = i915_request_alloc(engine,
+ i915->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
* for latency.
*/
- i915_add_request(request);
+ i915_request_add(request);
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
return ERR_PTR(err);
}
-static struct drm_i915_gem_request *
+static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err;
- request = i915_gem_request_alloc(engine,
- engine->i915->kernel_context);
+ request = i915_request_alloc(engine, engine->i915->kernel_context);
if (IS_ERR(request))
return request;
goto out_request;
out_request:
- __i915_add_request(request, err == 0);
+ __i915_request_add(request, err == 0);
return err ? ERR_PTR(err) : request;
}
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
err = PTR_ERR(request);
goto out_batch;
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
goto out_batch;
}
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES];
+ struct i915_request *request[I915_NUM_ENGINES];
struct i915_vma *batch;
struct live_test t;
unsigned int id;
}
for_each_engine(engine, i915, id) {
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
}
i915_vma_move_to_active(batch, request[id], 0);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
}
for_each_engine(engine, i915, id) {
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
- i915_gem_request_put(request[id]);
+ GEM_BUG_ON(!i915_request_completed(request[id]));
+ i915_request_put(request[id]);
request[id] = NULL;
}
out_request:
for_each_engine(engine, i915, id)
if (request[id])
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
- struct drm_i915_gem_request *prev = NULL;
+ struct i915_request *request[I915_NUM_ENGINES] = {};
+ struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
struct live_test t;
unsigned int id;
goto out_unlock;
}
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
}
if (prev) {
- err = i915_gem_request_await_dma_fence(request[id],
- &prev->fence);
+ err = i915_request_await_dma_fence(request[id],
+ &prev->fence);
if (err) {
- i915_add_request(request[id]);
+ i915_request_add(request[id]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
prev = request[id];
}
for_each_engine(engine, i915, id) {
long timeout;
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[id]));
}
err = end_live_test(&t);
}
i915_vma_put(request[id]->batch);
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
-int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+int i915_request_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_request),
}
static u64 hws_address(const struct i915_vma *hws,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
static int emit_recurse_batch(struct hang *h,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
return err;
}
-static struct drm_i915_gem_request *
+static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
int err;
if (i915_gem_object_is_active(h->obj)) {
h->batch = vaddr;
}
- rq = i915_gem_request_alloc(engine, h->ctx);
+ rq = i915_request_alloc(engine, h->ctx);
if (IS_ERR(rq))
return rq;
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
return ERR_PTR(err);
}
return rq;
}
-static u32 hws_seqno(const struct hang *h,
- const struct drm_i915_gem_request *rq)
+static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
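hws_seqno() reads a per-context u32 slot in the status page, with fence.context folded modulo the number of slots in a page, and wait_for_hang() just below compares against rq->fence.seqno via i915_seqno_passed(), whose (s32)(a - b) >= 0 form survives u32 wraparound. A sketch of both calculations under those assumptions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define HWS_SLOTS (PAGE_SIZE / sizeof(uint32_t))

/* Wraparound-safe "seq1 is at or after seq2" comparison. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Each context gets a u32 slot in the status page, chosen by a simple
 * modulo; collisions are tolerable in a selftest. */
static unsigned int hws_slot(uint64_t fence_context)
{
	return fence_context % HWS_SLOTS;
}

int main(void)
{
	assert(seqno_passed(2, 1));
	assert(!seqno_passed(1, 2));
	/* Correct across the u32 wrap: 5 is "after" 0xfffffffb. */
	assert(seqno_passed(5, 0xfffffffbu));
	printf("context 1 -> slot %u, context 1025 -> slot %u\n",
	       hws_slot(1), hws_slot(1025));
	return 0;
}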
flush_test(h->i915, I915_WAIT_LOCKED);
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+static bool wait_for_hang(struct hang *h, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
rq->fence.seqno),
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
goto fini;
}
- i915_gem_request_get(rq);
+ i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
- timeout = i915_wait_request(rq,
+ timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
if (timeout < 0) {
err = timeout;
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
break;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_for_hang(&h, rq)) {
intel_engine_dump(engine, &p,
"%s\n", engine->name);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err = -EIO;
break;
}
- i915_gem_request_put(rq);
+ i915_request_put(rq);
}
engine->hangcheck.stalled = true;
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
- struct drm_i915_gem_request *rq[2] = {};
+ struct i915_request *rq[2] = {};
struct i915_gem_context *ctx[2];
struct drm_file *file;
unsigned long count = 0;
while (!kthread_should_stop()) {
unsigned int idx = count++ & 1;
- struct drm_i915_gem_request *old = rq[idx];
- struct drm_i915_gem_request *new;
+ struct i915_request *old = rq[idx];
+ struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
- new = i915_gem_request_alloc(engine, ctx[idx]);
+ new = i915_request_alloc(engine, ctx[idx]);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
- rq[idx] = i915_gem_request_get(new);
- i915_add_request(new);
+ rq[idx] = i915_request_get(new);
+ i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
if (old) {
- i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(old);
+ i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(old);
}
}
for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_gem_request_put(rq[count]);
+ i915_request_put(rq[count]);
err_file:
mock_file_free(engine->i915, file);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
mutex_lock(&i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
break;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_for_hang(&h, rq)) {
intel_engine_dump(engine, &p,
"%s\n", engine->name);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err = -EIO;
break;
}
- i915_gem_request_put(rq);
+ i915_request_put(rq);
}
engine->hangcheck.stalled = true;
return __igt_reset_engine_others(arg, true);
}
-static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+static u32 fake_hangcheck(struct i915_request *rq)
{
u32 reset_count;
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
reset_count = fake_hangcheck(rq);
- timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+ timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
if (timeout < 0) {
pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
timeout);
}
out_rq:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
fini:
hang_fini(&h);
unlock:
goto unlock;
for_each_engine(engine, i915, id) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
goto fini;
}
- i915_gem_request_get(prev);
- __i915_add_request(prev, true);
+ i915_request_get(prev);
+ __i915_request_add(prev, true);
count = 0;
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
rq = hang_create_request(&h, engine);
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
intel_engine_dump(prev->engine, &p,
"%s\n", prev->engine->name);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
prev->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
if (rq->fence.error) {
pr_err("Fence error status not zero [%d] after unrelated reset\n",
rq->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
- i915_gem_request_put(prev);
+ i915_request_put(prev);
prev = rq;
count++;
} while (time_before(jiffies, end_time));
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- i915_gem_request_put(prev);
+ i915_request_put(prev);
err = flush_test(i915, I915_WAIT_LOCKED);
if (err)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
struct hang h;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gpu_state *error;
int err;
goto err_fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
}
err_request:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err_fini:
hang_fini(&h);
err_unlock:
i915_gem_context_put(ctx);
}
-static int mock_request_alloc(struct drm_i915_gem_request *request)
+static int mock_request_alloc(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
return 0;
}
-static int mock_emit_flush(struct drm_i915_gem_request *request,
+static int mock_emit_flush(struct i915_request *request,
unsigned int flags)
{
return 0;
}
-static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+static void mock_emit_breadcrumb(struct i915_request *request,
u32 *flags)
{
}
-static void mock_submit_request(struct drm_i915_gem_request *request)
+static void mock_submit_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
GEM_BUG_ON(!request->global_seqno);
spin_lock_irq(&engine->hw_lock);
for_each_engine(engine, i915, id)
mock_engine_flush(engine);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
}
static void mock_device_release(struct drm_device *dev)
#include "mock_engine.h"
#include "mock_request.h"
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = i915_gem_request_alloc(engine, context);
+ request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
return &mock->base;
}
-bool mock_cancel_request(struct drm_i915_gem_request *request)
+bool mock_cancel_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
- i915_gem_request_unsubmit(request);
+ i915_request_unsubmit(request);
return was_queued;
}
#include <linux/list.h>
-#include "../i915_gem_request.h"
+#include "../i915_request.h"
struct mock_request {
- struct drm_i915_gem_request base;
+ struct i915_request base;
struct list_head link;
unsigned long delay;
};
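struct mock_request embeds the real request as its first member, and the mock engine recovers the wrapper with container_of(), the standard C idiom for extending a base object without touching it. A standalone illustration with a local container_of(); the kernel's own macro lives in <linux/kernel.h>:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request {		/* stands in for struct i915_request */
	unsigned int seqno;
};

struct mock_request_demo {	/* the selftest-style wrapper */
	struct request base;
	unsigned long delay;
};

static void submit(struct request *rq)
{
	/* Core code hands us the base pointer; recover the wrapper. */
	struct mock_request_demo *mock =
		container_of(rq, struct mock_request_demo, base);

	printf("seqno=%u delay=%lu\n", rq->seqno, mock->delay);
}

int main(void)
{
	struct mock_request_demo mock = { .base = { .seqno = 7 }, .delay = 100 };

	submit(&mock.base);
	return 0;
}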
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay);
-bool mock_cancel_request(struct drm_i915_gem_request *request);
+bool mock_cancel_request(struct i915_request *request);
#endif /* !__MOCK_REQUEST__ */