return 0;
}
-struct context_barrier_task {
- struct i915_active base;
- void (*task)(void *data);
- void *data;
-};
-
-static void cb_retire(struct i915_active *base)
-{
- struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
-
- if (cb->task)
- cb->task(cb->data);
-
- i915_active_fini(&cb->base);
- kfree(cb);
-}
-
-I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
-static int context_barrier_task(struct i915_gem_context *ctx,
- intel_engine_mask_t engines,
- bool (*skip)(struct intel_context *ce, void *data),
- int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
- int (*emit)(struct i915_request *rq, void *data),
- void (*task)(void *data),
- void *data)
-{
- struct context_barrier_task *cb;
- struct i915_gem_engines_iter it;
- struct i915_gem_engines *e;
- struct i915_gem_ww_ctx ww;
- struct intel_context *ce;
- int err = 0;
-
- GEM_BUG_ON(!task);
-
- cb = kmalloc(sizeof(*cb), GFP_KERNEL);
- if (!cb)
- return -ENOMEM;
-
- i915_active_init(&cb->base, NULL, cb_retire, 0);
- err = i915_active_acquire(&cb->base);
- if (err) {
- kfree(cb);
- return err;
- }
-
- e = __context_engines_await(ctx, NULL);
- if (!e) {
- i915_active_release(&cb->base);
- return -ENOENT;
- }
-
- for_each_gem_engine(ce, e, it) {
- struct i915_request *rq;
-
- if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
- ce->engine->mask)) {
- err = -ENXIO;
- break;
- }
-
- if (!(ce->engine->mask & engines))
- continue;
-
- if (skip && skip(ce, data))
- continue;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- err = intel_context_pin_ww(ce, &ww);
- if (err)
- goto err;
-
- if (pin)
- err = pin(ce, &ww, data);
- if (err)
- goto err_unpin;
-
- rq = i915_request_create(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_unpin;
- }
-
- err = 0;
- if (emit)
- err = emit(rq, data);
- if (err == 0)
- err = i915_active_add_request(&cb->base, rq);
-
- i915_request_add(rq);
-err_unpin:
- intel_context_unpin(ce);
-err:
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- if (err)
- break;
- }
- i915_sw_fence_complete(&e->fence);
-
- cb->task = err ? NULL : task; /* caller needs to unwind instead */
- cb->data = data;
-
- i915_active_release(&cb->base);
-
- return err;
-}
-
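context_barrier_task() was the machinery behind the VM switch removed below. It
wrapped an i915_active in a small heap allocation, queued one empty (or
emit()-instrumented) request on every engine selected by @engines, and only
once all of those requests had retired did cb_retire() invoke task(); the
callback therefore ran strictly after every engine had switched away from the
old context state. A minimal sketch of the calling pattern, using a
hypothetical release_old() callback standing in for the sole in-tree user,
set_ppgtt() below:

	static void release_old(void *data)
	{
		/* runs from cb_retire() once every barrier request retires */
		i915_vm_close(data);
	}

	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, NULL, /* skip, pin, emit */
				   release_old, old_vm);
	if (err)
		i915_vm_close(old_vm); /* task() is not installed on error */

On failure the helper deliberately sets cb->task to NULL, so the caller must
release the resource itself.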
static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
return err;
}
-static void set_ppgtt_barrier(void *data)
-{
- struct i915_address_space *old = data;
-
- if (GRAPHICS_VER(old->i915) < 8)
- gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
-
- i915_vm_close(old);
-}
-
-static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
-{
- struct i915_address_space *vm = ce->vm;
-
- if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
- /* ppGTT is not part of the legacy context image */
- return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
-
- return 0;
-}
-
-static int emit_ppgtt_update(struct i915_request *rq, void *data)
-{
- struct i915_address_space *vm = rq->context->vm;
- struct intel_engine_cs *engine = rq->engine;
- u32 base = engine->mmio_base;
- u32 *cs;
- int i;
-
- if (i915_vm_is_4lvl(vm)) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
- *cs++ = lower_32_bits(pd_daddr);
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
- } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- int err;
-
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
- }
-
- return 0;
-}
-
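emit_ppgtt_update() rewrote the engine's PDP registers from within the barrier
request itself. On 4-level (48-bit) ppGTT only the PDP0 pair carries the PML4
address, so the request reserves 6 dwords: one MI_LOAD_REGISTER_IMM(2) header,
two register/value pairs for the upper and lower halves, and a trailing
MI_NOOP to keep the ring qword-aligned. On 3-level ppGTT all GEN8_3LVL_PDPES
(four) page-directory pointers are reloaded, giving 4 * 4 + 2 = 18 dwords,
with MI_LRI_FORCE_POSTED forcing the register writes to be posted. Engines
without logical ring contexts emit nothing here: on those platforms the page
directory is loaded when the context is pinned, which is why pin_ppgtt_update()
routes through gen6_ppgtt_pin() instead.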
-static bool skip_ppgtt_update(struct intel_context *ce, void *data)
-{
- if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
- return !ce->state;
- else
- return !atomic_read(&ce->pin_count);
-}
-
-static int set_ppgtt(struct drm_i915_file_private *file_priv,
- struct i915_gem_context *ctx,
- struct drm_i915_gem_context_param *args)
-{
- struct i915_address_space *vm, *old;
- int err;
-
- if (args->size)
- return -EINVAL;
-
- if (!rcu_access_pointer(ctx->vm))
- return -ENODEV;
-
- if (upper_32_bits(args->value))
- return -ENOENT;
-
- vm = i915_gem_vm_lookup(file_priv, args->value);
- if (!vm)
- return -ENOENT;
-
- err = mutex_lock_interruptible(&ctx->mutex);
- if (err)
- goto out;
-
- if (i915_gem_context_is_closed(ctx)) {
- err = -ENOENT;
- goto unlock;
- }
-
- if (vm == rcu_access_pointer(ctx->vm))
- goto unlock;
-
- old = __set_ppgtt(ctx, vm);
-
- /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
- lut_close(ctx);
-
- /*
- * We need to flush any requests using the current ppgtt before
- * we release it as the requests do not hold a reference themselves,
- * only indirectly through the context.
- */
- err = context_barrier_task(ctx, ALL_ENGINES,
- skip_ppgtt_update,
- pin_ppgtt_update,
- emit_ppgtt_update,
- set_ppgtt_barrier,
- old);
- if (err) {
- i915_vm_close(__set_ppgtt(ctx, old));
- i915_vm_close(old);
- lut_close(ctx); /* force a rebuild of the old obj:vma cache */
- }
-
-unlock:
- mutex_unlock(&ctx->mutex);
-out:
- i915_vm_put(vm);
- return err;
-}
-
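set_ppgtt() is the uapi this removal targets. It swapped ctx->vm under
ctx->mutex, invalidated the obj:vma lookup cache with lut_close(), and then
used context_barrier_task() with the callbacks above to keep the old address
space alive until every engine had executed the PDP update;
skip_ppgtt_update() filtered out engines with no context state allocated
(nothing stale to rewrite) and, on legacy ringbuffer platforms, contexts that
were not pinned. Only once all barrier requests retired did set_ppgtt_barrier()
close the old VM, unpinning the gen6 page directories first since those are
pinned per context. On error the swap is reversed: the first i915_vm_close()
drops the reference taken on the new VM, the second drops the old VM's
reference that the never-installed barrier callback would have released, and
the second lut_close() forces the cache to be rebuilt against the restored VM.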
int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
const struct drm_i915_gem_context_param_sseu *user,
ret = set_sseu(ctx, args);
break;
- case I915_CONTEXT_PARAM_VM:
- ret = set_ppgtt(fpriv, ctx, args);
- break;
-
case I915_CONTEXT_PARAM_ENGINES:
ret = set_engines(ctx, args);
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
+ case I915_CONTEXT_PARAM_VM:
default:
ret = -EINVAL;
break;
return err;
}
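With the setter gone, I915_CONTEXT_PARAM_VM joins the parameters rejected with
-EINVAL by the setparam path, while get_ppgtt() (kept above) still lets
userspace read the VM back. An address space can now only be bound to a
context at creation time. A hedged userspace sketch of that path, assuming the
existing uapi in include/uapi/drm/i915_drm.h and a vm_id obtained earlier from
DRM_IOCTL_I915_GEM_VM_CREATE (error handling elided):

	struct drm_i915_gem_context_create_ext_setparam set_vm = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_VM,
			.value = vm_id,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&set_vm,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
	/* create.ctx_id now names a context bound to vm_id */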
-static bool skip_unused_engines(struct intel_context *ce, void *data)
-{
- return !ce->state;
-}
-
-static void mock_barrier_task(void *data)
-{
- unsigned int *counter = data;
-
- ++*counter;
-}
-
-static int mock_context_barrier(void *arg)
-{
-#undef pr_fmt
-#define pr_fmt(x) "context_barrier_task():" # x
- struct drm_i915_private *i915 = arg;
- struct i915_gem_context *ctx;
- struct i915_request *rq;
- unsigned int counter;
- int err;
-
- /*
- * The context barrier provides us with a callback after it emits
- * a request; useful for retiring old state after loading new.
- */
-
- ctx = mock_context(i915, "mock");
- if (!ctx)
- return -ENOMEM;
-
- counter = 0;
- err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
- mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- if (counter == 0) {
- pr_err("Did not retire immediately with 0 engines\n");
- err = -EINVAL;
- goto out;
- }
-
- counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
- NULL, NULL, mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- if (counter == 0) {
- pr_err("Did not retire immediately for all unused engines\n");
- err = -EINVAL;
- goto out;
- }
-
- rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
- if (IS_ERR(rq)) {
- pr_err("Request allocation failed!\n");
- err = PTR_ERR(rq);
- goto out;
- }
- i915_request_add(rq);
-
- counter = 0;
- context_barrier_inject_fault = BIT(RCS0);
- err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
- mock_barrier_task, &counter);
- context_barrier_inject_fault = 0;
- if (err == -ENXIO)
- err = 0;
- else
- pr_err("Did not hit fault injection!\n");
- if (counter != 0) {
- pr_err("Invoked callback on error!\n");
- err = -EIO;
- }
- if (err)
- goto out;
-
- counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
- NULL, NULL, mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- mock_device_flush(i915);
- if (counter == 0) {
- pr_err("Did not retire on each active engines\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- mock_context_close(ctx);
- return err;
-#undef pr_fmt
-#define pr_fmt(x) x
-}
-
-int i915_gem_context_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(mock_context_barrier),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- mock_destroy_device(i915);
- return err;
-}
-
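The mock selftests go with the helper: mock_context_barrier() existed solely
to exercise context_barrier_task() (immediate retirement with zero engines,
the skip callback, fault injection via context_barrier_inject_fault, and
retirement on active engines), and since it was the only entry in the mock
tests[] array, i915_gem_context_mock_selftests() and its mock_gem_device()
scaffolding are removed wholesale. The live selftests below are unaffected.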
int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {