static void proto_context_close(struct i915_gem_proto_context *pc)
{
+ int i;
+
if (pc->vm)
i915_vm_put(pc->vm);
+ if (pc->user_engines) {
+ for (i = 0; i < pc->num_user_engines; i++)
+ kfree(pc->user_engines[i].siblings);
+ kfree(pc->user_engines);
+ }
kfree(pc);
}
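+/*
+ * Apply the I915_CONTEXT_PARAM_PERSISTENCE rules to a proto-context:
+ * allowing persistence requires hangcheck so a hung context can still be
+ * reaped, while revoking it requires preemption plus per-engine reset so
+ * the context can be cancelled without collateral damage.
+ */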
+static int proto_context_set_persistence(struct drm_i915_private *i915,
+ struct i915_gem_proto_context *pc,
+ bool persist)
+{
+ if (persist) {
+ /*
+ * Only contexts that are short-lived [that will expire or be
+ * reset] are allowed to survive past termination. We require
+ * hangcheck to ensure that the persistent requests are healthy.
+ */
+ if (!i915->params.enable_hangcheck)
+ return -EINVAL;
+
+ pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
+ } else {
+ /* To cancel a context we use "preempt-to-idle" */
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ return -ENODEV;
+
+ /*
+ * If the cancel fails, we then need to reset, cleanly!
+ *
+ * If the per-engine reset fails, all hope is lost! We resort
+ * to a full GPU reset in that unlikely case, but realistically
+ * if the engine could not reset, the full reset does not fare
+ * much better. The damage has been done.
+ *
+ * However, if we cannot reset an engine by itself, we cannot
+ * clean up a hanging persistent context without causing
+ * collateral damage, and we should not pretend we can by
+ * exposing the interface.
+ */
+ if (!intel_has_reset_engine(&i915->gt))
+ return -ENODEV;
+
+ pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
+ }
+
+ return 0;
+}
+
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
if (!pc)
return ERR_PTR(-ENOMEM);
+ pc->num_user_engines = -1;
+ pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
BIT(UCONTEXT_RECOVERABLE);
if (i915->params.enable_hangcheck)
return err;
}
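+/*
+ * Attach a user-created ppGTT VM to the proto-context. The lookup below
+ * is expected to return its own reference, which then replaces (and
+ * releases) any VM previously attached.
+ */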
+static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
+ struct i915_gem_proto_context *pc,
+ const struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct i915_address_space *vm;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!HAS_FULL_PPGTT(i915))
+ return -ENODEV;
+
+ if (upper_32_bits(args->value))
+ return -ENOENT;
+
+ vm = i915_gem_vm_lookup(fpriv, args->value);
+ if (!vm)
+ return -ENOENT;
+
+ if (pc->vm)
+ i915_vm_put(pc->vm);
+ pc->vm = vm;
+
+ return 0;
+}
+
+struct set_proto_ctx_engines {
+ struct drm_i915_private *i915;
+ unsigned int num_engines;
+ struct i915_gem_proto_engine *engines;
+};
+
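+/*
+ * CONTEXT_ENGINES load-balance extension: the chosen slot becomes either
+ * a plain physical engine (single sibling) or a balanced virtual engine
+ * whose siblings array is instantiated at context-creation time.
+ */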
+static int
+set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct i915_context_engines_load_balance __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_proto_ctx_engines *set = data;
+ struct drm_i915_private *i915 = set->i915;
+ struct intel_engine_cs **siblings;
+ u16 num_siblings, idx;
+ unsigned int n;
+ int err;
+
+ if (!HAS_EXECLISTS(i915))
+ return -ENODEV;
+
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
+ return -ENODEV; /* not implemented yet */
+
+ if (get_user(idx, &ext->engine_index))
+ return -EFAULT;
+
+ if (idx >= set->num_engines) {
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ idx, set->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->num_engines);
+ if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", idx);
+ return -EEXIST;
+ }
+
+ if (get_user(num_siblings, &ext->num_siblings))
+ return -EFAULT;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ err = check_user_mbz(&ext->mbz64);
+ if (err)
+ return err;
+
+ if (num_siblings == 0)
+ return 0;
+
+ siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
+ if (!siblings)
+ return -ENOMEM;
+
+ for (n = 0; n < num_siblings; n++) {
+ struct i915_engine_class_instance ci;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+ err = -EFAULT;
+ goto err_siblings;
+ }
+
+ siblings[n] = intel_engine_lookup_user(i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!siblings[n]) {
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ err = -EINVAL;
+ goto err_siblings;
+ }
+ }
+
+ if (num_siblings == 1) {
+ set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
+ set->engines[idx].engine = siblings[0];
+ kfree(siblings);
+ } else {
+ set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
+ set->engines[idx].num_siblings = num_siblings;
+ set->engines[idx].siblings = siblings;
+ }
+
+ return 0;
+
+err_siblings:
+ kfree(siblings);
+
+ return err;
+}
+
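+/*
+ * CONTEXT_ENGINES bond extension: the master/bond pairs supplied by
+ * userspace are fully validated, but nothing is recorded in the
+ * proto-context; the extension is effectively validate-only here,
+ * presumably kept for ABI compatibility.
+ */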
+static int
+set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
+{
+ struct i915_context_engines_bond __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_proto_ctx_engines *set = data;
+ struct drm_i915_private *i915 = set->i915;
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *master;
+ u16 idx, num_bonds;
+ int err, n;
+
+ if (get_user(idx, &ext->virtual_index))
+ return -EFAULT;
+
+ if (idx >= set->num_engines) {
+ drm_dbg(&i915->drm,
+ "Invalid index for virtual engine: %d >= %d\n",
+ idx, set->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->num_engines);
+ if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
+ drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
+ return -EINVAL;
+ }
+
+ if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
+ drm_dbg(&i915->drm,
+ "Bonding with virtual engines not allowed\n");
+ return -EINVAL;
+ }
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+ err = check_user_mbz(&ext->mbz64[n]);
+ if (err)
+ return err;
+ }
+
+ if (copy_from_user(&ci, &ext->master, sizeof(ci)))
+ return -EFAULT;
+
+ master = intel_engine_lookup_user(i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!master) {
+ drm_dbg(&i915->drm,
+ "Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+
+ if (get_user(num_bonds, &ext->num_bonds))
+ return -EFAULT;
+
+ for (n = 0; n < num_bonds; n++) {
+ struct intel_engine_cs *bond;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
+ return -EFAULT;
+
+ bond = intel_engine_lookup_user(i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!bond) {
+ drm_dbg(&i915->drm,
+ "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
+ [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
+ [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
+};
+
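+/*
+ * Replace the proto-context's default engine map with a user-defined one.
+ * The flat class/instance array is resolved first, then the extension
+ * chain (load-balance, bond) may rewrite individual slots before the
+ * result is committed to the proto-context.
+ */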
+static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
+ struct i915_gem_proto_context *pc,
+ const struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct set_proto_ctx_engines set = { .i915 = i915 };
+ struct i915_context_param_engines __user *user =
+ u64_to_user_ptr(args->value);
+ unsigned int n;
+ u64 extensions;
+ int err;
+
+ if (pc->num_user_engines >= 0) {
+ drm_dbg(&i915->drm, "Cannot set engines twice");
+ return -EINVAL;
+ }
+
+ if (args->size < sizeof(*user) ||
+ !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
+ drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+ args->size);
+ return -EINVAL;
+ }
+
+ set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
+ /* RING_MASK has no shift so we can use it directly here */
+ if (set.num_engines > I915_EXEC_RING_MASK + 1)
+ return -EINVAL;
+
+ set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
+ if (!set.engines)
+ return -ENOMEM;
+
+ for (n = 0; n < set.num_engines; n++) {
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *engine;
+
+ if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
+ kfree(set.engines);
+ return -EFAULT;
+ }
+
+ memset(&set.engines[n], 0, sizeof(set.engines[n]));
+
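+ /* An INVALID/NONE pair leaves the slot as a zeroed, TYPE_INVALID hole */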
+ if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
+ ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
+ continue;
+
+ engine = intel_engine_lookup_user(i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!engine) {
+ drm_dbg(&i915->drm,
+ "Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ kfree(set.engines);
+ return -ENOENT;
+ }
+
+ set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
+ set.engines[n].engine = engine;
+ }
+
+ err = -EFAULT;
+ if (!get_user(extensions, &user->extensions))
+ err = i915_user_extensions(u64_to_user_ptr(extensions),
+ set_proto_ctx_engines_extensions,
+ ARRAY_SIZE(set_proto_ctx_engines_extensions),
+ &set);
+ if (err) {
+ /* Free any siblings arrays the extensions allocated */
+ while (set.num_engines--)
+ kfree(set.engines[set.num_engines].siblings);
+ kfree(set.engines);
+ return err;
+ }
+
+ pc->num_user_engines = set.num_engines;
+ pc->user_engines = set.engines;
+
+ return 0;
+}
+
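+/*
+ * Stage an SSEU configuration (gen11 only) on the proto-context. With a
+ * user engine map the target engine is addressed by index; on the legacy
+ * map only render engine 0 may be configured.
+ */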
+static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ struct i915_gem_proto_context *pc,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_sseu *sseu;
+ int ret;
+
+ if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (GRAPHICS_VER(i915) != 11)
+ return -ENODEV;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.rsvd)
+ return -EINVAL;
+
+ if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
+ return -EINVAL;
+
+ if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
+ return -EINVAL;
+
+ if (pc->num_user_engines >= 0) {
+ int idx = user_sseu.engine.engine_instance;
+ struct i915_gem_proto_engine *pe;
+
+ if (idx >= pc->num_user_engines)
+ return -EINVAL;
+
+ pe = &pc->user_engines[idx];
+
+ /* Only render engine supports RPCS configuration. */
+ if (pe->engine->class != RENDER_CLASS)
+ return -EINVAL;
+
+ sseu = &pe->sseu;
+ } else {
+ /* Only render engine supports RPCS configuration. */
+ if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
+ return -EINVAL;
+
+ /* There is only one render engine */
+ if (user_sseu.engine.engine_instance != 0)
+ return -EINVAL;
+
+ sseu = &pc->legacy_rcs_sseu;
+ }
+
+ ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+ if (ret)
+ return ret;
+
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
+
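+/*
+ * Dispatch a single create-time parameter onto the proto-context.
+ * NO_ZEROMAP, BAN_PERIOD and RINGSIZE are rejected alongside unknown
+ * parameters.
+ */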
+static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
+ struct i915_gem_proto_context *pc,
+ struct drm_i915_gem_context_param *args)
+{
+ int ret = 0;
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value)
+ pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
+ else
+ pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
+ break;
+
+ case I915_CONTEXT_PARAM_BANNABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (!capable(CAP_SYS_ADMIN) && !args->value)
+ ret = -EPERM;
+ else if (args->value)
+ pc->user_flags |= BIT(UCONTEXT_BANNABLE);
+ else
+ pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
+ break;
+
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value)
+ pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
+ else
+ pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
+ break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ ret = validate_priority(fpriv->dev_priv, args);
+ if (!ret)
+ pc->sched.priority = args->value;
+ break;
+
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = set_proto_ctx_sseu(fpriv, pc, args);
+ break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = set_proto_ctx_vm(fpriv, pc, args);
+ break;
+
+ case I915_CONTEXT_PARAM_ENGINES:
+ ret = set_proto_ctx_engines(fpriv, pc, args);
+ break;
+
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ if (args->size)
+ ret = -EINVAL;
+ else
+ ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+ args->value);
+ break;
+
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
return err;
}
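+/*
+ * Instantiate a proto-engine array into real contexts: physical slots
+ * become ordinary engine contexts, balanced slots become execlists
+ * virtual engines, and invalid slots are left as NULL holes.
+ */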
+static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
+ unsigned int num_engines,
+ struct i915_gem_proto_engine *pe)
+{
+ struct i915_gem_engines *e, *err;
+ unsigned int n;
+
+ e = alloc_engines(num_engines);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set the count first so a partially built array is freed correctly */
+ e->num_engines = num_engines;
+
+ for (n = 0; n < num_engines; n++) {
+ struct intel_context *ce;
+ int ret;
+
+ switch (pe[n].type) {
+ case I915_GEM_ENGINE_TYPE_PHYSICAL:
+ ce = intel_context_create(pe[n].engine);
+ break;
+
+ case I915_GEM_ENGINE_TYPE_BALANCED:
+ ce = intel_execlists_create_virtual(pe[n].siblings,
+ pe[n].num_siblings);
+ break;
+
+ case I915_GEM_ENGINE_TYPE_INVALID:
+ default:
+ GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
+ continue;
+ }
+
+ if (IS_ERR(ce)) {
+ err = ERR_CAST(ce);
+ goto free_engines;
+ }
+
+ e->engines[n] = ce;
+
+ ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_engines;
+ }
+ }
+
+ return e;
+
+free_engines:
+ free_engines(e);
+ return err;
+}
+
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
- struct intel_sseu null_sseu = {};
int err;
int i;
INIT_LIST_HEAD(&ctx->stale.engines);
mutex_init(&ctx->engines_mutex);
- e = default_engines(ctx, null_sseu);
+ e = default_engines(ctx, pc->legacy_rcs_sseu);
if (IS_ERR(e)) {
err = PTR_ERR(e);
goto err_free;
i915_vm_put(&ppgtt->vm);
}
+ if (pc->num_user_engines >= 0) {
+ struct i915_gem_engines *engines;
+
+ engines = user_engines(ctx, pc->num_user_engines,
+ pc->user_engines);
+ if (IS_ERR(engines)) {
+ context_close(ctx);
+ return ERR_CAST(engines);
+ }
+
+ mutex_lock(&ctx->engines_mutex);
+ i915_gem_context_set_user_engines(ctx);
+ engines = rcu_replace_pointer(ctx->engines, engines, 1);
+ mutex_unlock(&ctx->engines_mutex);
+
+ free_engines(engines);
+ }
+
if (pc->single_timeline) {
ret = drm_syncobj_create(&ctx->syncobj,
DRM_SYNCOBJ_CREATE_SIGNALED,
}
struct create_ext {
- struct i915_gem_context *ctx;
+ struct i915_gem_proto_context *pc;
struct drm_i915_file_private *fpriv;
};
if (local.param.ctx_id)
return -EINVAL;
- return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
+ return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}
static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_context_create_ext *args = data;
- struct i915_gem_proto_context *pc;
+ struct i915_gem_context *ctx;
struct create_ext ext_data;
int ret;
u32 id;
return -EIO;
}
- pc = proto_context_create(i915, args->flags);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
-
- ext_data.ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
- if (IS_ERR(ext_data.ctx))
- return PTR_ERR(ext_data.ctx);
+ ext_data.pc = proto_context_create(i915, args->flags);
+ if (IS_ERR(ext_data.pc))
+ return PTR_ERR(ext_data.pc);
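+
+ /*
+ * The create extensions now run against the proto-context, so the
+ * real context is only created once every parameter has been
+ * validated.
+ */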
if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
create_extensions,
ARRAY_SIZE(create_extensions),
&ext_data);
- if (ret)
- goto err_ctx;
+ if (ret) {
+ proto_context_close(ext_data.pc);
+ return ret;
+ }
}
- ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
+ ctx = i915_gem_create_context(i915, ext_data.pc);
+ proto_context_close(ext_data.pc);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = gem_context_register(ctx, ext_data.fpriv, &id);
if (ret < 0)
goto err_ctx;
return 0;
err_ctx:
- context_close(ext_data.ctx);
+ context_close(ctx);
return ret;
}