Previously, we were storing the ring size in the ring pointer before it
was actually allocated. We would then guard setting the ring size by
checking for CONTEXT_ALLOC_BIT. This is error-prone at best and really
only saves us a few bytes on something that already burns at least 4K.
Instead, this patch adds a new ring_size field and makes everything use
that.
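
To make the difference concrete, here is a condensed before/after sketch
pieced together from the hunks below (illustrative only, not a compilable
excerpt):

    /* Before: the size was smuggled through the not-yet-allocated pointer. */
    ce->ring = __intel_context_ring_size(SZ_16K); /* really a size, not a ring */
    ...
    /* ...and had to be cast back to an integer when the ring was created. */
    ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);

    /* After: an explicit field; ce->ring stays NULL until a real ring exists. */
    ce->ring = NULL;
    ce->ring_size = SZ_16K;
    ...
    ring = intel_engine_create_ring(engine, ce->ring_size);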
v2 (Daniel Vetter):
- Replace 512 * SZ_4K with SZ_2M

v2 (Jason Ekstrand):
- Rebase on top of page migration code
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210708154835.528166-3-jason@jlekstrand.net
GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
RCU_INIT_POINTER(ce->gem_context, ctx);
- if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
- ce->ring = __intel_context_ring_size(SZ_16K);
+ ce->ring_size = SZ_16K;
if (rcu_access_pointer(ctx->vm)) {
struct i915_address_space *vm;
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
- ce->ring = __intel_context_ring_size(SZ_4K);
+ ce->ring = NULL;
+ ce->ring_size = SZ_4K;
ewma_runtime_init(&ce->runtime.avg);
struct i915_request *intel_context_create_request(struct intel_context *ce);
-static inline struct intel_ring *__intel_context_ring_size(u64 sz)
-{
- return u64_to_ptr(struct intel_ring, sz);
-}
-
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
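
The helper deleted above leaned on i915's u64_to_ptr() cast macro. For
reference, that macro (in i915_utils.h upstream) is essentially the
following, quoted from memory and shown only to explain why storing a size
in a pointer compiled at all:

    /* Cast a u64 to a pointer of type T; typecheck() rejects non-u64 inputs. */
    #define u64_to_ptr(T, x) ({ \
            typecheck(u64, x); \
            (T *)(uintptr_t)(x); \
    })

The cast is mechanically sound, but nothing distinguishes a real ring
pointer from an encoded size, which is exactly the error-proneness the
commit message calls out.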
spinlock_t signal_lock; /* protects signals, the list of requests */
struct i915_vma *state;
+ u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
ce->timeline = page_pack_bits(NULL, hwsp);
- ce->ring = __intel_context_ring_size(ring_size);
+ ce->ring = NULL;
+ ce->ring_size = ring_size;
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
- ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+ ring = intel_engine_create_ring(engine, ce->ring_size);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err_vma;
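
Note how pin time previously had to recover the size by casting the fake
pointer back to an integer; with the explicit field, the call site reads
ce->ring_size directly and ce->ring is always either NULL or a genuine ring.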
if (IS_ERR(ce))
return ce;
- ce->ring = __intel_context_ring_size(SZ_256K);
+ ce->ring = NULL;
+ ce->ring_size = SZ_256K;
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(m->context->vm);
goto err_ce;
}
- tmp->ring = __intel_context_ring_size(ring_sz);
+ tmp->ring_size = ring_sz;
err = intel_context_pin(tmp);
if (err) {
return ce;
/* We build large requests to read the registers from the ring */
- ce->ring = __intel_context_ring_size(SZ_16K);
+ ce->ring_size = SZ_16K;
return ce;
}
if (IS_ERR(ce))
return PTR_ERR(ce);
- ce->ring = __intel_context_ring_size(ringsz);
+ ce->ring_size = ringsz;
w->rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(w->rq))
intel_context_set_single_submission(ce);
/* Max ring buffer size */
- if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
- const unsigned int ring_size = 512 * SZ_4K;
-
- ce->ring = __intel_context_ring_size(ring_size);
- }
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc))
+ ce->ring_size = SZ_2M;
s->shadow[i] = ce;
}
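
As a closing note on the v2 change: SZ_2M is exactly the old 512 * SZ_4K
(512 pages of 4 KiB = 2 MiB), so the final hunk only swaps in a clearer
name for the same maximum ring size.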