/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */
6 #ifndef __INTEL_CONTEXT_TYPES__
7 #define __INTEL_CONTEXT_TYPES__
9 #include <linux/average.h>
10 #include <linux/kref.h>
11 #include <linux/list.h>
12 #include <linux/mutex.h>
13 #include <linux/types.h>
15 #include "i915_active_types.h"
16 #include "i915_sw_fence.h"
17 #include "i915_utils.h"
18 #include "intel_engine_types.h"
19 #include "intel_sseu.h"
21 #include "uc/intel_guc_fwif.h"
23 #define CONTEXT_REDZONE POISON_INUSE
24 DECLARE_EWMA(runtime, 3, 8);
26 struct i915_gem_context;
27 struct i915_gem_ww_ctx;
29 struct intel_breadcrumbs;
/*
 * intel_context_ops: backend vtable for an intel_context.
 *
 * A submission backend (execlists, GuC, ring) supplies one of these; the
 * core context code dispatches through it for allocation, pinning, power
 * management and teardown of the per-engine context state.
 */
struct intel_context_ops {
	unsigned long flags; /* capability bits tested via the COPS_* defines below */
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	/* One-time allocation of backend state for @ce. */
	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	/*
	 * Pinning is split so that pre_pin may acquire reservations under
	 * the ww context before pin maps the state at @vaddr.
	 */
	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	/* Runtime-PM style bracketing of context activity. */
	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	/* Final release, invoked when the context's kref drops to zero. */
	void (*destroy)(struct kref *kref);

	/* virtual engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
65 struct intel_context {
67 * Note: Some fields may be accessed under RCU.
69 * Unless otherwise noted a field can safely be assumed to be protected
70 * by strong reference counting.
73 struct kref ref; /* no kref_get_unless_zero()! */
77 struct intel_engine_cs *engine;
78 struct intel_engine_cs *inflight;
79 #define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
80 #define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
81 #define intel_context_inflight(ce) \
82 __intel_context_inflight(READ_ONCE((ce)->inflight))
83 #define intel_context_inflight_count(ce) \
84 __intel_context_inflight_count(READ_ONCE((ce)->inflight))
86 struct i915_address_space *vm;
87 struct i915_gem_context __rcu *gem_context;
90 * @signal_lock protects the list of requests that need signaling,
91 * @signals. While there are any requests that need signaling,
92 * we add the context to the breadcrumbs worker, and remove it
93 * upon completion/cancellation of the last request.
95 struct list_head signal_link; /* Accessed under RCU */
96 struct list_head signals; /* Guarded by signal_lock */
97 spinlock_t signal_lock; /* protects signals, the list of requests */
99 struct i915_vma *state;
101 struct intel_ring *ring;
102 struct intel_timeline *timeline;
105 #define CONTEXT_BARRIER_BIT 0
106 #define CONTEXT_ALLOC_BIT 1
107 #define CONTEXT_INIT_BIT 2
108 #define CONTEXT_VALID_BIT 3
109 #define CONTEXT_CLOSED_BIT 4
110 #define CONTEXT_USE_SEMAPHORES 5
111 #define CONTEXT_BANNED 6
112 #define CONTEXT_FORCE_SINGLE_SUBMISSION 7
113 #define CONTEXT_NOPREEMPT 8
114 #define CONTEXT_LRCA_DIRTY 9
115 #define CONTEXT_IS_PARKED 10
129 u32 tag; /* cookie passed to HW to track this context on submission */
131 /* Time on GPU as tracked by the hw. */
133 struct ewma_runtime avg;
136 I915_SELFTEST_DECLARE(u32 num_underflow);
137 I915_SELFTEST_DECLARE(u32 max_underflow);
140 unsigned int active_count; /* protected by timeline->mutex */
143 struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
146 * active: Active tracker for the rq activity (inc. external) on this
147 * intel_context object.
149 struct i915_active active;
151 const struct intel_context_ops *ops;
153 /** sseu: Control eu/slice partitioning */
154 struct intel_sseu sseu;
157 * pinned_contexts_link: List link for the engine's pinned contexts.
158 * This is only used if this is a perma-pinned kernel context and
159 * the list is assumed to only be manipulated during driver load
160 * or unload time so no mutex protection currently.
162 struct list_head pinned_contexts_link;
164 u8 wa_bb_page; /* if set, page num reserved for context workarounds */
167 /** lock: protects everything in guc_state */
170 * sched_state: scheduling state of this context using GuC
175 * fences: maintains of list of requests that have a submit
176 * fence related to GuC submission
178 struct list_head fences;
182 /** lock: protects everything in guc_active */
184 /** requests: active requests on this context */
185 struct list_head requests;
188 /* GuC scheduling state flags that do not require a lock. */
189 atomic_t guc_sched_state_no_lock;
191 /* GuC LRC descriptor ID */
194 /* GuC LRC descriptor reference count */
198 * GuC ID link - in list when unpinned but guc_id still valid in GuC
200 struct list_head guc_id_link;
202 /* GuC context blocked fence */
203 struct i915_sw_fence guc_blocked;
206 * GuC priority management
209 u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
212 #endif /* __INTEL_CONTEXT_TYPES__ */