patch-5.15.79-rt54.patch
platform/kernel/linux-rpi.git: drivers/gpu/drm/i915/gt/intel_context_types.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
        unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

        int (*alloc)(struct intel_context *ce);

        void (*ban)(struct intel_context *ce, struct i915_request *rq);

        int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
        int (*pin)(struct intel_context *ce, void *vaddr);
        void (*unpin)(struct intel_context *ce);
        void (*post_unpin)(struct intel_context *ce);

        void (*cancel_request)(struct intel_context *ce,
                               struct i915_request *rq);

        void (*enter)(struct intel_context *ce);
        void (*exit)(struct intel_context *ce);

        void (*sched_disable)(struct intel_context *ce);

        void (*reset)(struct intel_context *ce);
        void (*destroy)(struct kref *kref);

        /* virtual engine/context interface */
        struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
                                                unsigned int count);
        struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
                                               unsigned int sibling);
};
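
/*
 * Illustrative sketch, not part of the original header: the pin hooks
 * above are expected to pair up, pre_pin()/pin() on the way in and
 * unpin()/post_unpin() on the way out. The wrapper below is hypothetical;
 * only the hook ordering is the point.
 *
 *	static int example_do_pin(struct intel_context *ce,
 *				  struct i915_gem_ww_ctx *ww)
 *	{
 *		void *vaddr;
 *		int err;
 *
 *		err = ce->ops->pre_pin(ce, ww, &vaddr);
 *		if (err)
 *			return err;
 *
 *		err = ce->ops->pin(ce, vaddr);
 *		if (err)
 *			ce->ops->post_unpin(ce);
 *
 *		return err;
 *	}
 *
 * Teardown runs in reverse: ce->ops->unpin(ce), then ce->ops->post_unpin(ce).
 */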

struct intel_context {
        /*
         * Note: Some fields may be accessed under RCU.
         *
         * Unless otherwise noted a field can safely be assumed to be protected
         * by strong reference counting.
         */
        union {
                struct kref ref; /* no kref_get_unless_zero()! */
                struct rcu_head rcu;
        };

        struct intel_engine_cs *engine;
        struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
        __intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
        __intel_context_inflight_count(READ_ONCE((ce)->inflight))

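        /*
         * Illustrative note, not part of the original header: @inflight
         * packs a small in-flight request count into the low bits of the
         * engine pointer (free thanks to pointer alignment). A hypothetical
         * reader splits the two halves with the helpers above:
         *
         *	struct intel_engine_cs *engine = intel_context_inflight(ce);
         *	unsigned int count = intel_context_inflight_count(ce);
         *
         *	if (engine)
         *		... // `count` requests from ce are in flight on engine
         */
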
        struct i915_address_space *vm;
        struct i915_gem_context __rcu *gem_context;

        /*
         * @signal_lock protects the list of requests that need signaling,
         * @signals. While there are any requests that need signaling,
         * we add the context to the breadcrumbs worker, and remove it
         * upon completion/cancellation of the last request.
         */
        struct list_head signal_link; /* Accessed under RCU */
        struct list_head signals; /* Guarded by signal_lock */
        spinlock_t signal_lock; /* protects signals, the list of requests */

        struct i915_vma *state;
        u32 ring_size;
        struct intel_ring *ring;
        struct intel_timeline *timeline;

        unsigned long flags;
#define CONTEXT_BARRIER_BIT             0
#define CONTEXT_ALLOC_BIT               1
#define CONTEXT_INIT_BIT                2
#define CONTEXT_VALID_BIT               3
#define CONTEXT_CLOSED_BIT              4
#define CONTEXT_USE_SEMAPHORES          5
#define CONTEXT_BANNED                  6
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT               8
#define CONTEXT_LRCA_DIRTY              9
#define CONTEXT_IS_PARKED               10

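        /*
         * Illustrative sketch, not part of the original header: @flags is
         * driven with the standard atomic bitops, e.g. (hypothetical caller):
         *
         *	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
         *		... // one-shot state setup, e.g. via ce->ops->alloc(ce)
         *
         *	set_bit(CONTEXT_VALID_BIT, &ce->flags);
         *
         *	if (test_bit(CONTEXT_BANNED, &ce->flags))
         *		return -EIO; // refuse new submissions
         */
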
        struct {
                u64 timeout_us;
        } watchdog;

        u32 *lrc_reg_state;
        union {
                struct {
                        u32 lrca;
                        u32 ccid;
                };
                u64 desc;
        } lrc;
        u32 tag; /* cookie passed to HW to track this context on submission */

        /* Time on GPU as tracked by the hw. */
        struct {
                struct ewma_runtime avg;
                u64 total;
                u32 last;
                I915_SELFTEST_DECLARE(u32 num_underflow);
                I915_SELFTEST_DECLARE(u32 max_underflow);
        } runtime;

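        /*
         * Illustrative note, not part of the original header:
         * DECLARE_EWMA(runtime, 3, 8) above generates ewma_runtime_init(),
         * ewma_runtime_add() and ewma_runtime_read() (see <linux/average.h>),
         * so each new runtime sample is folded in with a 1/8 weight:
         *
         *	ewma_runtime_init(&ce->runtime.avg);
         *	ewma_runtime_add(&ce->runtime.avg, dt);	// dt: hypothetical sample
         *	avg = ewma_runtime_read(&ce->runtime.avg);
         */
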
        unsigned int active_count; /* protected by timeline->mutex */

        atomic_t pin_count;
        struct mutex pin_mutex; /* guards pinning and the transition onto the GPU */

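        /*
         * Illustrative sketch, not part of the original header: the usual
         * pattern is a lockless fast path on @pin_count, taking @pin_mutex
         * only for the first pin / last unpin (simplified, hypothetical):
         *
         *	if (atomic_inc_not_zero(&ce->pin_count))
         *		return 0;	// already pinned, fast path
         *
         *	mutex_lock(&ce->pin_mutex);
         *	... // pin backing state, then atomic_inc(&ce->pin_count)
         *	mutex_unlock(&ce->pin_mutex);
         */
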
        /**
         * active: Active tracker for the rq activity (including external)
         * on this intel_context object.
         */
        struct i915_active active;

        const struct intel_context_ops *ops;

        /** sseu: Control eu/slice partitioning */
        struct intel_sseu sseu;

        /**
         * pinned_contexts_link: List link for the engine's pinned contexts.
         * This is only used if this is a perma-pinned kernel context, and
         * the list is assumed to be manipulated only during driver load or
         * unload, so it currently has no mutex protection.
         */
        struct list_head pinned_contexts_link;

        u8 wa_bb_page; /* if set, page num reserved for context workarounds */

        struct {
                /** lock: protects everything in guc_state */
                spinlock_t lock;
                /**
                 * sched_state: scheduling state of this context using GuC
                 * submission
                 */
                u16 sched_state;
                /*
                 * fences: maintains a list of requests that have a submit
                 * fence related to GuC submission
                 */
                struct list_head fences;
        } guc_state;

        struct {
                /** lock: protects everything in guc_active */
                spinlock_t lock;
                /** requests: active requests on this context */
                struct list_head requests;
        } guc_active;

        /* GuC scheduling state flags that do not require a lock. */
        atomic_t guc_sched_state_no_lock;

        /* GuC LRC descriptor ID */
        u16 guc_id;

        /* GuC LRC descriptor reference count */
        atomic_t guc_id_ref;

        /*
         * GuC ID link - in list when unpinned but guc_id still valid in GuC
         */
        struct list_head guc_id_link;

        /* GuC context blocked fence */
        struct i915_sw_fence guc_blocked;

        /*
         * GuC priority management
         */
        u8 guc_prio;
        u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
};
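
/*
 * Illustrative sketch, not part of the original header: guc_id and
 * guc_id_ref cooperate roughly as a cached-ID scheme (flow simplified
 * and hypothetical):
 *
 *	// pin: take a reference on the descriptor ID
 *	if (atomic_fetch_add(1, &ce->guc_id_ref) == 0)
 *		... // first user: assign a guc_id, unlink from guc_id_link
 *
 *	// unpin: drop the reference but keep guc_id cached
 *	if (atomic_dec_and_test(&ce->guc_id_ref))
 *		... // park on guc_id_link so the ID can be stolen/reclaimed
 */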

#endif /* __INTEL_CONTEXT_TYPES__ */