/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

19 #include "uc/intel_uc.h"
20 #include "intel_gsc.h"
23 #include "intel_engine_types.h"
24 #include "intel_gt_buffer_pool_types.h"
25 #include "intel_hwconfig.h"
26 #include "intel_llc_types.h"
27 #include "intel_reset_types.h"
28 #include "intel_rc6_types.h"
29 #include "intel_rps_types.h"
30 #include "intel_migrate_types.h"
31 #include "intel_wakeref.h"
32 #include "pxp/intel_pxp_types.h"
struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that are valid for the 'subslice' class
 * of multicast registers. If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,

	/*
	 * On some platforms there are multiple types of MCR registers that
	 * will always return a non-terminated value at instance (0, 0). We'll
	 * lump those all into a single category to keep things simple.
	 */
	INSTANCE0,

	NUM_STEERING_TYPES
};
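
/*
 * Illustrative sketch (not part of the upstream header): each per-type
 * steering table (see @steering_table in struct intel_gt below) is an array
 * of struct intel_mmio_range entries ending in a zeroed sentinel, so checking
 * whether a register offset needs explicit re-steering can be a simple walk;
 * the helper name here is hypothetical.
 *
 *	static bool needs_steering(const struct intel_mmio_range *table,
 *				   u32 offset)
 *	{
 *		for (; table && table->end; table++)
 *			if (offset >= table->start && offset <= table->end)
 *				return true;
 *		return false;
 *	}
 */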

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;
	struct intel_gsc gsc;

	struct {
		/* Serialize global TLB invalidations */
		struct mutex invalidate_lock;

		/*
		 * Batch TLB invalidations
		 *
		 * After unbinding the PTE, we need to ensure the TLBs are
		 * invalidated prior to releasing the physical pages. But we
		 * only need one such invalidation for all unbinds, so we
		 * track how many TLB invalidations have been performed since
		 * unbinding the PTE and only emit an extra invalidate if no
		 * full barrier has been passed.
		 */
		seqcount_mutex_t seqno;
	} tlb;
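
	/*
	 * Illustrative sketch (not part of the upstream header), with
	 * hypothetical helper names: an unbind samples the sequence number
	 * it observed, and the later flush becomes a no-op if a full
	 * invalidation has already passed:
	 *
	 *	u32 seqno = gt_tlb_seqno(gt);		// sample gt->tlb.seqno
	 *	...unbind the PTEs...
	 *	gt_invalidate_tlb_once(gt, seqno);	// skipped if stale
	 */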

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;
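
	/*
	 * Illustrative sketch (not part of the upstream header): while the
	 * GT stays awake, the retire worker rearms itself roughly once a
	 * second and retires whatever has completed, e.g.:
	 *
	 *	schedule_delayed_work(&gt->requests.retire_work,
	 *			      round_jiffies_up_relative(HZ));
	 */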

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;
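
	/*
	 * Illustrative sketch (not part of the upstream header): paths that
	 * must keep the GT powered bracket their hardware access with the
	 * GT PM wakeref helpers declared in intel_gt_pm.h:
	 *
	 *	intel_gt_pm_get(gt);
	 *	...touch hardware / submit requests...
	 *	intel_gt_pm_put(gt);
	 */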

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;
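
	/*
	 * Illustrative sketch (not part of the upstream header; exact
	 * signatures live in intel_gt_buffer_pool.h and may vary by kernel
	 * version): shadowing a batch grabs a suitably sized node from the
	 * pool and returns it once the copy has been parsed:
	 *
	 *	node = intel_gt_get_buffer_pool(gt, size, I915_MAP_WC);
	 *	...copy and parse the client batch via node->obj...
	 *	intel_gt_buffer_pool_put(node);
	 */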

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct {
		u8 groupid;
		u8 instanceid;
	} default_steering;

	/*
	 * Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
	 */
	phys_addr_t phys_addr;

	struct intel_gt_info {
		unsigned int id;

		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;

		/** @hwconfig: hardware configuration data */
		struct intel_hwconfig hwconfig;
	} info;

	struct {
		u8 uc_index;
		u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
	} mocs;

	struct intel_pxp pxp;

	/* gt/gtN sysfs */
	struct kobject sysfs_gt;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
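
/*
 * Illustrative sketch (not part of this header): the values above are byte
 * offsets into the GT scratch page, so a field's GGTT address is simply the
 * scratch vma's GGTT offset plus the field value (upstream wraps this as
 * intel_gt_scratch_offset() in intel_gt.h):
 *
 *	u32 addr = i915_ggtt_offset(gt->scratch) +
 *		   INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH;
 */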

#endif /* __INTEL_GT_TYPES__ */