1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_
32
33 #include <uapi/drm/i915_drm.h>
34 #include <uapi/drm/drm_fourcc.h>
35
36 #include <linux/io-mapping.h>
37 #include <linux/i2c.h>
38 #include <linux/i2c-algo-bit.h>
39 #include <linux/backlight.h>
40 #include <linux/hash.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/kref.h>
43 #include <linux/mm_types.h>
44 #include <linux/perf_event.h>
45 #include <linux/pm_qos.h>
46 #include <linux/reservation.h>
47 #include <linux/shmem_fs.h>
48 #include <linux/stackdepot.h>
49
50 #include <drm/intel-gtt.h>
51 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
52 #include <drm/drm_gem.h>
53 #include <drm/drm_auth.h>
54 #include <drm/drm_cache.h>
55 #include <drm/drm_util.h>
56 #include <drm/drm_dsc.h>
57 #include <drm/drm_atomic.h>
58 #include <drm/drm_connector.h>
59 #include <drm/i915_mei_hdcp_interface.h>
60
61 #include "i915_fixed.h"
62 #include "i915_params.h"
63 #include "i915_reg.h"
64 #include "i915_utils.h"
65
66 #include "display/intel_bios.h"
67 #include "display/intel_display.h"
68 #include "display/intel_display_power.h"
69 #include "display/intel_dpll_mgr.h"
70 #include "display/intel_frontbuffer.h"
71 #include "display/intel_opregion.h"
72
73 #include "gt/intel_lrc.h"
74 #include "gt/intel_engine.h"
75 #include "gt/intel_gt_types.h"
76 #include "gt/intel_workarounds.h"
77
78 #include "intel_device_info.h"
79 #include "intel_runtime_pm.h"
80 #include "intel_uc.h"
81 #include "intel_uncore.h"
82 #include "intel_wakeref.h"
83 #include "intel_wopcm.h"
84
85 #include "i915_gem.h"
86 #include "gem/i915_gem_context_types.h"
87 #include "i915_gem_fence_reg.h"
88 #include "i915_gem_gtt.h"
89 #include "i915_gpu_error.h"
90 #include "i915_request.h"
91 #include "i915_scheduler.h"
92 #include "i915_timeline.h"
93 #include "i915_vma.h"
94
95 #include "intel_gvt.h"
96
97 /* General customization:
98  */
99
100 #define DRIVER_NAME             "i915"
101 #define DRIVER_DESC             "Intel Graphics"
102 #define DRIVER_DATE             "20190619"
103 #define DRIVER_TIMESTAMP        1560947544
104
105 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
106  * WARN_ON()) for hw state sanity checks, to catch unexpected conditions
107  * which may not necessarily be a user visible problem.  This will either
108  * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
109  * to enable distros and users to tailor their preferred amount of i915 abrt
110  * spam.
111  */
112 #define I915_STATE_WARN(condition, format...) ({                        \
113         int __ret_warn_on = !!(condition);                              \
114         if (unlikely(__ret_warn_on))                                    \
115                 if (!WARN(i915_modparams.verbose_state_checks, format)) \
116                         DRM_ERROR(format);                              \
117         unlikely(__ret_warn_on);                                        \
118 })
119
120 #define I915_STATE_WARN_ON(x)                                           \
121         I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
122
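/*
 * Illustrative usage sketch only (not part of this header): the macros above
 * are meant for hw/sw state consistency checks, not for fatal errors. The
 * condition, message and variables below are made up for the example.
 *
 *	I915_STATE_WARN(hw_enabled != sw_enabled,
 *			"encoder hw state doesn't match sw tracking (hw %d, sw %d)\n",
 *			hw_enabled, sw_enabled);
 *
 *	if (I915_STATE_WARN_ON(!crtc_state))
 *		return;
 */
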
123 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
124
125 bool __i915_inject_load_failure(const char *func, int line);
126 #define i915_inject_load_failure() \
127         __i915_inject_load_failure(__func__, __LINE__)
128
129 bool i915_error_injected(void);
130
131 #else
132
133 #define i915_inject_load_failure() false
134 #define i915_error_injected() false
135
136 #endif
137
138 #define i915_load_error(i915, fmt, ...)                                  \
139         __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
140                       fmt, ##__VA_ARGS__)
141
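/*
 * Illustrative sketch only, not driver code: the intended usage pattern for
 * the fault-injection helpers above during driver load. Each init step may
 * fail artificially when i915_inject_load_failure() says so, and failures
 * are reported via i915_load_error() so that injected errors are logged at
 * KERN_DEBUG instead of KERN_ERR. some_init_step() is a hypothetical name.
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 *
 *	ret = some_init_step(i915);
 *	if (ret) {
 *		i915_load_error(i915, "init step failed (%d)\n", ret);
 *		return ret;
 *	}
 */
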
142 struct drm_i915_gem_object;
143
144 enum hpd_pin {
145         HPD_NONE = 0,
146         HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
147         HPD_CRT,
148         HPD_SDVO_B,
149         HPD_SDVO_C,
150         HPD_PORT_A,
151         HPD_PORT_B,
152         HPD_PORT_C,
153         HPD_PORT_D,
154         HPD_PORT_E,
155         HPD_PORT_F,
156         HPD_NUM_PINS
157 };
158
159 #define for_each_hpd_pin(__pin) \
160         for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
161
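/*
 * Illustrative sketch only: walking every real HPD pin with the iterator
 * above (HPD_NONE is skipped by construction), e.g. to reset the per-pin
 * statistics kept in struct i915_hotplug below. 'hotplug' is a hypothetical
 * variable for the example.
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		hotplug->stats[pin].count = 0;
 */
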
162 /* Threshold == 5 for long IRQs, 50 for short */
163 #define HPD_STORM_DEFAULT_THRESHOLD 50
164
165 struct i915_hotplug {
166         struct work_struct hotplug_work;
167
168         struct {
169                 unsigned long last_jiffies;
170                 int count;
171                 enum {
172                         HPD_ENABLED = 0,
173                         HPD_DISABLED = 1,
174                         HPD_MARK_DISABLED = 2
175                 } state;
176         } stats[HPD_NUM_PINS];
177         u32 event_bits;
178         struct delayed_work reenable_work;
179
180         u32 long_port_mask;
181         u32 short_port_mask;
182         struct work_struct dig_port_work;
183
184         struct work_struct poll_init_work;
185         bool poll_enabled;
186
187         unsigned int hpd_storm_threshold;
188         /* Whether or not to count short HPD IRQs in HPD storms */
189         u8 hpd_short_storm_enabled;
190
191         /*
192          * If we get an HPD irq from DP and an HPD irq from non-DP, the
193          * non-DP HPD work could block the workqueue while waiting for a
194          * mode config mutex that userspace may have taken. Meanwhile,
195          * userspace is waiting on the DP workqueue to run, which is
196          * blocked behind the non-DP one.
197          */
198         struct workqueue_struct *dp_wq;
199 };
200
201 #define I915_GEM_GPU_DOMAINS \
202         (I915_GEM_DOMAIN_RENDER | \
203          I915_GEM_DOMAIN_SAMPLER | \
204          I915_GEM_DOMAIN_COMMAND | \
205          I915_GEM_DOMAIN_INSTRUCTION | \
206          I915_GEM_DOMAIN_VERTEX)
207
208 struct drm_i915_private;
209 struct i915_mm_struct;
210 struct i915_mmu_object;
211
212 struct drm_i915_file_private {
213         struct drm_i915_private *dev_priv;
214         struct drm_file *file;
215
216         struct {
217                 spinlock_t lock;
218                 struct list_head request_list;
219         } mm;
220
221         struct idr context_idr;
222         struct mutex context_idr_lock; /* guards context_idr */
223
224         struct idr vm_idr;
225         struct mutex vm_idr_lock; /* guards vm_idr */
226
227         unsigned int bsd_engine;
228
229 /*
230  * Every context ban increments the per-client ban score, and hangs in
231  * short succession also increment it. If the ban threshold is reached,
232  * the client is considered banned and submitting more work will fail.
233  * This is a stop gap measure to limit a badly behaving client's access
234  * to the gpu; unbannable contexts never increment the client ban score.
235  * A worked example of the scoring follows this struct.
236  */
237 #define I915_CLIENT_SCORE_HANG_FAST     1
238 #define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
239 #define I915_CLIENT_SCORE_CONTEXT_BAN   3
240 #define I915_CLIENT_SCORE_BANNED        9
241         /** ban_score: Accumulated score of all ctx bans and fast hangs. */
242         atomic_t ban_score;
243         unsigned long hang_timestamp;
244 };
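
/*
 * Worked example of the scoring above (informational only): three context
 * bans contribute 3 * I915_CLIENT_SCORE_CONTEXT_BAN = 9 points, which already
 * reaches I915_CLIENT_SCORE_BANNED. Alternatively, nine fast hangs (each
 * within I915_CLIENT_FAST_HANG_JIFFIES of the previous one) accumulate
 * 9 * I915_CLIENT_SCORE_HANG_FAST = 9 points and hit the same threshold.
 */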
245
246 /* Interface history:
247  *
248  * 1.1: Original.
249  * 1.2: Add Power Management
250  * 1.3: Add vblank support
251  * 1.4: Fix cmdbuffer path, add heap destroy
252  * 1.5: Add vblank pipe configuration
253  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
254  *      - Support vertical blank on secondary display pipe
255  */
256 #define DRIVER_MAJOR            1
257 #define DRIVER_MINOR            6
258 #define DRIVER_PATCHLEVEL       0
259
260 struct intel_overlay;
261 struct intel_overlay_error_state;
262
263 struct sdvo_device_mapping {
264         u8 initialized;
265         u8 dvo_port;
266         u8 slave_addr;
267         u8 dvo_wiring;
268         u8 i2c_pin;
269         u8 ddc_pin;
270 };
271
272 struct intel_connector;
273 struct intel_encoder;
274 struct intel_atomic_state;
275 struct intel_crtc_state;
276 struct intel_initial_plane_config;
277 struct intel_crtc;
278 struct intel_limit;
279 struct dpll;
280 struct intel_cdclk_state;
281
282 struct drm_i915_display_funcs {
283         void (*get_cdclk)(struct drm_i915_private *dev_priv,
284                           struct intel_cdclk_state *cdclk_state);
285         void (*set_cdclk)(struct drm_i915_private *dev_priv,
286                           const struct intel_cdclk_state *cdclk_state,
287                           enum pipe pipe);
288         int (*get_fifo_size)(struct drm_i915_private *dev_priv,
289                              enum i9xx_plane_id i9xx_plane);
290         int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
291         int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
292         void (*initial_watermarks)(struct intel_atomic_state *state,
293                                    struct intel_crtc_state *cstate);
294         void (*atomic_update_watermarks)(struct intel_atomic_state *state,
295                                          struct intel_crtc_state *cstate);
296         void (*optimize_watermarks)(struct intel_atomic_state *state,
297                                     struct intel_crtc_state *cstate);
298         int (*compute_global_watermarks)(struct intel_atomic_state *state);
299         void (*update_wm)(struct intel_crtc *crtc);
300         int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
301         /* Returns the active state of the crtc, and if the crtc is active,
302          * fills out the pipe-config with the hw state. */
303         bool (*get_pipe_config)(struct intel_crtc *,
304                                 struct intel_crtc_state *);
305         void (*get_initial_plane_config)(struct intel_crtc *,
306                                          struct intel_initial_plane_config *);
307         int (*crtc_compute_clock)(struct intel_crtc *crtc,
308                                   struct intel_crtc_state *crtc_state);
309         void (*crtc_enable)(struct intel_crtc_state *pipe_config,
310                             struct drm_atomic_state *old_state);
311         void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
312                              struct drm_atomic_state *old_state);
313         void (*update_crtcs)(struct drm_atomic_state *state);
314         void (*audio_codec_enable)(struct intel_encoder *encoder,
315                                    const struct intel_crtc_state *crtc_state,
316                                    const struct drm_connector_state *conn_state);
317         void (*audio_codec_disable)(struct intel_encoder *encoder,
318                                     const struct intel_crtc_state *old_crtc_state,
319                                     const struct drm_connector_state *old_conn_state);
320         void (*fdi_link_train)(struct intel_crtc *crtc,
321                                const struct intel_crtc_state *crtc_state);
322         void (*init_clock_gating)(struct drm_i915_private *dev_priv);
323         void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
324         /* clock updates for mode set */
325         /* cursor updates */
326         /* render clock increase/decrease */
327         /* display clock increase/decrease */
328         /* pll clock increase/decrease */
329
330         int (*color_check)(struct intel_crtc_state *crtc_state);
331         /*
332          * Program double buffered color management registers during
333          * vblank evasion. The registers should then latch during the
334          * next vblank start, alongside any other double buffered registers
335          * involved with the same commit.
336          */
337         void (*color_commit)(const struct intel_crtc_state *crtc_state);
338         /*
339          * Load LUTs (and other single buffered color management
340          * registers). Will (hopefully) be called during the vblank
341          * following the latching of any double buffered registers
342          * involved with the same commit.
343          */
344         void (*load_luts)(const struct intel_crtc_state *crtc_state);
345         void (*read_luts)(struct intel_crtc_state *crtc_state);
346 };
347
348 struct intel_csr {
349         struct work_struct work;
350         const char *fw_path;
351         u32 required_version;
352         u32 max_fw_size; /* bytes */
353         u32 *dmc_payload;
354         u32 dmc_fw_size; /* dwords */
355         u32 version;
356         u32 mmio_count;
357         i915_reg_t mmioaddr[20];
358         u32 mmiodata[20];
359         u32 dc_state;
360         u32 allowed_dc_mask;
361         intel_wakeref_t wakeref;
362 };
363
364 enum i915_cache_level {
365         I915_CACHE_NONE = 0,
366         I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
367         I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
368                               caches, eg sampler/render caches, and the
369                               large Last-Level-Cache. LLC is coherent with
370                               the CPU, but L3 is only visible to the GPU. */
371         I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
372 };
373
374 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
375
376 struct intel_fbc {
377         /* This is always the inner lock when overlapping with struct_mutex and
378          * it's the outer lock when overlapping with stolen_lock. */
379         struct mutex lock;
380         unsigned threshold;
381         unsigned int possible_framebuffer_bits;
382         unsigned int busy_bits;
383         unsigned int visible_pipes_mask;
384         struct intel_crtc *crtc;
385
386         struct drm_mm_node compressed_fb;
387         struct drm_mm_node *compressed_llb;
388
389         bool false_color;
390
391         bool enabled;
392         bool active;
393         bool flip_pending;
394
395         bool underrun_detected;
396         struct work_struct underrun_work;
397
398         /*
399          * Due to the atomic rules we can't access some structures without the
400          * appropriate locking, so we cache information here in order to avoid
401          * these problems.
402          */
403         struct intel_fbc_state_cache {
404                 struct i915_vma *vma;
405                 unsigned long flags;
406
407                 struct {
408                         unsigned int mode_flags;
409                         u32 hsw_bdw_pixel_rate;
410                 } crtc;
411
412                 struct {
413                         unsigned int rotation;
414                         int src_w;
415                         int src_h;
416                         bool visible;
417                         /*
418                          * Display surface base address adjustment for
419                          * pageflips. Note that on gen4+ this only adjusts up
420                          * to a tile, offsets within a tile are handled in
421                          * the hw itself (with the TILEOFF register).
422                          */
423                         int adjusted_x;
424                         int adjusted_y;
425
426                         int y;
427
428                         u16 pixel_blend_mode;
429                 } plane;
430
431                 struct {
432                         const struct drm_format_info *format;
433                         unsigned int stride;
434                 } fb;
435         } state_cache;
436
437         /*
438          * This structure contains everything that's relevant to program the
439          * hardware registers. When we want to figure out if we need to disable
440          * and re-enable FBC for a new configuration we just check if there's
441          * something different in the struct. The genx_fbc_activate functions
442          * are supposed to read from it in order to program the registers.
443          */
444         struct intel_fbc_reg_params {
445                 struct i915_vma *vma;
446                 unsigned long flags;
447
448                 struct {
449                         enum pipe pipe;
450                         enum i9xx_plane_id i9xx_plane;
451                         unsigned int fence_y_offset;
452                 } crtc;
453
454                 struct {
455                         const struct drm_format_info *format;
456                         unsigned int stride;
457                 } fb;
458
459                 int cfb_size;
460                 unsigned int gen9_wa_cfb_stride;
461         } params;
462
463         const char *no_fbc_reason;
464 };
465
466 /*
467  * HIGH_RR is the highest eDP panel refresh rate read from EDID
468  * LOW_RR is the lowest eDP panel refresh rate found from EDID
469  * parsing for same resolution.
470  */
471 enum drrs_refresh_rate_type {
472         DRRS_HIGH_RR,
473         DRRS_LOW_RR,
474         DRRS_MAX_RR, /* RR count */
475 };
476
477 enum drrs_support_type {
478         DRRS_NOT_SUPPORTED = 0,
479         STATIC_DRRS_SUPPORT = 1,
480         SEAMLESS_DRRS_SUPPORT = 2
481 };
482
483 struct intel_dp;
484 struct i915_drrs {
485         struct mutex mutex;
486         struct delayed_work work;
487         struct intel_dp *dp;
488         unsigned busy_frontbuffer_bits;
489         enum drrs_refresh_rate_type refresh_rate_type;
490         enum drrs_support_type type;
491 };
492
493 struct i915_psr {
494         struct mutex lock;
495
496 #define I915_PSR_DEBUG_MODE_MASK        0x0f
497 #define I915_PSR_DEBUG_DEFAULT          0x00
498 #define I915_PSR_DEBUG_DISABLE          0x01
499 #define I915_PSR_DEBUG_ENABLE           0x02
500 #define I915_PSR_DEBUG_FORCE_PSR1       0x03
501 #define I915_PSR_DEBUG_IRQ              0x10
502
503         u32 debug;
504         bool sink_support;
505         bool enabled;
506         struct intel_dp *dp;
507         enum pipe pipe;
508         bool active;
509         struct work_struct work;
510         unsigned busy_frontbuffer_bits;
511         bool sink_psr2_support;
512         bool link_standby;
513         bool colorimetry_support;
514         bool psr2_enabled;
515         u8 sink_sync_latency;
516         ktime_t last_entry_attempt;
517         ktime_t last_exit;
518         bool sink_not_reliable;
519         bool irq_aux_error;
520         u16 su_x_granularity;
521 };
522
523 /*
524  * Sorted by south display engine compatibility.
525  * If the new PCH comes with a south display engine that is not
526  * inherited from the latest item, please do not add it to the
527  * end. Instead, add it right after its "parent" PCH.
528  */
529 enum intel_pch {
530         PCH_NOP = -1,   /* PCH without south display */
531         PCH_NONE = 0,   /* No PCH present */
532         PCH_IBX,        /* Ibexpeak PCH */
533         PCH_CPT,        /* Cougarpoint/Pantherpoint PCH */
534         PCH_LPT,        /* Lynxpoint/Wildcatpoint PCH */
535         PCH_SPT,        /* Sunrisepoint/Kaby Lake PCH */
536         PCH_CNP,        /* Cannon/Comet Lake PCH */
537         PCH_ICP,        /* Ice Lake PCH */
538         PCH_MCC,        /* Mule Creek Canyon PCH */
539 };
540
541 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
542 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
543 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
544 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
545 #define QUIRK_INCREASE_T12_DELAY (1<<6)
546 #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
547
548 struct intel_fbdev;
549 struct intel_fbc_work;
550
551 struct intel_gmbus {
552         struct i2c_adapter adapter;
553 #define GMBUS_FORCE_BIT_RETRY (1U << 31)
554         u32 force_bit;
555         u32 reg0;
556         i915_reg_t gpio_reg;
557         struct i2c_algo_bit_data bit_algo;
558         struct drm_i915_private *dev_priv;
559 };
560
561 struct i915_suspend_saved_registers {
562         u32 saveDSPARB;
563         u32 saveFBC_CONTROL;
564         u32 saveCACHE_MODE_0;
565         u32 saveMI_ARB_STATE;
566         u32 saveSWF0[16];
567         u32 saveSWF1[16];
568         u32 saveSWF3[3];
569         u64 saveFENCE[I915_MAX_NUM_FENCES];
570         u32 savePCH_PORT_HOTPLUG;
571         u16 saveGCDGMBUS;
572 };
573
574 struct vlv_s0ix_state {
575         /* GAM */
576         u32 wr_watermark;
577         u32 gfx_prio_ctrl;
578         u32 arb_mode;
579         u32 gfx_pend_tlb0;
580         u32 gfx_pend_tlb1;
581         u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
582         u32 media_max_req_count;
583         u32 gfx_max_req_count;
584         u32 render_hwsp;
585         u32 ecochk;
586         u32 bsd_hwsp;
587         u32 blt_hwsp;
588         u32 tlb_rd_addr;
589
590         /* MBC */
591         u32 g3dctl;
592         u32 gsckgctl;
593         u32 mbctl;
594
595         /* GCP */
596         u32 ucgctl1;
597         u32 ucgctl3;
598         u32 rcgctl1;
599         u32 rcgctl2;
600         u32 rstctl;
601         u32 misccpctl;
602
603         /* GPM */
604         u32 gfxpause;
605         u32 rpdeuhwtc;
606         u32 rpdeuc;
607         u32 ecobus;
608         u32 pwrdwnupctl;
609         u32 rp_down_timeout;
610         u32 rp_deucsw;
611         u32 rcubmabdtmr;
612         u32 rcedata;
613         u32 spare2gh;
614
615         /* Display 1 CZ domain */
616         u32 gt_imr;
617         u32 gt_ier;
618         u32 pm_imr;
619         u32 pm_ier;
620         u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
621
622         /* GT SA CZ domain */
623         u32 tilectl;
624         u32 gt_fifoctl;
625         u32 gtlc_wake_ctrl;
626         u32 gtlc_survive;
627         u32 pmwgicz;
628
629         /* Display 2 CZ domain */
630         u32 gu_ctl0;
631         u32 gu_ctl1;
632         u32 pcbr;
633         u32 clock_gate_dis2;
634 };
635
636 struct intel_rps_ei {
637         ktime_t ktime;
638         u32 render_c0;
639         u32 media_c0;
640 };
641
642 struct intel_rps {
643         struct mutex lock; /* protects enabling and the worker */
644
645         /*
646          * work, interrupts_enabled and pm_iir are protected by
647          * dev_priv->irq_lock
648          */
649         struct work_struct work;
650         bool interrupts_enabled;
651         u32 pm_iir;
652
653         /* PM interrupt bits that should never be masked */
654         u32 pm_intrmsk_mbz;
655
656         /* Frequencies are stored in potentially platform dependent multiples.
657          * In other words, *_freq needs to be multiplied by X to be interesting.
658          * Soft limits are those which are used for the dynamic reclocking done
659          * by the driver (raise frequencies under heavy loads, and lower for
660          * lighter loads). Hard limits are those imposed by the hardware.
661          *
662          * A distinction is made for overclocking, which is never enabled by
663          * default, and is considered to be above the hard limit if it's
664          * possible at all. A unit-conversion sketch follows this struct.
665          */
666         u8 cur_freq;            /* Current frequency (cached, may not == HW) */
667         u8 min_freq_softlimit;  /* Minimum frequency permitted by the driver */
668         u8 max_freq_softlimit;  /* Max frequency permitted by the driver */
669         u8 max_freq;            /* Maximum frequency, RP0 if not overclocking */
670         u8 min_freq;            /* AKA RPn. Minimum frequency */
671         u8 boost_freq;          /* Frequency to request when wait boosting */
672         u8 idle_freq;           /* Frequency to request when we are idle */
673         u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
674         u8 rp1_freq;            /* "less than" RP0 power/frequency */
675         u8 rp0_freq;            /* Non-overclocked max frequency. */
676         u16 gpll_ref_freq;      /* vlv/chv GPLL reference frequency */
677
678         int last_adj;
679
680         struct {
681                 struct mutex mutex;
682
683                 enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
684                 unsigned int interactive;
685
686                 u8 up_threshold; /* Current %busy required to upclock */
687                 u8 down_threshold; /* Current %busy required to downclock */
688         } power;
689
690         bool enabled;
691         atomic_t num_waiters;
692         atomic_t boosts;
693
694         /* manual wa residency calculations */
695         struct intel_rps_ei ei;
696 };
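
/*
 * Informational sketch only: the *_freq fields above are stored in hardware
 * units, so consumers typically convert before reporting, along the lines of
 *
 *	mhz = intel_gpu_freq(dev_priv, rps->cur_freq);
 *	hw_units = intel_freq_opcode(dev_priv, mhz);
 *
 * where intel_gpu_freq()/intel_freq_opcode() are the driver's existing
 * conversion helpers; treat the exact call sites as an assumption of this
 * note rather than something defined by this header.
 */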
697
698 struct intel_rc6 {
699         bool enabled;
700         u64 prev_hw_residency[4];
701         u64 cur_residency[4];
702 };
703
704 struct intel_llc_pstate {
705         bool enabled;
706 };
707
708 struct intel_gen6_power_mgmt {
709         struct intel_rps rps;
710         struct intel_rc6 rc6;
711         struct intel_llc_pstate llc_pstate;
712 };
713
714 /* defined in intel_pm.c */
715 extern spinlock_t mchdev_lock;
716
717 struct intel_ilk_power_mgmt {
718         u8 cur_delay;
719         u8 min_delay;
720         u8 max_delay;
721         u8 fmax;
722         u8 fstart;
723
724         u64 last_count1;
725         unsigned long last_time1;
726         unsigned long chipset_power;
727         u64 last_count2;
728         u64 last_time2;
729         unsigned long gfx_power;
730         u8 corr;
731
732         int c_m;
733         int r_t;
734 };
735
736 #define MAX_L3_SLICES 2
737 struct intel_l3_parity {
738         u32 *remap_info[MAX_L3_SLICES];
739         struct work_struct error_work;
740         int which_slice;
741 };
742
743 struct i915_gem_mm {
744         /** Memory allocator for GTT stolen memory */
745         struct drm_mm stolen;
746         /** Protects the usage of the GTT stolen memory allocator. This is
747          * always the inner lock when overlapping with struct_mutex. */
748         struct mutex stolen_lock;
749
750         /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
751         spinlock_t obj_lock;
752
753         /**
754          * List of objects which are purgeable.
755          */
756         struct list_head purge_list;
757
758         /**
759          * List of objects which have allocated pages and are shrinkable.
760          */
761         struct list_head shrink_list;
762
763         /**
764          * List of objects which are pending destruction.
765          */
766         struct llist_head free_list;
767         struct work_struct free_work;
768         spinlock_t free_lock;
769         /**
770          * Count of objects pending destruction. Used to skip needlessly
771          * waiting on an RCU barrier if no objects are waiting to be freed.
772          */
773         atomic_t free_count;
774
775         /**
776          * Small stash of WC pages
777          */
778         struct pagestash wc_stash;
779
780         /**
781          * tmpfs instance used for shmem backed objects
782          */
783         struct vfsmount *gemfs;
784
785         /** PPGTT used for aliasing the PPGTT with the GTT */
786         struct i915_ppgtt *aliasing_ppgtt;
787
788         struct notifier_block oom_notifier;
789         struct notifier_block vmap_notifier;
790         struct shrinker shrinker;
791
792         /**
793          * Workqueue to fault in userptr pages, flushed by the execbuf
794          * when required but otherwise left to userspace to try again
795          * on EAGAIN.
796          */
797         struct workqueue_struct *userptr_wq;
798
799         u64 unordered_timeline;
800
801         /* the indicator for dispatch video commands on two BSD rings */
802         atomic_t bsd_engine_dispatch_index;
803
804         /** Bit 6 swizzling required for X tiling */
805         u32 bit_6_swizzle_x;
806         /** Bit 6 swizzling required for Y tiling */
807         u32 bit_6_swizzle_y;
808
809         /* shrinker accounting, also useful for userland debugging */
810         u64 shrink_memory;
811         u32 shrink_count;
812 };
813
814 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
815
816 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
817 #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
818
819 #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
820 #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
821
822 #define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
823
824 struct ddi_vbt_port_info {
825         /* Non-NULL if port present. */
826         const struct child_device_config *child;
827
828         int max_tmds_clock;
829
830         /*
831          * This is an index in the HDMI/DVI DDI buffer translation table.
832          * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
833          * populate this field.
834          */
835 #define HDMI_LEVEL_SHIFT_UNKNOWN        0xff
836         u8 hdmi_level_shift;
837
838         u8 supports_dvi:1;
839         u8 supports_hdmi:1;
840         u8 supports_dp:1;
841         u8 supports_edp:1;
842         u8 supports_typec_usb:1;
843         u8 supports_tbt:1;
844
845         u8 alternate_aux_channel;
846         u8 alternate_ddc_pin;
847
848         u8 dp_boost_level;
849         u8 hdmi_boost_level;
850         int dp_max_link_rate;           /* 0 for not limited by VBT */
851 };
852
853 enum psr_lines_to_wait {
854         PSR_0_LINES_TO_WAIT = 0,
855         PSR_1_LINE_TO_WAIT,
856         PSR_4_LINES_TO_WAIT,
857         PSR_8_LINES_TO_WAIT
858 };
859
860 struct intel_vbt_data {
861         struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
862         struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
863
864         /* Feature bits */
865         unsigned int int_tv_support:1;
866         unsigned int lvds_dither:1;
867         unsigned int int_crt_support:1;
868         unsigned int lvds_use_ssc:1;
869         unsigned int int_lvds_support:1;
870         unsigned int display_clock_mode:1;
871         unsigned int fdi_rx_polarity_inverted:1;
872         unsigned int panel_type:4;
873         int lvds_ssc_freq;
874         unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
875         enum drm_panel_orientation orientation;
876
877         enum drrs_support_type drrs_type;
878
879         struct {
880                 int rate;
881                 int lanes;
882                 int preemphasis;
883                 int vswing;
884                 bool low_vswing;
885                 bool initialized;
886                 int bpp;
887                 struct edp_power_seq pps;
888         } edp;
889
890         struct {
891                 bool enable;
892                 bool full_link;
893                 bool require_aux_wakeup;
894                 int idle_frames;
895                 enum psr_lines_to_wait lines_to_wait;
896                 int tp1_wakeup_time_us;
897                 int tp2_tp3_wakeup_time_us;
898                 int psr2_tp2_tp3_wakeup_time_us;
899         } psr;
900
901         struct {
902                 u16 pwm_freq_hz;
903                 bool present;
904                 bool active_low_pwm;
905                 u8 min_brightness;      /* min_brightness/255 of max */
906                 u8 controller;          /* brightness controller number */
907                 enum intel_backlight_type type;
908         } backlight;
909
910         /* MIPI DSI */
911         struct {
912                 u16 panel_id;
913                 struct mipi_config *config;
914                 struct mipi_pps_data *pps;
915                 u16 bl_ports;
916                 u16 cabc_ports;
917                 u8 seq_version;
918                 u32 size;
919                 u8 *data;
920                 const u8 *sequence[MIPI_SEQ_MAX];
921                 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
922                 enum drm_panel_orientation orientation;
923         } dsi;
924
925         int crt_ddc_pin;
926
927         int child_dev_num;
928         struct child_device_config *child_dev;
929
930         struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
931         struct sdvo_device_mapping sdvo_mappings[2];
932 };
933
934 enum intel_ddb_partitioning {
935         INTEL_DDB_PART_1_2,
936         INTEL_DDB_PART_5_6, /* IVB+ */
937 };
938
939 struct intel_wm_level {
940         bool enable;
941         u32 pri_val;
942         u32 spr_val;
943         u32 cur_val;
944         u32 fbc_val;
945 };
946
947 struct ilk_wm_values {
948         u32 wm_pipe[3];
949         u32 wm_lp[3];
950         u32 wm_lp_spr[3];
951         u32 wm_linetime[3];
952         bool enable_fbc_wm;
953         enum intel_ddb_partitioning partitioning;
954 };
955
956 struct g4x_pipe_wm {
957         u16 plane[I915_MAX_PLANES];
958         u16 fbc;
959 };
960
961 struct g4x_sr_wm {
962         u16 plane;
963         u16 cursor;
964         u16 fbc;
965 };
966
967 struct vlv_wm_ddl_values {
968         u8 plane[I915_MAX_PLANES];
969 };
970
971 struct vlv_wm_values {
972         struct g4x_pipe_wm pipe[3];
973         struct g4x_sr_wm sr;
974         struct vlv_wm_ddl_values ddl[3];
975         u8 level;
976         bool cxsr;
977 };
978
979 struct g4x_wm_values {
980         struct g4x_pipe_wm pipe[2];
981         struct g4x_sr_wm sr;
982         struct g4x_sr_wm hpll;
983         bool cxsr;
984         bool hpll_en;
985         bool fbc_en;
986 };
987
988 struct skl_ddb_entry {
989         u16 start, end; /* in number of blocks, 'end' is exclusive */
990 };
991
992 static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
993 {
994         return entry->end - entry->start;
995 }
996
997 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
998                                        const struct skl_ddb_entry *e2)
999 {
1000         if (e1->start == e2->start && e1->end == e2->end)
1001                 return true;
1002
1003         return false;
1004 }
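
/*
 * Illustrative sketch, not used by the driver: how the helpers above compose
 * into an overlap test. Since 'end' is exclusive, entries that merely touch
 * do not overlap. The function name is made up for the example.
 */
static inline bool skl_ddb_entry_overlap_example(const struct skl_ddb_entry *e1,
						 const struct skl_ddb_entry *e2)
{
	return e1->start < e2->end && e2->start < e1->end;
}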
1005
1006 struct skl_ddb_allocation {
1007         u8 enabled_slices; /* GEN11 has 2 configurable slices */
1008 };
1009
1010 struct skl_ddb_values {
1011         unsigned dirty_pipes;
1012         struct skl_ddb_allocation ddb;
1013 };
1014
1015 struct skl_wm_level {
1016         u16 min_ddb_alloc;
1017         u16 plane_res_b;
1018         u8 plane_res_l;
1019         bool plane_en;
1020         bool ignore_lines;
1021 };
1022
1023 /* Stores plane specific WM parameters */
1024 struct skl_wm_params {
1025         bool x_tiled, y_tiled;
1026         bool rc_surface;
1027         bool is_planar;
1028         u32 width;
1029         u8 cpp;
1030         u32 plane_pixel_rate;
1031         u32 y_min_scanlines;
1032         u32 plane_bytes_per_line;
1033         uint_fixed_16_16_t plane_blocks_per_line;
1034         uint_fixed_16_16_t y_tile_minimum;
1035         u32 linetime_us;
1036         u32 dbuf_block_size;
1037 };
1038
1039 enum intel_pipe_crc_source {
1040         INTEL_PIPE_CRC_SOURCE_NONE,
1041         INTEL_PIPE_CRC_SOURCE_PLANE1,
1042         INTEL_PIPE_CRC_SOURCE_PLANE2,
1043         INTEL_PIPE_CRC_SOURCE_PLANE3,
1044         INTEL_PIPE_CRC_SOURCE_PLANE4,
1045         INTEL_PIPE_CRC_SOURCE_PLANE5,
1046         INTEL_PIPE_CRC_SOURCE_PLANE6,
1047         INTEL_PIPE_CRC_SOURCE_PLANE7,
1048         INTEL_PIPE_CRC_SOURCE_PIPE,
1049         /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1050         INTEL_PIPE_CRC_SOURCE_TV,
1051         INTEL_PIPE_CRC_SOURCE_DP_B,
1052         INTEL_PIPE_CRC_SOURCE_DP_C,
1053         INTEL_PIPE_CRC_SOURCE_DP_D,
1054         INTEL_PIPE_CRC_SOURCE_AUTO,
1055         INTEL_PIPE_CRC_SOURCE_MAX,
1056 };
1057
1058 #define INTEL_PIPE_CRC_ENTRIES_NR       128
1059 struct intel_pipe_crc {
1060         spinlock_t lock;
1061         int skipped;
1062         enum intel_pipe_crc_source source;
1063 };
1064
1065 struct i915_frontbuffer_tracking {
1066         spinlock_t lock;
1067
1068         /*
1069          * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1070          * scheduled flips.
1071          */
1072         unsigned busy_bits;
1073         unsigned flip_bits;
1074 };
1075
1076 struct i915_virtual_gpu {
1077         bool active;
1078         u32 caps;
1079 };
1080
1081 /* used in computing the new watermarks state */
1082 struct intel_wm_config {
1083         unsigned int num_pipes_active;
1084         bool sprites_enabled;
1085         bool sprites_scaled;
1086 };
1087
1088 struct i915_oa_format {
1089         u32 format;
1090         int size;
1091 };
1092
1093 struct i915_oa_reg {
1094         i915_reg_t addr;
1095         u32 value;
1096 };
1097
1098 struct i915_oa_config {
1099         char uuid[UUID_STRING_LEN + 1];
1100         int id;
1101
1102         const struct i915_oa_reg *mux_regs;
1103         u32 mux_regs_len;
1104         const struct i915_oa_reg *b_counter_regs;
1105         u32 b_counter_regs_len;
1106         const struct i915_oa_reg *flex_regs;
1107         u32 flex_regs_len;
1108
1109         struct attribute_group sysfs_metric;
1110         struct attribute *attrs[2];
1111         struct device_attribute sysfs_metric_id;
1112
1113         atomic_t ref_count;
1114 };
1115
1116 struct i915_perf_stream;
1117
1118 /**
1119  * struct i915_perf_stream_ops - the OPs to support a specific stream type
1120  */
1121 struct i915_perf_stream_ops {
1122         /**
1123          * @enable: Enables the collection of HW samples, either in response to
1124          * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
1125          * without `I915_PERF_FLAG_DISABLED`.
1126          */
1127         void (*enable)(struct i915_perf_stream *stream);
1128
1129         /**
1130          * @disable: Disables the collection of HW samples, either in response
1131          * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
1132          * the stream.
1133          */
1134         void (*disable)(struct i915_perf_stream *stream);
1135
1136         /**
1137          * @poll_wait: Call poll_wait, passing a wait queue that will be woken
1138          * once there is something ready to read() for the stream
1139          */
1140         void (*poll_wait)(struct i915_perf_stream *stream,
1141                           struct file *file,
1142                           poll_table *wait);
1143
1144         /**
1145          * @wait_unlocked: For handling a blocking read, wait until there is
1146          * something ready to read() for the stream. E.g. wait on the same
1147          * wait queue that would be passed to poll_wait().
1148          */
1149         int (*wait_unlocked)(struct i915_perf_stream *stream);
1150
1151         /**
1152          * @read: Copy buffered metrics as records to userspace
1153          * **buf**: the userspace destination buffer
1154          * **count**: the number of bytes to copy, requested by userspace
1155          * **offset**: zero at the start of the read, updated as the read
1156          * proceeds, it represents how many bytes have been copied so far and
1157          * the buffer offset for copying the next record.
1158          *
1159          * Copy as many buffered i915 perf samples and records for this stream
1160          * to userspace as will fit in the given buffer.
1161          *
1162          * Only write complete records; returning -%ENOSPC if there isn't room
1163          * for a complete record.
1164          *
1165          * Return any error condition that results in a short read such as
1166          * -%ENOSPC or -%EFAULT, even though these may be squashed before
1167          * returning to userspace.
1168          * returning to userspace. (A sketch of this contract follows the struct.)
1169         int (*read)(struct i915_perf_stream *stream,
1170                     char __user *buf,
1171                     size_t count,
1172                     size_t *offset);
1173
1174         /**
1175          * @destroy: Cleanup any stream specific resources.
1176          *
1177          * The stream will always be disabled before this is called.
1178          */
1179         void (*destroy)(struct i915_perf_stream *stream);
1180 };
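
/*
 * Illustrative sketch only (not the driver's implementation) of the ->read()
 * contract documented above: copy whole records while they fit, advance
 * *offset as the read proceeds, and report -ENOSPC once a complete record no
 * longer fits. next_record() and record_size() are hypothetical helpers.
 *
 *	while ((rec = next_record(stream))) {
 *		if (*offset + record_size(rec) > count)
 *			return -ENOSPC;
 *		if (copy_to_user(buf + *offset, rec, record_size(rec)))
 *			return -EFAULT;
 *		*offset += record_size(rec);
 *	}
 *	return 0;
 */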
1181
1182 /**
1183  * struct i915_perf_stream - state for a single open stream FD
1184  */
1185 struct i915_perf_stream {
1186         /**
1187          * @dev_priv: i915 drm device
1188          */
1189         struct drm_i915_private *dev_priv;
1190
1191         /**
1192          * @link: Links the stream into ``&drm_i915_private->streams``
1193          */
1194         struct list_head link;
1195
1196         /**
1197          * @wakeref: As we keep the device awake while the perf stream is
1198          * active, we track our runtime pm reference for later release.
1199          */
1200         intel_wakeref_t wakeref;
1201
1202         /**
1203          * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
1204          * properties given when opening a stream, representing the contents
1205          * of a single sample as read() by userspace.
1206          */
1207         u32 sample_flags;
1208
1209         /**
1210          * @sample_size: Considering the configured contents of a sample
1211          * combined with the required header size, this is the total size
1212          * of a single sample record.
1213          */
1214         int sample_size;
1215
1216         /**
1217          * @ctx: %NULL if measuring system-wide across all contexts or a
1218          * specific context that is being monitored.
1219          */
1220         struct i915_gem_context *ctx;
1221
1222         /**
1223          * @enabled: Whether the stream is currently enabled, considering
1224          * whether the stream was opened in a disabled state and based
1225          * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
1226          */
1227         bool enabled;
1228
1229         /**
1230          * @ops: The callbacks providing the implementation of this specific
1231          * type of configured stream.
1232          */
1233         const struct i915_perf_stream_ops *ops;
1234
1235         /**
1236          * @oa_config: The OA configuration used by the stream.
1237          */
1238         struct i915_oa_config *oa_config;
1239 };
1240
1241 /**
1242  * struct i915_oa_ops - Gen specific implementation of an OA unit stream
1243  */
1244 struct i915_oa_ops {
1245         /**
1246          * @is_valid_b_counter_reg: Validates register's address for
1247          * programming boolean counters for a particular platform.
1248          */
1249         bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
1250                                        u32 addr);
1251
1252         /**
1253          * @is_valid_mux_reg: Validates register's address for programming mux
1254          * for a particular platform.
1255          */
1256         bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
1257
1258         /**
1259          * @is_valid_flex_reg: Validates register's address for programming
1260          * flex EU filtering for a particular platform.
1261          */
1262         bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1263
1264         /**
1265          * @enable_metric_set: Selects and applies any MUX configuration to set
1266          * up the Boolean and Custom (B/C) counters that are part of the
1267          * counter reports being sampled. May apply system constraints such as
1268          * disabling EU clock gating as required.
1269          */
1270         int (*enable_metric_set)(struct i915_perf_stream *stream);
1271
1272         /**
1273          * @disable_metric_set: Remove system constraints associated with using
1274          * the OA unit.
1275          */
1276         void (*disable_metric_set)(struct drm_i915_private *dev_priv);
1277
1278         /**
1279          * @oa_enable: Enable periodic sampling
1280          */
1281         void (*oa_enable)(struct i915_perf_stream *stream);
1282
1283         /**
1284          * @oa_disable: Disable periodic sampling
1285          */
1286         void (*oa_disable)(struct i915_perf_stream *stream);
1287
1288         /**
1289          * @read: Copy data from the circular OA buffer into a given userspace
1290          * buffer.
1291          */
1292         int (*read)(struct i915_perf_stream *stream,
1293                     char __user *buf,
1294                     size_t count,
1295                     size_t *offset);
1296
1297         /**
1298          * @oa_hw_tail_read: read the OA tail pointer register
1299          *
1300          * In particular this enables us to share all the fiddly code for
1301          * handling the OA unit tail pointer race that affects multiple
1302          * generations.
1303          */
1304         u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
1305 };
1306
1307 struct intel_cdclk_state {
1308         unsigned int cdclk, vco, ref, bypass;
1309         u8 voltage_level;
1310 };
1311
1312 struct drm_i915_private {
1313         struct drm_device drm;
1314
1315         const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
1316         struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
1317         struct intel_driver_caps caps;
1318
1319         /**
1320          * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
1321          * end of stolen which we can optionally use to create GEM objects
1322          * backed by stolen memory. Note that stolen_usable_size tells us
1323          * exactly how much of this we are actually allowed to use, given that
1324          * some portion of it is in fact reserved for use by hardware functions.
1325          */
1326         struct resource dsm;
1327         /**
1328          * Reserved portion of Data Stolen Memory
1329          */
1330         struct resource dsm_reserved;
1331
1332         /*
1333          * Stolen memory is segmented in hardware with different portions
1334          * off-limits to certain functions.
1335          *
1336          * The drm_mm is initialised to the total accessible range, as found
1337          * from the PCI config. On Broadwell+, this is further restricted to
1338          * avoid the first page! The upper end of stolen memory is reserved for
1339          * hardware functions and similarly removed from the accessible range.
1340          */
1341         resource_size_t stolen_usable_size;     /* Total size minus reserved ranges */
1342
1343         struct intel_uncore uncore;
1344
1345         struct i915_virtual_gpu vgpu;
1346
1347         struct intel_gvt *gvt;
1348
1349         struct intel_wopcm wopcm;
1350
1351         struct intel_huc huc;
1352         struct intel_guc guc;
1353
1354         struct intel_csr csr;
1355
1356         struct intel_gmbus gmbus[GMBUS_NUM_PINS];
1357
1358         /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1359          * controller on different i2c buses. */
1360         struct mutex gmbus_mutex;
1361
1362         /**
1363          * Base address of where the gmbus and gpio blocks are located (either
1364          * on PCH or on SoC for platforms without PCH).
1365          */
1366         u32 gpio_mmio_base;
1367
1368         /* MMIO base address for MIPI regs */
1369         u32 mipi_mmio_base;
1370
1371         u32 psr_mmio_base;
1372
1373         u32 pps_mmio_base;
1374
1375         wait_queue_head_t gmbus_wait_queue;
1376
1377         struct pci_dev *bridge_dev;
1378         struct intel_engine_cs *engine[I915_NUM_ENGINES];
1379         /* Context used internally to idle the GPU and setup initial state */
1380         struct i915_gem_context *kernel_context;
1381         /* Context only to be used for injecting preemption commands */
1382         struct i915_gem_context *preempt_context;
1383         struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
1384                                             [MAX_ENGINE_INSTANCE + 1];
1385
1386         struct resource mch_res;
1387
1388         /* protects the irq masks */
1389         spinlock_t irq_lock;
1390
1391         bool display_irqs_enabled;
1392
1393         /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1394         struct pm_qos_request pm_qos;
1395
1396         /* Sideband mailbox protection */
1397         struct mutex sb_lock;
1398         struct pm_qos_request sb_qos;
1399
1400         /** Cached value of IMR to avoid reads in updating the bitfield */
1401         union {
1402                 u32 irq_mask;
1403                 u32 de_irq_mask[I915_MAX_PIPES];
1404         };
1405         u32 gt_irq_mask;
1406         u32 pm_imr;
1407         u32 pm_ier;
1408         u32 pm_rps_events;
1409         u32 pm_guc_events;
1410         u32 pipestat_irq_mask[I915_MAX_PIPES];
1411
1412         struct i915_hotplug hotplug;
1413         struct intel_fbc fbc;
1414         struct i915_drrs drrs;
1415         struct intel_opregion opregion;
1416         struct intel_vbt_data vbt;
1417
1418         bool preserve_bios_swizzle;
1419
1420         /* overlay */
1421         struct intel_overlay *overlay;
1422
1423         /* backlight registers and fields in struct intel_panel */
1424         struct mutex backlight_lock;
1425
1426         /* LVDS info */
1427         bool no_aux_handshake;
1428
1429         /* protects panel power sequencer state */
1430         struct mutex pps_mutex;
1431
1432         unsigned int fsb_freq, mem_freq, is_ddr3;
1433         unsigned int skl_preferred_vco_freq;
1434         unsigned int max_cdclk_freq;
1435
1436         unsigned int max_dotclk_freq;
1437         unsigned int rawclk_freq;
1438         unsigned int hpll_freq;
1439         unsigned int fdi_pll_freq;
1440         unsigned int czclk_freq;
1441
1442         struct {
1443                 /*
1444                  * The current logical cdclk state.
1445                  * See intel_atomic_state.cdclk.logical
1446                  *
1447                  * For reading holding any crtc lock is sufficient,
1448                  * for writing must hold all of them.
1449                  */
1450                 struct intel_cdclk_state logical;
1451                 /*
1452                  * The current actual cdclk state.
1453                  * See intel_atomic_state.cdclk.actual
1454                  */
1455                 struct intel_cdclk_state actual;
1456                 /* The current hardware cdclk state */
1457                 struct intel_cdclk_state hw;
1458
1459                 int force_min_cdclk;
1460         } cdclk;
1461
1462         /**
1463          * wq - Driver workqueue for GEM.
1464          *
1465          * NOTE: Work items scheduled here are not allowed to grab any modeset
1466          * locks, for otherwise the flushing done in the pageflip code will
1467          * result in deadlocks.
1468          */
1469         struct workqueue_struct *wq;
1470
1471         /* ordered wq for modesets */
1472         struct workqueue_struct *modeset_wq;
1473
1474         /* Display functions */
1475         struct drm_i915_display_funcs display;
1476
1477         /* PCH chipset type */
1478         enum intel_pch pch_type;
1479         unsigned short pch_id;
1480
1481         unsigned long quirks;
1482
1483         struct drm_atomic_state *modeset_restore_state;
1484         struct drm_modeset_acquire_ctx reset_ctx;
1485
1486         struct i915_ggtt ggtt; /* VM representing the global address space */
1487
1488         struct i915_gem_mm mm;
1489         DECLARE_HASHTABLE(mm_structs, 7);
1490         struct mutex mm_lock;
1491
1492         struct intel_ppat ppat;
1493
1494         /* Kernel Modesetting */
1495
1496         struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1497         struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1498
1499 #ifdef CONFIG_DEBUG_FS
1500         struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1501 #endif
1502
1503         /* dpll and cdclk state is protected by connection_mutex */
1504         int num_shared_dpll;
1505         struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1506         const struct intel_dpll_mgr *dpll_mgr;
1507
1508         /*
1509          * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
1510          * Must be global rather than per dpll, because on some platforms
1511          * plls share registers.
1512          */
1513         struct mutex dpll_lock;
1514
1515         unsigned int active_crtcs;
1516         /* minimum acceptable cdclk for each pipe */
1517         int min_cdclk[I915_MAX_PIPES];
1518         /* minimum acceptable voltage level for each pipe */
1519         u8 min_voltage_level[I915_MAX_PIPES];
1520
1521         int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1522
1523         struct i915_wa_list gt_wa_list;
1524
1525         struct i915_frontbuffer_tracking fb_tracking;
1526
1527         struct intel_atomic_helper {
1528                 struct llist_head free_list;
1529                 struct work_struct free_work;
1530         } atomic_helper;
1531
1532         u16 orig_clock;
1533
1534         bool mchbar_need_disable;
1535
1536         struct intel_l3_parity l3_parity;
1537
1538         /*
1539          * edram size in MB.
1540          * Cannot be determined by PCIID. You must always read a register.
1541          */
1542         u32 edram_size_mb;
1543
1544         /* gen6+ GT PM state */
1545         struct intel_gen6_power_mgmt gt_pm;
1546
1547         /* ilk-only ips/rps state. Everything in here is protected by the global
1548          * mchdev_lock in intel_pm.c */
1549         struct intel_ilk_power_mgmt ips;
1550
1551         struct i915_power_domains power_domains;
1552
1553         struct i915_psr psr;
1554
1555         struct i915_gpu_error gpu_error;
1556
1557         struct drm_i915_gem_object *vlv_pctx;
1558
1559         /* list of fbdevs registered on this device */
1560         struct intel_fbdev *fbdev;
1561         struct work_struct fbdev_suspend_work;
1562
1563         struct drm_property *broadcast_rgb_property;
1564         struct drm_property *force_audio_property;
1565
1566         /* hda/i915 audio component */
1567         struct i915_audio_component *audio_component;
1568         bool audio_component_registered;
1569         /**
1570          * av_mutex - mutex for audio/video sync
1571          *
1572          */
1573         struct mutex av_mutex;
1574         int audio_power_refcount;
1575
1576         struct {
1577                 struct mutex mutex;
1578                 struct list_head list;
1579                 struct llist_head free_list;
1580                 struct work_struct free_work;
1581
1582                 /* The hw wants to have a stable context identifier for the
1583                  * lifetime of the context (for OA, PASID, faults, etc).
1584                  * This is limited in execlists to 21 bits.
1585                  */
1586                 struct ida hw_ida;
1587 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1588 #define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
1589 #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
1590                 struct list_head hw_id_list;
1591         } contexts;
1592
1593         u32 fdi_rx_config;
1594
1595         /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
1596         u32 chv_phy_control;
1597         /*
1598          * Shadows for CHV DPLL_MD regs to keep the state
1599          * checker somewhat working in the presence of hardware
1600          * crappiness (can't read out DPLL_MD for pipes B & C).
1601          */
1602         u32 chv_dpll_md[I915_MAX_PIPES];
1603         u32 bxt_phy_grc;
1604
1605         u32 suspend_count;
1606         bool power_domains_suspended;
1607         struct i915_suspend_saved_registers regfile;
1608         struct vlv_s0ix_state vlv_s0ix_state;
1609
1610         enum {
1611                 I915_SAGV_UNKNOWN = 0,
1612                 I915_SAGV_DISABLED,
1613                 I915_SAGV_ENABLED,
1614                 I915_SAGV_NOT_CONTROLLED
1615         } sagv_status;
1616
1617         struct {
1618                 /*
1619                  * Raw watermark latency values:
1620                  * in 0.1us units for WM0,
1621                  * in 0.5us units for WM1+.
1622                  */
1623                 /* primary */
1624                 u16 pri_latency[5];
1625                 /* sprite */
1626                 u16 spr_latency[5];
1627                 /* cursor */
1628                 u16 cur_latency[5];
1629                 /*
1630                  * Raw watermark memory latency values
1631                  * for SKL for all 8 levels
1632                  * in 1us units.
1633                  */
1634                 u16 skl_latency[8];
1635
1636                 /* current hardware state */
1637                 union {
1638                         struct ilk_wm_values hw;
1639                         struct skl_ddb_values skl_hw;
1640                         struct vlv_wm_values vlv;
1641                         struct g4x_wm_values g4x;
1642                 };
1643
1644                 u8 max_level;
1645
1646                 /*
1647                  * Should be held around atomic WM register writing; also
1648                  * protects intel_crtc->wm.active and
1649                  * cstate->wm.need_postvbl_update.
1650                  */
1651                 struct mutex wm_mutex;
1652
1653                 /*
1654                  * Set during HW readout of watermarks/DDB.  Some platforms
1655                  * need to know when we're still using BIOS-provided values
1656                  * (which we don't fully trust).
1657                  */
1658                 bool distrust_bios_wm;
1659         } wm;
1660
1661         struct dram_info {
1662                 bool valid;
1663                 bool is_16gb_dimm;
1664                 u8 num_channels;
1665                 u8 ranks;
1666                 u32 bandwidth_kbps;
1667                 bool symmetric_memory;
1668                 enum intel_dram_type {
1669                         INTEL_DRAM_UNKNOWN,
1670                         INTEL_DRAM_DDR3,
1671                         INTEL_DRAM_DDR4,
1672                         INTEL_DRAM_LPDDR3,
1673                         INTEL_DRAM_LPDDR4
1674                 } type;
1675         } dram_info;
1676
1677         struct intel_bw_info {
1678                 int num_planes;
1679                 int deratedbw[3];
1680         } max_bw[6];
1681
1682         struct drm_private_obj bw_obj;
1683
1684         struct intel_runtime_pm runtime_pm;
1685
1686         struct {
1687                 bool initialized;
1688
1689                 struct kobject *metrics_kobj;
1690                 struct ctl_table_header *sysctl_header;
1691
1692                 /*
1693                  * Lock associated with adding/modifying/removing OA configs
1694                  * in dev_priv->perf.metrics_idr.
1695                  */
1696                 struct mutex metrics_lock;
1697
1698                 /*
1699                  * List of dynamic configurations; you need to hold
1700                  * dev_priv->perf.metrics_lock to access it.
1701                  */
1702                 struct idr metrics_idr;
1703
1704                 /*
1705                  * Lock associated with anything below within this structure
1706                  * except exclusive_stream.
1707                  */
1708                 struct mutex lock;
1709                 struct list_head streams;
1710
1711                 struct {
1712                         /*
1713                          * The stream currently using the OA unit. If accessed
1714                          * outside a syscall associated with its file
1715                          * descriptor, you need to hold
1716                          * dev_priv->drm.struct_mutex.
1717                          */
1718                         struct i915_perf_stream *exclusive_stream;
1719
1720                         struct intel_context *pinned_ctx;
1721                         u32 specific_ctx_id;
1722                         u32 specific_ctx_id_mask;
1723
1724                         struct hrtimer poll_check_timer;
1725                         wait_queue_head_t poll_wq;
1726                         bool pollin;
1727
1728                         /**
1729                          * For rate limiting any notifications of spurious
1730                          * invalid OA reports
1731                          */
1732                         struct ratelimit_state spurious_report_rs;
1733
1734                         bool periodic;
1735                         int period_exponent;
1736
1737                         struct i915_oa_config test_config;
1738
1739                         struct {
1740                                 struct i915_vma *vma;
1741                                 u8 *vaddr;
1742                                 u32 last_ctx_id;
1743                                 int format;
1744                                 int format_size;
1745
1746                                 /**
1747                                  * Locks reads and writes to all head/tail state
1748                                  *
1749                                  * Consider: the head and tail pointer state
1750                                  * needs to be read consistently from a hrtimer
1751                                  * callback (atomic context) and read() fop
1752                                  * (user context) with tail pointer updates
1753                                  * happening in atomic context and head updates
1754                                  * in user context and the (unlikely)
1755                                  * possibility of read() errors needing to
1756                                  * reset all head/tail state.
1757                                  *
1758                                  * Note: Contention or performance aren't
1759                                  * currently a significant concern here
1760                                  * considering the relatively low frequency of
1761                                  * hrtimer callbacks (5ms period) and that
1762                                  * reads typically only happen in response to a
1763                                  * hrtimer event and likely complete before the
1764                                  * next callback.
1765                                  *
1766                                  * Note: This lock is not held *while* reading
1767                                  * and copying data to userspace, so the value
1768                                  * of head observed in hrtimer callbacks won't
1769                                  * represent any partial consumption of data.
1770                                  */
1771                                 spinlock_t ptr_lock;
1772
1773                                 /**
1774                                  * One 'aging' tail pointer and one 'aged'
1775                                  * tail pointer ready to be used for reading.
1776                                  *
1777                                  * Initial values of 0xffffffff are invalid
1778                                  * and imply that an update is required
1779                                  * (and should be ignored by an attempted
1780                                  * read)
1781                                  */
1782                                 struct {
1783                                         u32 offset;
1784                                 } tails[2];
1785
1786                                 /**
1787                                  * Index for the aged tail ready to read()
1788                                  * data up to.
1789                                  */
1790                                 unsigned int aged_tail_idx;
1791
1792                                 /**
1793                                  * A monotonic timestamp for when the current
1794                                  * aging tail pointer was read; used to
1795                                  * determine when it is old enough to trust.
1796                                  */
1797                                 u64 aging_timestamp;
1798
1799                                 /**
1800                                  * Although we can always read back the head
1801                                  * pointer register, we prefer to avoid
1802                                  * trusting the HW state, just to avoid any
1803                                  * risk that some hardware condition could
1804                                  * somehow bump the head pointer unpredictably
1805                                  * and cause us to forward the wrong OA buffer
1806                                  * data to userspace.
1807                                  */
1808                                 u32 head;
1809                         } oa_buffer;
1810
1811                         u32 gen7_latched_oastatus1;
1812                         u32 ctx_oactxctrl_offset;
1813                         u32 ctx_flexeu0_offset;
1814
1815                         /**
1816                          * The RPT_ID/reason field for Gen8+ includes a bit
1817                          * to determine if the CTX ID in the report is valid,
1818                          * but the specific bit differs between Gen 8 and 9.
1819                          */
1820                         u32 gen8_valid_ctx_bit;
1821
1822                         struct i915_oa_ops ops;
1823                         const struct i915_oa_format *oa_formats;
1824                 } oa;
1825         } perf;
1826
1827         /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1828         struct intel_gt gt;
1829
1830         struct {
1831                 struct notifier_block pm_notifier;
1832
1833                 /**
1834                  * We leave the user IRQ off as much as possible,
1835                  * but this means that requests will finish and never
1836                  * be retired once the system goes idle. Set a timer to
1837                  * fire periodically while the ring is running. When it
1838                  * fires, go retire requests.
1839                  */
1840                 struct delayed_work retire_work;
1841
1842                 /**
1843                  * When we detect an idle GPU, we want to turn on
1844                  * powersaving features. So once we see that there
1845                  * are no more requests outstanding and no more
1846                  * arrive within a small period of time, we fire
1847                  * off the idle_work.
1848                  */
1849                 struct work_struct idle_work;
1850         } gem;
1851
1852         /* For i945gm vblank irq vs. C3 workaround */
1853         struct {
1854                 struct work_struct work;
1855                 struct pm_qos_request pm_qos;
1856                 u8 c3_disable_latency;
1857                 u8 enabled;
1858         } i945gm_vblank;
1859
1860         /* perform PHY state sanity checks? */
1861         bool chv_phy_assert[2];
1862
1863         bool ipc_enabled;
1864
1865         /* Used to save the pipe-to-encoder mapping for audio */
1866         struct intel_encoder *av_enc_map[I915_MAX_PIPES];
1867
1868         /* necessary resource sharing with the HDMI LPE audio driver. */
1869         struct {
1870                 struct platform_device *platdev;
1871                 int     irq;
1872         } lpe_audio;
1873
1874         struct i915_pmu pmu;
1875
1876         struct i915_hdcp_comp_master *hdcp_master;
1877         bool hdcp_comp_added;
1878
1879         /* Mutex to protect the above hdcp component related values. */
1880         struct mutex hdcp_comp_mutex;
1881
1882         /*
1883          * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1884          * will be rejected. Instead look for a better place.
1885          */
1886 };
1887
1888 struct dram_dimm_info {
1889         u8 size, width, ranks;
1890 };
1891
1892 struct dram_channel_info {
1893         struct dram_dimm_info dimm_l, dimm_s;
1894         u8 ranks;
1895         bool is_16gb_dimm;
1896 };
1897
1898 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1899 {
1900         return container_of(dev, struct drm_i915_private, drm);
1901 }
1902
1903 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
1904 {
1905         return to_i915(dev_get_drvdata(kdev));
1906 }
1907
1908 static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
1909 {
1910         return container_of(wopcm, struct drm_i915_private, wopcm);
1911 }
1912
1913 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
1914 {
1915         return container_of(guc, struct drm_i915_private, guc);
1916 }
1917
1918 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
1919 {
1920         return container_of(huc, struct drm_i915_private, huc);
1921 }
1922
1923 /* Simple iterator over all initialised engines */
1924 #define for_each_engine(engine__, dev_priv__, id__) \
1925         for ((id__) = 0; \
1926              (id__) < I915_NUM_ENGINES; \
1927              (id__)++) \
1928                 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
1929
1930 /* Iterator over subset of engines selected by mask */
1931 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
1932         for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
1933              (tmp__) ? \
1934              ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
1935              0;)
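/*
 * Illustrative usage sketch (not part of the original header); assumes the
 * usual dev_priv local and fully initialised engines:
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		pr_info("%s initialised\n", engine->name);
 *
 *	for_each_engine_masked(engine, dev_priv, BIT(VCS0), tmp)
 *		pr_info("%s selected by mask\n", engine->name);
 */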
1936
1937 enum hdmi_force_audio {
1938         HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
1939         HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
1940         HDMI_AUDIO_AUTO,                /* trust EDID */
1941         HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
1942 };
1943
1944 #define I915_GTT_OFFSET_NONE ((u32)-1)
1945
1946 /*
1947  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
1948  * considered to be the frontbuffer for the given plane interface-wise. This
1949  * doesn't mean that the hw necessarily already scans it out, but that any
1950  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
1951  *
1952  * We have one bit per pipe and per scanout plane type.
1953  */
1954 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
1955 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
1956         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
1957         BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
1958         BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
1959 })
1960 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
1961         BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
1962 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
1963         GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
1964                 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
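/*
 * Worked example (illustrative): with 8 bits per pipe, the primary plane of
 * pipe B maps to INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) == BIT(8), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) covers bits 8..15 of the tracking mask.
 */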
1965
1966 #define INTEL_INFO(dev_priv)    (&(dev_priv)->__info)
1967 #define RUNTIME_INFO(dev_priv)  (&(dev_priv)->__runtime)
1968 #define DRIVER_CAPS(dev_priv)   (&(dev_priv)->caps)
1969
1970 #define INTEL_GEN(dev_priv)     (INTEL_INFO(dev_priv)->gen)
1971 #define INTEL_DEVID(dev_priv)   (RUNTIME_INFO(dev_priv)->device_id)
1972
1973 #define REVID_FOREVER           0xff
1974 #define INTEL_REVID(dev_priv)   ((dev_priv)->drm.pdev->revision)
1975
1976 #define INTEL_GEN_MASK(s, e) ( \
1977         BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
1978         BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
1979         GENMASK((e) - 1, (s) - 1))
1980
1981 /* Returns true if Gen is in inclusive range [Start, End] */
1982 #define IS_GEN_RANGE(dev_priv, s, e) \
1983         (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
1984
1985 #define IS_GEN(dev_priv, n) \
1986         (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
1987          INTEL_INFO(dev_priv)->gen == (n))
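/*
 * Example (illustrative): IS_GEN(dev_priv, 9) matches gen9 only, whereas
 * IS_GEN_RANGE(dev_priv, 9, 11) matches any of gen9, gen10 or gen11; both
 * require the gen arguments to be compile-time constants.
 */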
1988
1989 /*
1990  * Return true if revision is in range [since,until] inclusive.
1991  *
1992  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
1993  */
1994 #define IS_REVID(p, since, until) \
1995         (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
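/*
 * Example (illustrative): IS_REVID(dev_priv, 0, SKL_REVID_C0) matches
 * revisions A0..C0, while IS_REVID(dev_priv, SKL_REVID_D0, REVID_FOREVER)
 * matches D0 and anything newer (the SKL_REVID_* values are defined below).
 */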
1996
1997 static __always_inline unsigned int
1998 __platform_mask_index(const struct intel_runtime_info *info,
1999                       enum intel_platform p)
2000 {
2001         const unsigned int pbits =
2002                 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
2003
2004         /* Expand the platform_mask array if this fails. */
2005         BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
2006                      pbits * ARRAY_SIZE(info->platform_mask));
2007
2008         return p / pbits;
2009 }
2010
2011 static __always_inline unsigned int
2012 __platform_mask_bit(const struct intel_runtime_info *info,
2013                     enum intel_platform p)
2014 {
2015         const unsigned int pbits =
2016                 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
2017
2018         return p % pbits + INTEL_SUBPLATFORM_BITS;
2019 }
2020
2021 static inline u32
2022 intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
2023 {
2024         const unsigned int pi = __platform_mask_index(info, p);
2025
2026         return info->platform_mask[pi] & GENMASK(INTEL_SUBPLATFORM_BITS - 1, 0);
2027 }
2028
2029 static __always_inline bool
2030 IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
2031 {
2032         const struct intel_runtime_info *info = RUNTIME_INFO(i915);
2033         const unsigned int pi = __platform_mask_index(info, p);
2034         const unsigned int pb = __platform_mask_bit(info, p);
2035
2036         BUILD_BUG_ON(!__builtin_constant_p(p));
2037
2038         return info->platform_mask[pi] & BIT(pb);
2039 }
2040
2041 static __always_inline bool
2042 IS_SUBPLATFORM(const struct drm_i915_private *i915,
2043                enum intel_platform p, unsigned int s)
2044 {
2045         const struct intel_runtime_info *info = RUNTIME_INFO(i915);
2046         const unsigned int pi = __platform_mask_index(info, p);
2047         const unsigned int pb = __platform_mask_bit(info, p);
2048         const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
2049         const u32 mask = info->platform_mask[pi];
2050
2051         BUILD_BUG_ON(!__builtin_constant_p(p));
2052         BUILD_BUG_ON(!__builtin_constant_p(s));
2053         BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
2054
2055         /* Shift and test on the MSB position so sign flag can be used. */
2056         return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
2057 }
2058
2059 #define IS_MOBILE(dev_priv)     (INTEL_INFO(dev_priv)->is_mobile)
2060
2061 #define IS_I830(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I830)
2062 #define IS_I845G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I845G)
2063 #define IS_I85X(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I85X)
2064 #define IS_I865G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I865G)
2065 #define IS_I915G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I915G)
2066 #define IS_I915GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I915GM)
2067 #define IS_I945G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I945G)
2068 #define IS_I945GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I945GM)
2069 #define IS_I965G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I965G)
2070 #define IS_I965GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I965GM)
2071 #define IS_G45(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G45)
2072 #define IS_GM45(dev_priv)       IS_PLATFORM(dev_priv, INTEL_GM45)
2073 #define IS_G4X(dev_priv)        (IS_G45(dev_priv) || IS_GM45(dev_priv))
2074 #define IS_PINEVIEW(dev_priv)   IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
2075 #define IS_G33(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G33)
2076 #define IS_IRONLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
2077 #define IS_IRONLAKE_M(dev_priv) \
2078         (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
2079 #define IS_IVYBRIDGE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
2080 #define IS_IVB_GT1(dev_priv)    (IS_IVYBRIDGE(dev_priv) && \
2081                                  INTEL_INFO(dev_priv)->gt == 1)
2082 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
2083 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
2084 #define IS_HASWELL(dev_priv)    IS_PLATFORM(dev_priv, INTEL_HASWELL)
2085 #define IS_BROADWELL(dev_priv)  IS_PLATFORM(dev_priv, INTEL_BROADWELL)
2086 #define IS_SKYLAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
2087 #define IS_BROXTON(dev_priv)    IS_PLATFORM(dev_priv, INTEL_BROXTON)
2088 #define IS_KABYLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
2089 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
2090 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
2091 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
2092 #define IS_ICELAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_ICELAKE)
2093 #define IS_ELKHARTLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
2094 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2095                                     (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2096 #define IS_BDW_ULT(dev_priv) \
2097         IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
2098 #define IS_BDW_ULX(dev_priv) \
2099         IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
2100 #define IS_BDW_GT3(dev_priv)    (IS_BROADWELL(dev_priv) && \
2101                                  INTEL_INFO(dev_priv)->gt == 3)
2102 #define IS_HSW_ULT(dev_priv) \
2103         IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
2104 #define IS_HSW_GT3(dev_priv)    (IS_HASWELL(dev_priv) && \
2105                                  INTEL_INFO(dev_priv)->gt == 3)
2106 #define IS_HSW_GT1(dev_priv)    (IS_HASWELL(dev_priv) && \
2107                                  INTEL_INFO(dev_priv)->gt == 1)
2108 /* ULX machines are also considered ULT. */
2109 #define IS_HSW_ULX(dev_priv) \
2110         IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
2111 #define IS_SKL_ULT(dev_priv) \
2112         IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
2113 #define IS_SKL_ULX(dev_priv) \
2114         IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
2115 #define IS_KBL_ULT(dev_priv) \
2116         IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
2117 #define IS_KBL_ULX(dev_priv) \
2118         IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
2119 #define IS_SKL_GT2(dev_priv)    (IS_SKYLAKE(dev_priv) && \
2120                                  INTEL_INFO(dev_priv)->gt == 2)
2121 #define IS_SKL_GT3(dev_priv)    (IS_SKYLAKE(dev_priv) && \
2122                                  INTEL_INFO(dev_priv)->gt == 3)
2123 #define IS_SKL_GT4(dev_priv)    (IS_SKYLAKE(dev_priv) && \
2124                                  INTEL_INFO(dev_priv)->gt == 4)
2125 #define IS_KBL_GT2(dev_priv)    (IS_KABYLAKE(dev_priv) && \
2126                                  INTEL_INFO(dev_priv)->gt == 2)
2127 #define IS_KBL_GT3(dev_priv)    (IS_KABYLAKE(dev_priv) && \
2128                                  INTEL_INFO(dev_priv)->gt == 3)
2129 #define IS_CFL_ULT(dev_priv) \
2130         IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
2131 #define IS_CFL_ULX(dev_priv) \
2132         IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
2133 #define IS_CFL_GT2(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
2134                                  INTEL_INFO(dev_priv)->gt == 2)
2135 #define IS_CFL_GT3(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
2136                                  INTEL_INFO(dev_priv)->gt == 3)
2137 #define IS_CNL_WITH_PORT_F(dev_priv) \
2138         IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
2139 #define IS_ICL_WITH_PORT_F(dev_priv) \
2140         IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
2141
2142 #define SKL_REVID_A0            0x0
2143 #define SKL_REVID_B0            0x1
2144 #define SKL_REVID_C0            0x2
2145 #define SKL_REVID_D0            0x3
2146 #define SKL_REVID_E0            0x4
2147 #define SKL_REVID_F0            0x5
2148 #define SKL_REVID_G0            0x6
2149 #define SKL_REVID_H0            0x7
2150
2151 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2152
2153 #define BXT_REVID_A0            0x0
2154 #define BXT_REVID_A1            0x1
2155 #define BXT_REVID_B0            0x3
2156 #define BXT_REVID_B_LAST        0x8
2157 #define BXT_REVID_C0            0x9
2158
2159 #define IS_BXT_REVID(dev_priv, since, until) \
2160         (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
2161
2162 #define KBL_REVID_A0            0x0
2163 #define KBL_REVID_B0            0x1
2164 #define KBL_REVID_C0            0x2
2165 #define KBL_REVID_D0            0x3
2166 #define KBL_REVID_E0            0x4
2167
2168 #define IS_KBL_REVID(dev_priv, since, until) \
2169         (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2170
2171 #define GLK_REVID_A0            0x0
2172 #define GLK_REVID_A1            0x1
2173
2174 #define IS_GLK_REVID(dev_priv, since, until) \
2175         (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2176
2177 #define CNL_REVID_A0            0x0
2178 #define CNL_REVID_B0            0x1
2179 #define CNL_REVID_C0            0x2
2180
2181 #define IS_CNL_REVID(p, since, until) \
2182         (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
2183
2184 #define ICL_REVID_A0            0x0
2185 #define ICL_REVID_A2            0x1
2186 #define ICL_REVID_B0            0x3
2187 #define ICL_REVID_B2            0x4
2188 #define ICL_REVID_C0            0x5
2189
2190 #define IS_ICL_REVID(p, since, until) \
2191         (IS_ICELAKE(p) && IS_REVID(p, since, until))
2192
2193 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
2194 #define IS_GEN9_LP(dev_priv)    (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
2195 #define IS_GEN9_BC(dev_priv)    (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
2196
2197 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
2198
2199 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({                \
2200         unsigned int first__ = (first);                                 \
2201         unsigned int count__ = (count);                                 \
2202         (INTEL_INFO(dev_priv)->engine_mask &                            \
2203          GENMASK(first__ + count__ - 1, first__)) >> first__;           \
2204 })
2205 #define VDBOX_MASK(dev_priv) \
2206         ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
2207 #define VEBOX_MASK(dev_priv) \
2208         ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
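/*
 * Illustrative example: on a part exposing two video decode engines (VCS0
 * and VCS1), VDBOX_MASK(dev_priv) evaluates to 0x3, i.e. one bit per
 * present instance counted from VCS0.
 */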
2209
2210 #define HAS_LLC(dev_priv)       (INTEL_INFO(dev_priv)->has_llc)
2211 #define HAS_SNOOP(dev_priv)     (INTEL_INFO(dev_priv)->has_snoop)
2212 #define HAS_EDRAM(dev_priv)     ((dev_priv)->edram_size_mb)
2213 #define HAS_WT(dev_priv)        ((IS_HASWELL(dev_priv) || \
2214                                  IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2215
2216 #define HWS_NEEDS_PHYSICAL(dev_priv)    (INTEL_INFO(dev_priv)->hws_needs_physical)
2217
2218 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2219                 (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
2220 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
2221                 (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
2222 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
2223                 (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
2224
2225 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2226
2227 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
2228 #define HAS_PPGTT(dev_priv) \
2229         (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
2230 #define HAS_FULL_PPGTT(dev_priv) \
2231         (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
2232
2233 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2234         GEM_BUG_ON((sizes) == 0); \
2235         ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
2236 })
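/*
 * Example (illustrative): HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K)
 * is true only when every requested size is covered by the platform's
 * supported page-size mask.
 */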
2237
2238 #define HAS_OVERLAY(dev_priv)            (INTEL_INFO(dev_priv)->display.has_overlay)
2239 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2240                 (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
2241
2242 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
2243 #define HAS_BROKEN_CS_TLB(dev_priv)     (IS_I830(dev_priv) || IS_I845G(dev_priv))
2244
2245 /* WaRsDisableCoarsePowerGating:skl,cnl */
2246 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2247         (IS_CANNONLAKE(dev_priv) || \
2248          IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
2249
2250 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
2251 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
2252                                         IS_GEMINILAKE(dev_priv) || \
2253                                         IS_KABYLAKE(dev_priv))
2254
2255 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2256  * rows, which changed the alignment requirements and fence programming.
2257  */
2258 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
2259                                          !(IS_I915G(dev_priv) || \
2260                                          IS_I915GM(dev_priv)))
2261 #define SUPPORTS_TV(dev_priv)           (INTEL_INFO(dev_priv)->display.supports_tv)
2262 #define I915_HAS_HOTPLUG(dev_priv)      (INTEL_INFO(dev_priv)->display.has_hotplug)
2263
2264 #define HAS_FW_BLC(dev_priv)    (INTEL_GEN(dev_priv) > 2)
2265 #define HAS_FBC(dev_priv)       (INTEL_INFO(dev_priv)->display.has_fbc)
2266 #define HAS_CUR_FBC(dev_priv)   (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
2267
2268 #define HAS_IPS(dev_priv)       (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
2269
2270 #define HAS_DP_MST(dev_priv)    (INTEL_INFO(dev_priv)->display.has_dp_mst)
2271
2272 #define HAS_DDI(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ddi)
2273 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
2274 #define HAS_PSR(dev_priv)                (INTEL_INFO(dev_priv)->display.has_psr)
2275 #define HAS_TRANSCODER_EDP(dev_priv)     (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
2276
2277 #define HAS_RC6(dev_priv)                (INTEL_INFO(dev_priv)->has_rc6)
2278 #define HAS_RC6p(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6p)
2279 #define HAS_RC6pp(dev_priv)              (false) /* HW was never validated */
2280
2281 #define HAS_RPS(dev_priv)       (INTEL_INFO(dev_priv)->has_rps)
2282
2283 #define HAS_CSR(dev_priv)       (INTEL_INFO(dev_priv)->display.has_csr)
2284
2285 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
2286 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
2287
2288 #define HAS_IPC(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ipc)
2289
2290 /*
2291  * For now, anything with a GuC requires uCode loading, and then supports
2292  * command submission once loaded. But these are logically independent
2293  * properties, so we have separate macros to test them.
2294  */
2295 #define HAS_GUC(dev_priv)       (INTEL_INFO(dev_priv)->has_guc)
2296 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
2297 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
2298
2299 /* For now, anything with a GuC also has a HuC */
2300 #define HAS_HUC(dev_priv)       (HAS_GUC(dev_priv))
2301 #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
2302
2303 /* Having a GuC is not the same as using a GuC */
2304 #define USES_GUC(dev_priv)              intel_uc_is_using_guc(dev_priv)
2305 #define USES_GUC_SUBMISSION(dev_priv)   intel_uc_is_using_guc_submission(dev_priv)
2306 #define USES_HUC(dev_priv)              intel_uc_is_using_huc(dev_priv)
2307
2308 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
2309
2310 #define INTEL_PCH_DEVICE_ID_MASK                0xff80
2311 #define INTEL_PCH_IBX_DEVICE_ID_TYPE            0x3b00
2312 #define INTEL_PCH_CPT_DEVICE_ID_TYPE            0x1c00
2313 #define INTEL_PCH_PPT_DEVICE_ID_TYPE            0x1e00
2314 #define INTEL_PCH_LPT_DEVICE_ID_TYPE            0x8c00
2315 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE         0x9c00
2316 #define INTEL_PCH_WPT_DEVICE_ID_TYPE            0x8c80
2317 #define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE         0x9c80
2318 #define INTEL_PCH_SPT_DEVICE_ID_TYPE            0xA100
2319 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE         0x9D00
2320 #define INTEL_PCH_KBP_DEVICE_ID_TYPE            0xA280
2321 #define INTEL_PCH_CNP_DEVICE_ID_TYPE            0xA300
2322 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE         0x9D80
2323 #define INTEL_PCH_CMP_DEVICE_ID_TYPE            0x0280
2324 #define INTEL_PCH_ICP_DEVICE_ID_TYPE            0x3480
2325 #define INTEL_PCH_MCC_DEVICE_ID_TYPE            0x4B00
2326 #define INTEL_PCH_P2X_DEVICE_ID_TYPE            0x7100
2327 #define INTEL_PCH_P3X_DEVICE_ID_TYPE            0x7000
2328 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE           0x2900 /* qemu q35 has 2918 */
2329
2330 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2331 #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
2332 #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
2333 #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
2334 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
2335 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2336 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2337 #define HAS_PCH_LPT_LP(dev_priv) \
2338         (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
2339          INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
2340 #define HAS_PCH_LPT_H(dev_priv) \
2341         (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
2342          INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
2343 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2344 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2345 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2346 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2347
2348 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
2349
2350 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
2351
2352 /* DPF == dynamic parity feature */
2353 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
2354 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2355                                  2 : HAS_L3_DPF(dev_priv))
2356
2357 #define GT_FREQUENCY_MULTIPLIER 50
2358 #define GEN9_FREQ_SCALER 3
2359
2360 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
2361
2362 #include "i915_trace.h"
2363
2364 static inline bool intel_vtd_active(void)
2365 {
2366 #ifdef CONFIG_INTEL_IOMMU
2367         if (intel_iommu_gfx_mapped)
2368                 return true;
2369 #endif
2370         return false;
2371 }
2372
2373 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2374 {
2375         return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
2376 }
2377
2378 static inline bool
2379 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2380 {
2381         return IS_BROXTON(dev_priv) && intel_vtd_active();
2382 }
2383
2384 /* i915_drv.c */
2385 void __printf(3, 4)
2386 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2387               const char *fmt, ...);
2388
2389 #define i915_report_error(dev_priv, fmt, ...)                              \
2390         __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2391
2392 #ifdef CONFIG_COMPAT
2393 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2394                               unsigned long arg);
2395 #else
2396 #define i915_compat_ioctl NULL
2397 #endif
2398 extern const struct dev_pm_ops i915_pm_ops;
2399
2400 extern int i915_driver_load(struct pci_dev *pdev,
2401                             const struct pci_device_id *ent);
2402 extern void i915_driver_unload(struct drm_device *dev);
2403
2404 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2405 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
2406 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2407
2408 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
2409
2410 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2411 {
2412         unsigned long delay;
2413
2414         if (unlikely(!i915_modparams.enable_hangcheck))
2415                 return;
2416
2417         /* Don't continually defer the hangcheck so that it is always run at
2418          * least once after work has been scheduled on any ring. Otherwise,
2419          * we will ignore a hung ring if a second ring is kept busy.
2420          */
2421
2422         delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2423         queue_delayed_work(system_long_wq,
2424                            &dev_priv->gpu_error.hangcheck_work, delay);
2425 }
2426
2427 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2428 {
2429         return dev_priv->gvt;
2430 }
2431
2432 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2433 {
2434         return dev_priv->vgpu.active;
2435 }
2436
2437 /* i915_gem.c */
2438 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
2439 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
2440 void i915_gem_sanitize(struct drm_i915_private *i915);
2441 int i915_gem_init_early(struct drm_i915_private *dev_priv);
2442 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
2443 int i915_gem_freeze(struct drm_i915_private *dev_priv);
2444 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
2445
2446 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
2447 {
2448         if (!atomic_read(&i915->mm.free_count))
2449                 return;
2450
2451         /* A single pass should suffice to release all the freed objects (along
2452          * most call paths), but be a little more paranoid in that freeing
2453          * the objects does take a small amount of time, during which the rcu
2454          * callbacks could have added new objects into the freed list, and
2455          * armed the work again.
2456          */
2457         do {
2458                 rcu_barrier();
2459         } while (flush_work(&i915->mm.free_work));
2460 }
2461
2462 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
2463 {
2464         /*
2465          * Similar to the objects above (see i915_gem_drain_freed_objects()), in
2466          * general we have workers that are armed by RCU and then rearm
2467          * themselves in their callbacks. To be paranoid, we need to
2468          * drain the workqueue a second time after waiting for the RCU
2469          * grace period so that we catch work queued via RCU from the first
2470          * pass. As neither drain_workqueue() nor flush_workqueue() report
2471          * a result, we assume that no more than 3 passes are required
2472          * to catch all _recursive_ RCU delayed work.
2473          *
2474          */
2475         int pass = 3;
2476         do {
2477                 rcu_barrier();
2478                 i915_gem_drain_freed_objects(i915);
2479         } while (--pass);
2480         drain_workqueue(i915->wq);
2481 }
2482
2483 struct i915_vma * __must_check
2484 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
2485                          const struct i915_ggtt_view *view,
2486                          u64 size,
2487                          u64 alignment,
2488                          u64 flags);
2489
2490 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
2491
2492 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
2493
2494 static inline int __must_check
2495 i915_mutex_lock_interruptible(struct drm_device *dev)
2496 {
2497         return mutex_lock_interruptible(&dev->struct_mutex);
2498 }
2499
2500 int i915_gem_dumb_create(struct drm_file *file_priv,
2501                          struct drm_device *dev,
2502                          struct drm_mode_create_dumb *args);
2503 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2504                       u32 handle, u64 *offset);
2505 int i915_gem_mmap_gtt_version(void);
2506
2507 void i915_gem_track_fb(struct drm_i915_gem_object *old,
2508                        struct drm_i915_gem_object *new,
2509                        unsigned frontbuffer_bits);
2510
2511 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
2512
2513 static inline bool __i915_wedged(struct i915_gpu_error *error)
2514 {
2515         return unlikely(test_bit(I915_WEDGED, &error->flags));
2516 }
2517
2518 static inline bool i915_reset_failed(struct drm_i915_private *i915)
2519 {
2520         return __i915_wedged(&i915->gpu_error);
2521 }
2522
2523 static inline u32 i915_reset_count(struct i915_gpu_error *error)
2524 {
2525         return READ_ONCE(error->reset_count);
2526 }
2527
2528 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
2529                                           struct intel_engine_cs *engine)
2530 {
2531         return READ_ONCE(error->reset_engine_count[engine->id]);
2532 }
2533
2534 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
2535 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
2536
2537 void i915_gem_init_mmio(struct drm_i915_private *i915);
2538 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
2539 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
2540 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
2541 void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
2542 void i915_gem_fini(struct drm_i915_private *dev_priv);
2543 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2544                            unsigned int flags, long timeout);
2545 void i915_gem_suspend(struct drm_i915_private *dev_priv);
2546 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
2547 void i915_gem_resume(struct drm_i915_private *dev_priv);
2548 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
2549
2550 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
2551 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2552
2553 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2554                                     enum i915_cache_level cache_level);
2555
2556 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2557                                 struct dma_buf *dma_buf);
2558
2559 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2560                                 struct drm_gem_object *gem_obj, int flags);
2561
2562 static inline struct i915_gem_context *
2563 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
2564 {
2565         return idr_find(&file_priv->context_idr, id);
2566 }
2567
2568 static inline struct i915_gem_context *
2569 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2570 {
2571         struct i915_gem_context *ctx;
2572
2573         rcu_read_lock();
2574         ctx = __i915_gem_context_lookup_rcu(file_priv, id);
2575         if (ctx && !kref_get_unless_zero(&ctx->ref))
2576                 ctx = NULL;
2577         rcu_read_unlock();
2578
2579         return ctx;
2580 }
2581
2582 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2583                          struct drm_file *file);
2584 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
2585                                struct drm_file *file);
2586 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
2587                                   struct drm_file *file);
2588 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
2589                             struct intel_context *ce,
2590                             u32 *reg_state);
2591
2592 /* i915_gem_evict.c */
2593 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
2594                                           u64 min_size, u64 alignment,
2595                                           unsigned cache_level,
2596                                           u64 start, u64 end,
2597                                           unsigned flags);
2598 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
2599                                          struct drm_mm_node *node,
2600                                          unsigned int flags);
2601 int i915_gem_evict_vm(struct i915_address_space *vm);
2602
2603 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
2604
2605 /* belongs in i915_gem_gtt.h */
2606 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
2607 {
2608         wmb();
2609         if (INTEL_GEN(dev_priv) < 6)
2610                 intel_gtt_chipset_flush();
2611 }
2612
2613 /* i915_gem_stolen.c */
2614 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
2615                                 struct drm_mm_node *node, u64 size,
2616                                 unsigned alignment);
2617 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
2618                                          struct drm_mm_node *node, u64 size,
2619                                          unsigned alignment, u64 start,
2620                                          u64 end);
2621 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
2622                                  struct drm_mm_node *node);
2623 int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
2624 void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
2625 struct drm_i915_gem_object *
2626 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
2627                               resource_size_t size);
2628 struct drm_i915_gem_object *
2629 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
2630                                                resource_size_t stolen_offset,
2631                                                resource_size_t gtt_offset,
2632                                                resource_size_t size);
2633
2634 /* i915_gem_internal.c */
2635 struct drm_i915_gem_object *
2636 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
2637                                 phys_addr_t size);
2638
2639 /* i915_gem_shrinker.c */
2640 unsigned long i915_gem_shrink(struct drm_i915_private *i915,
2641                               unsigned long target,
2642                               unsigned long *nr_scanned,
2643                               unsigned flags);
2644 #define I915_SHRINK_UNBOUND     BIT(0)
2645 #define I915_SHRINK_BOUND       BIT(1)
2646 #define I915_SHRINK_ACTIVE      BIT(2)
2647 #define I915_SHRINK_VMAPS       BIT(3)
2648 #define I915_SHRINK_WRITEBACK   BIT(4)
2649
2650 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
2651 void i915_gem_shrinker_register(struct drm_i915_private *i915);
2652 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
2653 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
2654                                     struct mutex *mutex);
2655
2656 /* i915_gem_tiling.c */
2657 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2658 {
2659         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2660
2661         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2662                 i915_gem_object_is_tiled(obj);
2663 }
2664
2665 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
2666                         unsigned int tiling, unsigned int stride);
2667 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
2668                              unsigned int tiling, unsigned int stride);
2669
2670 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
2671
2672 /* i915_cmd_parser.c */
2673 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
2674 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
2675 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
2676 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
2677                             struct drm_i915_gem_object *batch_obj,
2678                             struct drm_i915_gem_object *shadow_batch_obj,
2679                             u32 batch_start_offset,
2680                             u32 batch_len,
2681                             bool is_master);
2682
2683 /* i915_perf.c */
2684 extern void i915_perf_init(struct drm_i915_private *dev_priv);
2685 extern void i915_perf_fini(struct drm_i915_private *dev_priv);
2686 extern void i915_perf_register(struct drm_i915_private *dev_priv);
2687 extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
2688
2689 /* i915_suspend.c */
2690 extern int i915_save_state(struct drm_i915_private *dev_priv);
2691 extern int i915_restore_state(struct drm_i915_private *dev_priv);
2692
2693 /* i915_sysfs.c */
2694 void i915_setup_sysfs(struct drm_i915_private *dev_priv);
2695 void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
2696
2697 /* intel_device_info.c */
2698 static inline struct intel_device_info *
2699 mkwrite_device_info(struct drm_i915_private *dev_priv)
2700 {
2701         return (struct intel_device_info *)INTEL_INFO(dev_priv);
2702 }
2703
2704 /* modesetting */
2705 extern void intel_modeset_init_hw(struct drm_device *dev);
2706 extern int intel_modeset_init(struct drm_device *dev);
2707 extern void intel_modeset_cleanup(struct drm_device *dev);
2708 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
2709                                        bool state);
2710 extern void intel_display_resume(struct drm_device *dev);
2711 extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
2712 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
2713 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
2714
2715 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2716                         struct drm_file *file);
2717
2718 extern struct intel_display_error_state *
2719 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
2720 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2721                                             struct intel_display_error_state *error);
2722
2723 #define __I915_REG_OP(op__, dev_priv__, ...) \
2724         intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
2725
2726 #define I915_READ(reg__)         __I915_REG_OP(read, dev_priv, (reg__))
2727 #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
2728
2729 #define POSTING_READ(reg__)     __I915_REG_OP(posting_read, dev_priv, (reg__))
2730
2731 /* These are untraced mmio-accessors that are only valid to be used inside
2732  * critical sections, such as inside IRQ handlers, where forcewake is explicitly
2733  * controlled.
2734  *
2735  * Think twice, and think again, before using these.
2736  *
2737  * As an example, these accessors can possibly be used between:
2738  *
2739  * spin_lock_irq(&dev_priv->uncore.lock);
2740  * intel_uncore_forcewake_get__locked();
2741  *
2742  * and
2743  *
2744  * intel_uncore_forcewake_put__locked();
2745  * spin_unlock_irq(&dev_priv->uncore.lock);
2746  *
2747  *
2748  * Note: some registers may not need forcewake held, so
2749  * intel_uncore_forcewake_{get,put} can be omitted, see
2750  * intel_uncore_forcewake_for_reg().
2751  *
2752  * Certain architectures will die if the same cacheline is concurrently accessed
2753  * by different clients (e.g. on Ivybridge). Access to registers should
2754  * therefore generally be serialised, by either the dev_priv->uncore.lock or
2755  * a more localised lock guarding all access to that bank of registers.
2756  */
2757 #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
2758 #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
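/*
 * Illustrative sketch of the locked/forcewake pattern described above
 * (assumes the register actually needs forcewake; reg and val are
 * hypothetical locals):
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */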
2759
2760 /* "Broadcast RGB" property */
2761 #define INTEL_BROADCAST_RGB_AUTO 0
2762 #define INTEL_BROADCAST_RGB_FULL 1
2763 #define INTEL_BROADCAST_RGB_LIMITED 2
2764
2765 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
2766 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
2767
2768 /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
2769  * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
2770  * perform the operation. To check beforehand, pass in the parameters to
2771  * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
2772  * you only need to pass in the minor offsets; page-aligned pointers are
2773  * always valid.
2774  *
2775  * For just checking for SSE4.1, in the foreknowledge that the future use
2776  * will be correctly aligned, just use i915_has_memcpy_from_wc().
2777  */
2778 #define i915_can_memcpy_from_wc(dst, src, len) \
2779         i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
2780
2781 #define i915_has_memcpy_from_wc() \
2782         i915_memcpy_from_wc(NULL, NULL, 0)
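/*
 * Illustrative usage (dst, src and len are hypothetical): probe the offsets
 * first and fall back to a plain memcpy when the WC fast path is unusable:
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */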
2783
2784 /* i915_mm.c */
2785 int remap_io_mapping(struct vm_area_struct *vma,
2786                      unsigned long addr, unsigned long pfn, unsigned long size,
2787                      struct io_mapping *iomap);
2788
2789 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
2790 {
2791         if (INTEL_GEN(i915) >= 10)
2792                 return CNL_HWS_CSB_WRITE_INDEX;
2793         else
2794                 return I915_HWS_CSB_WRITE_INDEX;
2795 }
2796
2797 static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
2798 {
2799         return i915_ggtt_offset(i915->gt.scratch);
2800 }
2801
2802 static inline enum i915_map_type
2803 i915_coherent_map_type(struct drm_i915_private *i915)
2804 {
2805         return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
2806 }
2807
2808 static inline void add_taint_for_CI(unsigned int taint)
2809 {
2810         /*
2811          * The system is "ok", just about surviving for the user, but
2812          * CI results are now unreliable as the HW is very suspect.
2813          * CI checks the taint state after every test and will reboot
2814          * the machine if the kernel is tainted.
2815          */
2816         add_taint(taint, LOCKDEP_STILL_OK);
2817 }
2818
2819 #endif