drivers/gpu/drm/i915/intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(!i915_mmio_reg_valid(d->reg_set));
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
                               NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from same cacheline, but not from the set register */
        if (i915_mmio_reg_valid(d->reg_post))
                __raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_wait_ack(d);

        dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }

        dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
vgpu_fw_domains_nop(struct drm_i915_private *dev_priv,
                    enum forcewake_domains fw_domains)
{
        /* The guest driver doesn't need to take care of forcewake. */
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;

        /* No need to do this for all domains, just the first one found */
        for_each_fw_domain(d, dev_priv) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        if (dev_priv->uncore.fw_domains == 0)
                return;

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so we need to
         * read FREE_ENTRIES every time */
        if (IS_VALLEYVIEW(dev_priv))
                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = fifo_free_entries(dev_priv);

                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = fifo_free_entries(dev_priv);
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}
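
/*
 * Usage sketch: the gen6_write##x() accessors defined later in this file
 * call __gen6_gt_wait_for_fifo() before posting a write into a forcewake
 * range; a nonzero return simply flags that GTFIFODBG should be checked
 * once the write has landed:
 *
 *      __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
 *      __raw_i915_write32(dev_priv, reg, val);
 *      if (unlikely(__fifo_ret))
 *              gen6_gt_check_fifodbg(dev_priv);
 */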

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
        struct intel_uncore_forcewake_domain *domain =
               container_of(timer, struct intel_uncore_forcewake_domain, timer);
        struct drm_i915_private *dev_priv = domain->i915;
        unsigned long irqflags;

        assert_rpm_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return HRTIMER_NORESTART;
}

void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
                                  bool restore)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domains fw, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_cancel(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer(&domain->timer);
                }

                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_active(&domain->timer))
                                active_domains |= domain->mask;
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
                cond_resched();
        }

        WARN_ON(active_domains);

        fw = dev_priv->uncore.fw_domains_active;
        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
        const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        const unsigned int sets[4] = { 1, 1, 2, 2 };
        const u32 cap = dev_priv->edram_cap;

        return EDRAM_NUM_BANKS(cap) *
                ways[EDRAM_WAYS_IDX(cap)] *
                sets[EDRAM_SETS_IDX(cap)] *
                1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
        if (!HAS_EDRAM(dev_priv))
                return 0;

        /* The needed capability bits for size calculation
         * are not there with pre-gen9, so always return 128MB.
         */
        if (INTEL_GEN(dev_priv) < 9)
                return 128 * 1024 * 1024;

        return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
        if (IS_HASWELL(dev_priv) ||
            IS_BROADWELL(dev_priv) ||
            INTEL_GEN(dev_priv) >= 9) {
                dev_priv->edram_cap = __raw_i915_read32(dev_priv,
                                                        HSW_EDRAM_CAP);

                /* NB: We can't write IDICR yet because we don't have gt funcs
                 * set up */
        } else {
                dev_priv->edram_cap = 0;
        }

        if (HAS_EDRAM(dev_priv))
                DRM_INFO("Found %lluMB of eDRAM\n",
                         intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 dbg;

        dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
                return false;

        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 cer;

        cer = __raw_i915_read32(dev_priv, CLAIM_ER);
        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
                return false;

        __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

        return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
                return fpga_check_for_unclaimed_mmio(dev_priv);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_check_for_unclaimed_mmio(dev_priv);

        return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                          bool restore_forcewake)
{
        struct intel_device_info *info = mkwrite_device_info(dev_priv);

        /* clear out unclaimed reg detection bit */
        if (check_for_unclaimed_mmio(dev_priv))
                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        /* WaDisableShadowRegForCpd:chv */
        if (IS_CHERRYVIEW(dev_priv)) {
                __raw_i915_write32(dev_priv, GTFIFOCTL,
                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                   GT_FIFO_CTL_RC6_POLICY_STALL);
        }

        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
                info->has_decoupled_mmio = false;

        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                 bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
        i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
        i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (domain->wake_count++)
                        fw_domains &= ~domain->mask;
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_get(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
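
/*
 * Usage sketch (illustrative, not a caller from this file): a sequence
 * that must keep the render well powered can take an explicit reference
 * and then use the raw _FW accessors, which skip the per-access forcewake
 * bookkeeping:
 *
 *      intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *      val = I915_READ_FW(GEN6_RPNSWREQ);
 *      I915_WRITE_FW(GEN6_RPNSWREQ, val);
 *      intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */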

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        __intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                fw_domain_arm_timer(domain);
        }
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_put(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        lockdep_assert_held(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        __intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
        if (offset < entry->start)
                return -1;
        else if (offset > entry->end)
                return 1;
        else
                return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
        unsigned int start__ = 0, end__ = (num);                        \
        typeof(base) result__ = NULL;                                   \
        while (start__ < end__) {                                       \
                unsigned int mid__ = start__ + (end__ - start__) / 2;   \
                int ret__ = (cmp)((key), (base) + mid__);               \
                if (ret__ < 0) {                                        \
                        end__ = mid__;                                  \
                } else if (ret__ > 0) {                                 \
                        start__ = mid__ + 1;                            \
                } else {                                                \
                        result__ = (base) + mid__;                      \
                        break;                                          \
                }                                                       \
        }                                                               \
        result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
        const struct intel_forcewake_range *entry;

        entry = BSEARCH(offset,
                        dev_priv->uncore.fw_domains_table,
                        dev_priv->uncore.fw_domains_table_entries,
                        fw_range_cmp);

        if (!entry)
                return 0;

        WARN(entry->domains & ~dev_priv->uncore.fw_domains,
             "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
             entry->domains & ~dev_priv->uncore.fw_domains, offset);

        return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
        (IS_GEN9(dev_priv) || \
         IS_CHERRYVIEW(dev_priv) || \
         IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
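
/*
 * For example, given the table above, a VLV access at offset 0x12080 falls
 * in the 0x12000-0x13fff range and resolves to FORCEWAKE_MEDIA through
 * find_fw_domain(), while an offset in a gap (e.g. 0x4000) resolves to 0
 * and needs no forcewake at all.
 */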

#define __fwtable_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset))) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
        GEN6_RPNSWREQ,                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
        /* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
        u32 offset = i915_mmio_reg_offset(*reg);

        if (key < offset)
                return -1;
        else if (key > offset)
                return 1;
        else
                return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
        const i915_reg_t *regs = gen8_shadowed_regs;

        return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
                       mmio_reg_cmp);
}
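
/*
 * Shadowed registers, such as the ring TAIL pointers listed above, are
 * tracked by the hardware itself, so writes to them do not need forcewake;
 * __gen8_reg_write_fw_domains() below therefore requests a domain only
 * when the offset is *not* shadowed.
 */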

#define __gen8_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                      const i915_reg_t reg,
                      const bool read,
                      const bool before)
{
        if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
                 "Unclaimed %s register 0x%x\n",
                 read ? "read from" : "write to",
                 i915_mmio_reg_offset(reg)))
                i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                    const i915_reg_t reg,
                    const bool read,
                    const bool before)
{
        if (likely(!i915.mmio_debug))
                return;

        __unclaimed_reg_debug(dev_priv, reg, read, before);
}

static const enum decoupled_power_domain fw2dpd_domain[] = {
        GEN9_DECOUPLED_PD_RENDER,
        GEN9_DECOUPLED_PD_BLITTER,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_MEDIA,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
                                         u32 reg,
                                         enum forcewake_domains fw_domain,
                                         enum decoupled_ops operation)
{
        enum decoupled_power_domain dp_domain;
        u32 ctrl_reg_data = 0;

        dp_domain = fw2dpd_domain[fw_domain - 1];

        ctrl_reg_data |= reg;
        ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
        ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
        ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

        if (wait_for_atomic((__raw_i915_read32(dev_priv,
                            GEN9_DECOUPLED_REG0_DW1) &
                            GEN9_DECOUPLED_DW1_GO) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
                             u32 reg,
                             enum forcewake_domains fw_domain)
{
        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_READ);

        return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
                            u32 reg, u32 data,
                            enum forcewake_domains fw_domain)
{
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_WRITE);
}

#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, true, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv)
                fw_domain_arm_timer(domain);

        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        if (WARN_ON(!fw_domains))
                return;

        /* Turn on all requested but inactive supported forcewake domains. */
        fw_domains &= dev_priv->uncore.fw_domains;
        fw_domains &= ~dev_priv->uncore.fw_domains_active;

        if (fw_domains)
                ___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __##func##_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}
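
/*
 * For illustration, __fwtable_read(32) below expands to roughly:
 *
 *      static u32 fwtable_read32(struct drm_i915_private *dev_priv,
 *                                i915_reg_t reg, bool trace)
 *      {
 *              enum forcewake_domains fw_engine;
 *              GEN6_READ_HEADER(32);
 *              fw_engine = __fwtable_reg_read_fw_domains(offset);
 *              if (fw_engine)
 *                      __force_wake_auto(dev_priv, fw_engine);
 *              val = __raw_i915_read32(dev_priv, reg);
 *              GEN6_READ_FOOTER;
 *      }
 */
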
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
                       i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __fwtable_reg_read_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
                unsigned i; \
                u32 *ptr_data = (u32 *) &val; \
                for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
                        *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
                                                                 offset, \
                                                                 fw_engine); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, false, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE(offset)) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __##func##_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
                        i915_reg_t reg, u##x val, \
                        bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __fwtable_reg_write_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
                __gen9_decoupled_mmio_write(dev_priv, \
                                            offset, \
                                            val, \
                                            fw_engine); \
        else \
                __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
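
/*
 * For example, ASSIGN_READ_MMIO_VFUNCS(fwtable), as used in
 * intel_uncore_init() below, expands to:
 *
 *      dev_priv->uncore.funcs.mmio_readb = fwtable_read8;
 *      dev_priv->uncore.funcs.mmio_readw = fwtable_read16;
 *      dev_priv->uncore.funcs.mmio_readl = fwtable_read32;
 *      dev_priv->uncore.funcs.mmio_readq = fwtable_read64;
 */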

static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           i915_reg_t reg_set,
                           i915_reg_t reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                /* WaRsClearFWBitsAtReset:bdw,skl */
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;

        d->i915 = dev_priv;
        d->id = domain_id;

        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
        BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

        d->mask = 1 << domain_id;

        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        d->timer.function = intel_uncore_fw_release_timer;

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
        if (INTEL_INFO(dev_priv)->gen <= 5)
                return;

        if (IS_GEN9(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                if (IS_HASWELL(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev_priv)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access
                 * is not working. At this stage we don't know which flavour
                 * this ivb is, so it is better to also reset the gen6 fw
                 * registers before the ecobus check.
                 */

                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
                __raw_posting_read(dev_priv, ECOBUS);

                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                spin_lock_irq(&dev_priv->uncore.lock);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                spin_unlock_irq(&dev_priv->uncore.lock);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

        if (intel_vgpu_active(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop;
                dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop;
        }

        /* All future platforms are expected to require complex power gating */
        WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
        dev_priv->uncore.fw_domains_table = \
                        (struct intel_forcewake_range *)(d); \
        dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
        i915_check_vgpu(dev_priv);

        intel_uncore_edram_detect(dev_priv);
        intel_uncore_fw_domains_init(dev_priv);
        __intel_uncore_early_sanitize(dev_priv, false);

        dev_priv->uncore.unclaimed_mmio_check = 1;

        switch (INTEL_INFO(dev_priv)->gen) {
        default:
        case 9:
                ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                ASSIGN_READ_MMIO_VFUNCS(fwtable);
                if (HAS_DECOUPLED_MMIO(dev_priv)) {
                        dev_priv->uncore.funcs.mmio_readl =
                                                gen9_decoupled_read32;
                        dev_priv->uncore.funcs.mmio_readq =
                                                gen9_decoupled_read64;
                        dev_priv->uncore.funcs.mmio_writel =
                                                gen9_decoupled_write32;
                }
                break;
        case 8:
                if (IS_CHERRYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
                        ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                ASSIGN_WRITE_MMIO_VFUNCS(gen6);

                if (IS_VALLEYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev_priv);
        intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
        i915_reg_t offset_ldw, offset_udw;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
          .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
          .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        unsigned size;
        i915_reg_t offset_ldw, offset_udw;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
                    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        /* We use the low bits to encode extra flags as the register should
         * be naturally aligned (and those that are not so aligned merely
         * limit the available flags for that register).
         */
        offset_ldw = entry->offset_ldw;
        offset_udw = entry->offset_udw;
        size = entry->size;
        size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

        intel_runtime_pm_get(dev_priv);

        switch (size) {
        case 8 | 1:
                reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
                break;
        case 8:
                reg->val = I915_READ64(offset_ldw);
                break;
        case 4:
                reg->val = I915_READ(offset_ldw);
                break;
        case 2:
                reg->val = I915_READ16(offset_ldw);
                break;
        case 1:
                reg->val = I915_READ8(offset_ldw);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
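
/*
 * Userspace sketch (illustrative, assumes libdrm): reading the whitelisted
 * render ring timestamp, where 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE):
 *
 *      struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *              printf("timestamp: %llu\n", (unsigned long long)rr.val);
 */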
1422
1423 static int i915_reset_complete(struct pci_dev *pdev)
1424 {
1425         u8 gdrst;
1426         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1427         return (gdrst & GRDOM_RESET_STATUS) == 0;
1428 }
1429
1430 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1431 {
1432         struct pci_dev *pdev = dev_priv->drm.pdev;
1433
1434         /* assert reset for at least 20 usec */
1435         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1436         udelay(20);
1437         pci_write_config_byte(pdev, I915_GDRST, 0);
1438
1439         return wait_for(i915_reset_complete(pdev), 500);
1440 }
1441
1442 static int g4x_reset_complete(struct pci_dev *pdev)
1443 {
1444         u8 gdrst;
1445         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1446         return (gdrst & GRDOM_RESET_ENABLE) == 0;
1447 }
1448
1449 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1450 {
1451         struct pci_dev *pdev = dev_priv->drm.pdev;
1452         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1453         return wait_for(g4x_reset_complete(pdev), 500);
1454 }
1455
1456 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1457 {
1458         struct pci_dev *pdev = dev_priv->drm.pdev;
1459         int ret;
1460
1461         pci_write_config_byte(pdev, I915_GDRST,
1462                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
1463         ret = wait_for(g4x_reset_complete(pdev), 500);
1464         if (ret)
1465                 return ret;
1466
1467         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1468         I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1469         POSTING_READ(VDECCLK_GATE_D);
1470
1471         pci_write_config_byte(pdev, I915_GDRST,
1472                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1473         ret = wait_for(g4x_reset_complete(pdev), 500);
1474         if (ret)
1475                 return ret;
1476
1477         /* WaVcpClkGateDisableForMediaReset:ctg,elk (restore VCP unit clock gating) */
1478         I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1479         POSTING_READ(VDECCLK_GATE_D);
1480
1481         pci_write_config_byte(pdev, I915_GDRST, 0);
1482
1483         return 0;
1484 }
1485
1486 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1487                              unsigned engine_mask)
1488 {
1489         int ret;
1490
1491         I915_WRITE(ILK_GDSR,
1492                    ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1493         ret = intel_wait_for_register(dev_priv,
1494                                       ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1495                                       500);
1496         if (ret)
1497                 return ret;
1498
1499         I915_WRITE(ILK_GDSR,
1500                    ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1501         ret = intel_wait_for_register(dev_priv,
1502                                       ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1503                                       500);
1504         if (ret)
1505                 return ret;
1506
1507         I915_WRITE(ILK_GDSR, 0);
1508
1509         return 0;
1510 }
1511
1512 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1513 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1514                                 u32 hw_domain_mask)
1515 {
1516         /* GEN6_GDRST is not in the gt power well, no need to check
1517          * for fifo space for the write or forcewake the chip for
1518          * the read
1519          */
1520         __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1521
1522         /* Spin waiting for the device to ack the reset requests */
1523         return intel_wait_for_register_fw(dev_priv,
1524                                           GEN6_GDRST, hw_domain_mask, 0,
1525                                           500);
1526 }
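/*
 * Illustrative sketch (not from the source): a full soft reset is this
 * helper applied to the full-domain bit, exactly what gen6_reset_engines()
 * below does for ALL_ENGINES:
 *
 *     ret = gen6_hw_domain_reset(dev_priv, GEN6_GRDOM_FULL);
 */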
1527
1528 /**
1529  * gen6_reset_engines - reset individual engines
1530  * @dev_priv: i915 device
1531  * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1532  *
1533  * This function will reset the individual engines that are set in engine_mask.
1534  * If ALL_ENGINES is provided as the mask, a full global domain reset will be issued.
1535  *
1536  * Note: It is the responsibility of the caller to handle the difference between
1537  * requesting a full domain reset and resetting all available individual engines.
1538  *
1539  * Returns 0 on success, nonzero on error.
1540  */
1541 static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1542                               unsigned engine_mask)
1543 {
1544         struct intel_engine_cs *engine;
1545         const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1546                 [RCS] = GEN6_GRDOM_RENDER,
1547                 [BCS] = GEN6_GRDOM_BLT,
1548                 [VCS] = GEN6_GRDOM_MEDIA,
1549                 [VCS2] = GEN8_GRDOM_MEDIA2,
1550                 [VECS] = GEN6_GRDOM_VECS,
1551         };
1552         u32 hw_mask;
1553         int ret;
1554
1555         if (engine_mask == ALL_ENGINES) {
1556                 hw_mask = GEN6_GRDOM_FULL;
1557         } else {
1558                 unsigned int tmp;
1559
1560                 hw_mask = 0;
1561                 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1562                         hw_mask |= hw_engine_mask[engine->id];
1563         }
1564
1565         ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1566
1567         intel_uncore_forcewake_reset(dev_priv, true);
1568
1569         return ret;
1570 }
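/*
 * Illustrative sketch (not from the source), using the intel_ring_flag()
 * encoding named in the kernel-doc above:
 *
 *     gen6_reset_engines(dev_priv, intel_ring_flag(engine)); (one engine)
 *     gen6_reset_engines(dev_priv, ALL_ENGINES);             (full GRDOM reset)
 */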
1571
1572 /**
1573  * intel_wait_for_register_fw - wait until register matches expected state
1574  * @dev_priv: the i915 device
1575  * @reg: the register to read
1576  * @mask: mask to apply to register value
1577  * @value: expected value
1578  * @timeout_ms: timeout in milliseconds
1579  *
1580  * This routine waits until the target register @reg contains the expected
1581  * @value after applying the @mask, i.e. it waits until ::
1582  *
1583  *     (I915_READ_FW(reg) & mask) == value
1584  *
1585  * Otherwise, the wait will time out after @timeout_ms milliseconds.
1586  *
1587  * Note that this routine assumes the caller holds forcewake asserted; it is
1588  * not suitable for very long waits. See intel_wait_for_register() if you
1589  * wish to wait without holding forcewake for the duration (i.e. you expect
1590  * the wait to be slow).
1591  *
1592  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1593  */
1594 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1595                                i915_reg_t reg,
1596                                const u32 mask,
1597                                const u32 value,
1598                                const unsigned long timeout_ms)
1599 {
1600 #define done ((I915_READ_FW(reg) & mask) == value)
1601         int ret = wait_for_us(done, 2);
1602         if (ret)
1603                 ret = wait_for(done, timeout_ms);
1604         return ret;
1605 #undef done
1606 }
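/*
 * Illustrative sketch (not from the source): gen8_request_engine_reset()
 * below is a typical caller; forcewake is already held across the reset,
 * so the raw _fw variant is the right tool:
 *
 *     ret = intel_wait_for_register_fw(dev_priv,
 *                                      RING_RESET_CTL(engine->mmio_base),
 *                                      RESET_CTL_READY_TO_RESET,
 *                                      RESET_CTL_READY_TO_RESET,
 *                                      700);
 */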
1607
1608 /**
1609  * intel_wait_for_register - wait until register matches expected state
1610  * @dev_priv: the i915 device
1611  * @reg: the register to read
1612  * @mask: mask to apply to register value
1613  * @value: expected value
1614  * @timeout_ms: timeout in milliseconds
1615  *
1616  * This routine waits until the target register @reg contains the expected
1617  * @value after applying the @mask, i.e. it waits until ::
1618  *
1619  *     (I915_READ(reg) & mask) == value
1620  *
1621  * Otherwise, the wait will time out after @timeout_ms milliseconds.
1622  *
1623  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1624  */
1625 int intel_wait_for_register(struct drm_i915_private *dev_priv,
1626                             i915_reg_t reg,
1627                             const u32 mask,
1628                             const u32 value,
1629                             const unsigned long timeout_ms)
1630 {
1632         unsigned fw =
1633                 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1634         int ret;
1635
1636         intel_uncore_forcewake_get(dev_priv, fw);
1637         ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1638         intel_uncore_forcewake_put(dev_priv, fw);
1639         if (ret)
1640                 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1641                                timeout_ms);
1642
1643         return ret;
1644 }
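/*
 * Illustrative sketch (not from the source): ironlake_do_reset() above
 * uses this variant to poll a slow status bit without pinning forcewake:
 *
 *     ret = intel_wait_for_register(dev_priv,
 *                                   ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
 *                                   500);
 */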
1645
1646 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1647 {
1648         struct drm_i915_private *dev_priv = engine->i915;
1649         int ret;
1650
1651         I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1652                       _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1653
1654         ret = intel_wait_for_register_fw(dev_priv,
1655                                          RING_RESET_CTL(engine->mmio_base),
1656                                          RESET_CTL_READY_TO_RESET,
1657                                          RESET_CTL_READY_TO_RESET,
1658                                          700);
1659         if (ret)
1660                 DRM_ERROR("%s: reset request timeout\n", engine->name);
1661
1662         return ret;
1663 }
1664
1665 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1666 {
1667         struct drm_i915_private *dev_priv = engine->i915;
1668
1669         I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670                       _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1671 }
1672
1673 static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1674                               unsigned engine_mask)
1675 {
1676         struct intel_engine_cs *engine;
1677         unsigned int tmp;
1678
1679         for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1680                 if (gen8_request_engine_reset(engine))
1681                         goto not_ready;
1682
1683         return gen6_reset_engines(dev_priv, engine_mask);
1684
1685 not_ready:
1686         for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1687                 gen8_unrequest_engine_reset(engine);
1688
1689         return -EIO;
1690 }
1691
1692 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1693
1694 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1695 {
1696         if (!i915.reset)
1697                 return NULL;
1698
1699         if (INTEL_INFO(dev_priv)->gen >= 8)
1700                 return gen8_reset_engines;
1701         else if (INTEL_INFO(dev_priv)->gen >= 6)
1702                 return gen6_reset_engines;
1703         else if (IS_GEN5(dev_priv))
1704                 return ironlake_do_reset;
1705         else if (IS_G4X(dev_priv))
1706                 return g4x_do_reset;
1707         else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1708                 return g33_do_reset;
1709         else if (INTEL_INFO(dev_priv)->gen >= 3)
1710                 return i915_do_reset;
1711         else
1712                 return NULL;
1713 }
1714
1715 int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1716 {
1717         reset_func reset;
1718         int ret;
1719
1720         reset = intel_get_gpu_reset(dev_priv);
1721         if (reset == NULL)
1722                 return -ENODEV;
1723
1724         /* If the power well sleeps during the reset, the reset
1725          * request may be dropped and never complete (causing -EIO).
1726          */
1727         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1728         ret = reset(dev_priv, engine_mask);
1729         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1730
1731         return ret;
1732 }
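/*
 * Illustrative sketch (not from the source): callers are expected to gate
 * on intel_has_gpu_reset() below and degrade gracefully:
 *
 *     if (intel_has_gpu_reset(dev_priv))
 *             ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 *     else
 *             ret = -ENODEV;
 */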
1733
1734 bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1735 {
1736         return intel_get_gpu_reset(dev_priv) != NULL;
1737 }
1738
1739 int intel_guc_reset(struct drm_i915_private *dev_priv)
1740 {
1741         int ret;
1742         unsigned long irqflags;
1743
1744         if (!HAS_GUC(dev_priv))
1745                 return -EINVAL;
1746
1747         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1748         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1749
1750         ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1751
1752         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1753         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1754
1755         return ret;
1756 }
1757
1758 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1759 {
1760         return check_for_unclaimed_mmio(dev_priv);
1761 }
1762
1763 bool
1764 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1765 {
1766         if (unlikely(i915.mmio_debug ||
1767                      dev_priv->uncore.unclaimed_mmio_check <= 0))
1768                 return false;
1769
1770         if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1771                 DRM_DEBUG("Unclaimed register detected, "
1772                           "enabling oneshot unclaimed register reporting. "
1773                           "Please use i915.mmio_debug=N for more information.\n");
1774                 i915.mmio_debug++;
1775                 dev_priv->uncore.unclaimed_mmio_check--;
1776                 return true;
1777         }
1778
1779         return false;
1780 }
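/*
 * Illustrative sketch (not from the source): a periodic caller (e.g. the
 * hangcheck work) can use the return value to log the escalation once:
 *
 *     if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
 *             DRM_DEBUG("unclaimed mmio detected, debug reporting armed\n");
 */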
1781
1782 static enum forcewake_domains
1783 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1784                                 i915_reg_t reg)
1785 {
1786         u32 offset = i915_mmio_reg_offset(reg);
1787         enum forcewake_domains fw_domains;
1788
1789         if (HAS_FWTABLE(dev_priv)) {
1790                 fw_domains = __fwtable_reg_read_fw_domains(offset);
1791         } else if (INTEL_GEN(dev_priv) >= 6) {
1792                 fw_domains = __gen6_reg_read_fw_domains(offset);
1793         } else {
1794                 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1795                 fw_domains = 0;
1796         }
1797
1798         WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1799
1800         return fw_domains;
1801 }
1802
1803 static enum forcewake_domains
1804 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1805                                  i915_reg_t reg)
1806 {
1807         u32 offset = i915_mmio_reg_offset(reg);
1808         enum forcewake_domains fw_domains;
1809
1810         if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1811                 fw_domains = __fwtable_reg_write_fw_domains(offset);
1812         } else if (IS_GEN8(dev_priv)) {
1813                 fw_domains = __gen8_reg_write_fw_domains(offset);
1814         } else if (IS_GEN(dev_priv, 6, 7)) {
1815                 fw_domains = FORCEWAKE_RENDER;
1816         } else {
1817                 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1818                 fw_domains = 0;
1819         }
1820
1821         WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1822
1823         return fw_domains;
1824 }
1825
1826 /**
1827  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1828  *                                  a register
1829  * @dev_priv: pointer to struct drm_i915_private
1830  * @reg: register in question
1831  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1832  *
1833  * Returns a set of forcewake domains that must be taken (e.g. via
1834  * intel_uncore_forcewake_get) for the specified register to be accessible in
1835  * the specified mode (read, write or read/write) with raw mmio accessors.
1836  *
1837  * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
1838  * callers to do FIFO management on their own or risk losing writes.
1839  */
1840 enum forcewake_domains
1841 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1842                                i915_reg_t reg, unsigned int op)
1843 {
1844         enum forcewake_domains fw_domains = 0;
1845
1846         WARN_ON(!op);
1847
1848         if (intel_vgpu_active(dev_priv))
1849                 return 0;
1850
1851         if (op & FW_REG_READ)
1852                 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1853
1854         if (op & FW_REG_WRITE)
1855                 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1856
1857         return fw_domains;
1858 }
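/*
 * Illustrative sketch (not from the source): the canonical raw-access
 * pattern built on this helper, mirroring intel_wait_for_register() above:
 *
 *     enum forcewake_domains fw =
 *             intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *
 *     intel_uncore_forcewake_get(dev_priv, fw);
 *     val = I915_READ_FW(reg);
 *     intel_uncore_forcewake_put(dev_priv, fw);
 */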
1859
1860 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1861 #include "selftests/intel_uncore.c"
1862 #endif