/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

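/*
 * Raw MMIO accessors. These map straight onto readb/writeb and friends on
 * the register BAR and deliberately bypass the forcewake bookkeeping,
 * tracing and unclaimed-register checks below; callers must already hold
 * whatever forcewake reference the target register needs.
 */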
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        u32 gt_thread_status_mask;

        if (IS_HASWELL(dev_priv->dev))
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
        else
                gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
}

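/*
 * Classic (single-threaded) gen6 forcewake handshake: wait for any previous
 * ack to clear, raise the wake request, wait for the hardware to ack it,
 * then (per WaRsForcewakeWaitTC0) wait for a GT thread to reach C0 so that
 * subsequent reads are reliable.
 */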
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE, 1);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:snb */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        u32 forcewake_ack;

        if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
                forcewake_ack = FORCEWAKE_ACK_HSW;
        else
                forcewake_ack = FORCEWAKE_MT_ACK;

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);

        if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

        /* WaRsForcewakeWaitTC0:ivb,hsw */
        if (INTEL_INFO(dev_priv->dev)->gen < 8)
                __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
                                                        int fw_engine)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* something from same cacheline, but !FORCEWAKE_MT */
        __raw_posting_read(dev_priv, ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
}

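/*
 * The GT wake FIFO buffers register writes issued while the GT is asleep.
 * Wait until enough free entries are available (keeping a small reserve)
 * before letting another write through; returns non-zero if the FIFO never
 * drained below the reserved watermark, in which case the caller should
 * check GTFIFODBG after the write.
 */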
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV the FIFO is shared by both SW and HW, so we need to
         * re-read the free-entry count every time */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count =
                        __raw_i915_read32(dev_priv, GTFIFOCTL) &
                                                GT_FIFO_FREE_ENTRIES_MASK;

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_VLV) &
                                                FORCEWAKE_KERNEL),
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for Render to ack.\n");
        }

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine) {
                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL) == 0,
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

                if (wait_for_atomic((__raw_i915_read32(dev_priv,
                                                FORCEWAKE_ACK_MEDIA_VLV) &
                                                FORCEWAKE_KERNEL),
                                        FORCEWAKE_ACK_TIMEOUT_MS))
                        DRM_ERROR("Timed out: waiting for media to ack.\n");
        }

        /* WaRsForcewakeWaitTC0:vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
                                        int fw_engine)
{
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                        _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine)
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
}

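/*
 * Valleyview keeps a separate reference count per engine domain: the
 * hardware handshake in __vlv_force_wake_get/put only runs on the 0 -> 1
 * and 1 -> 0 transitions of the respective count, under uncore.lock.
 */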
void vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (FORCEWAKE_RENDER & fw_engine) {
                if (dev_priv->uncore.fw_rendercount++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                        FORCEWAKE_RENDER);
        }
        if (FORCEWAKE_MEDIA & fw_engine) {
                if (dev_priv->uncore.fw_mediacount++ == 0)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                        FORCEWAKE_MEDIA);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void vlv_force_wake_put(struct drm_i915_private *dev_priv,
                                                int fw_engine)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (FORCEWAKE_RENDER & fw_engine) {
                WARN_ON(dev_priv->uncore.fw_rendercount == 0);
                if (--dev_priv->uncore.fw_rendercount == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                        FORCEWAKE_RENDER);
        }

        if (FORCEWAKE_MEDIA & fw_engine) {
                WARN_ON(dev_priv->uncore.fw_mediacount == 0);
                if (--dev_priv->uncore.fw_mediacount == 0)
                        dev_priv->uncore.funcs.force_wake_put(dev_priv,
                                                        FORCEWAKE_MEDIA);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

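/*
 * Delayed-work handler that drops the deferred forcewake reference kept by
 * gen6_gt_force_wake_put(); deferring the final put coalesces rapid get/put
 * cycles into a single hardware transition.
 */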
static void gen6_force_wake_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_VALLEYVIEW(dev)) {
                vlv_force_wake_reset(dev_priv);
        } else if (INTEL_INFO(dev)->gen >= 6) {
                __gen6_gt_force_wake_reset(dev_priv);
                if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                        __gen6_gt_force_wake_mt_reset(dev_priv);
        }
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        if (IS_HASWELL(dev) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_val;

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);

        /* Turn off power gating; this is especially required on BIOS-less
         * systems */
        if (IS_VALLEYVIEW(dev)) {
                mutex_lock(&dev_priv->rps.hw_lock);
                reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

                if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
                        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

                mutex_unlock(&dev_priv->rps.hw_lock);
        }
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        intel_runtime_pm_get(dev_priv);

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_get(dev_priv, fw_engine);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
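
/*
 * Illustrative usage sketch (reg_a/reg_b are placeholders, not registers
 * from this file): keep the GT awake across a multi-register sequence
 * instead of paying the forcewake handshake on every access.
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	I915_WRITE(reg_a, val_a);
 *	I915_WRITE(reg_b, val_b);
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */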

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        /* Redirect to VLV specific routine */
        if (IS_VALLEYVIEW(dev_priv->dev))
                return vlv_force_wake_put(dev_priv, fw_engine);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                /* Keep the last reference and let gen6_force_wake_work()
                 * drop it shortly; this coalesces back-to-back put/get
                 * pairs into one hardware transition. */
                dev_priv->uncore.forcewake_count++;
                mod_delayed_work(dev_priv->wq,
                                 &dev_priv->uncore.force_wake_work,
                                 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        intel_runtime_pm_put(dev_priv);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && (reg) != FORCEWAKE)

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unknown unclaimed register before writing to %x\n",
                          reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed write to %x\n", reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
             "Device suspended\n");
}

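/*
 * Every generated reader below takes uncore.lock, performs the raw access
 * and emits an i915_reg_rw tracepoint on the way out; REG_READ_HEADER and
 * REG_READ_FOOTER factor out that shared boilerplate.
 */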
#define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        REG_READ_FOOTER; \
}

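/*
 * Gen6+ reads of registers in the GT power well grab a temporary
 * FORCEWAKE_ALL reference when no caller already holds one, so a stale
 * value is never read from a powered-down well; the VLV variant does the
 * same per engine domain based on the register's offset range.
 */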
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                        FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                        FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        unsigned fwengine = 0; \
        unsigned *fwcount; \
        REG_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
                fwengine = FORCEWAKE_RENDER;            \
                fwcount = &dev_priv->uncore.fw_rendercount;    \
        }                                               \
        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
                fwengine = FORCEWAKE_MEDIA;             \
                fwcount = &dev_priv->uncore.fw_mediacount;     \
        }  \
        if (fwengine != 0) {            \
                if ((*fwcount)++ == 0) \
                        (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
                                                                fwengine); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (--(*fwcount) == 0) \
                        (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
                                                        fwengine); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        REG_WRITE_FOOTER; \
}

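/*
 * Gen6+ writes to GT power-well registers first wait for free GT wake FIFO
 * entries rather than taking forcewake; a non-zero return from
 * __gen6_gt_wait_for_fifo() triggers a GTFIFODBG check after the write.
 * The Haswell variant additionally brackets the write with the
 * unclaimed-register detection above.
 */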
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        assert_device_not_suspended(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        assert_device_not_suspended(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
        REG_WRITE_FOOTER; \
}

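/*
 * Registers that __gen8_write can write without grabbing FORCEWAKE_ALL
 * first; writes to everything else below 0x40000 take a temporary
 * forcewake reference around the access.
 */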
static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
        if (__needs_put) { \
                dev_priv->uncore.funcs.force_wake_get(dev_priv, \
                                                        FORCEWAKE_ALL); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (__needs_put) { \
                dev_priv->uncore.funcs.force_wake_put(dev_priv, \
                                                        FORCEWAKE_ALL); \
        } \
        REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
                          gen6_force_wake_work);

        if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
                __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_mt_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_mt_put;
                } else {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        dev_priv->uncore.funcs.force_wake_get =
                                __gen6_gt_force_wake_get;
                        dev_priv->uncore.funcs.force_wake_put =
                                __gen6_gt_force_wake_put;
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        __gen6_gt_force_wake_get;
                dev_priv->uncore.funcs.force_wake_put =
                        __gen6_gt_force_wake_put;
        }

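        /* Pick the MMIO accessors by generation; the default case covers
         * gen8 and anything newer. */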
        switch (INTEL_INFO(dev)->gen) {
        default:
                dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
                        dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
                        dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
                } else {
                        dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
                        dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }

                if (IS_VALLEYVIEW(dev)) {
                        dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
                        dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
                        dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
                        dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
                } else {
                        dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
                        dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
                        dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
                        dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
                }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
                break;
        case 4:
        case 3:
        case 2:
                dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
                dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
                dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
                dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
                dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
                dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
                dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
                dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
                break;
        }
}

void intel_uncore_fini(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        flush_delayed_work(&dev_priv->uncore.force_wake_work);

        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
}

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        uint32_t gen_bitmask; /* supported gens: bit (gen) set, e.g. 0x10 for gen4, 0x30 for gens 4 and 5 */
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 }, /* 0x1F0: gens 4-8 */
};

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
        if (IS_ERR(hs)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(hs);
        }

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
        u8 gdrst;
        pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
        int ret;

        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        pci_write_config_byte(dev->pdev, I965_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);

        ret = wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;

        pci_write_config_byte(dev->pdev, I965_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 gdrst;
        int ret;

        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
        if (ret)
                return ret;

        /* We can't reset render&media without also resetting display ... */
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
        gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        unsigned long irqflags;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev);

        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->uncore.forcewake_count)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        else
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);

        /* Restore fifo count */
        dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
}

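/*
 * Full-GPU reset entry point, dispatched by generation; returns -ENODEV on
 * platforms without a supported reset method.
 */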
int intel_gpu_reset(struct drm_device *dev)
{
        switch (INTEL_INFO(dev)->gen) {
        case 8:
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        case 4: return i965_do_reset(dev);
        default: return -ENODEV;
        }
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}