Merge tag 'drm-misc-next-2019-03-28-1' of git://anongit.freedesktop.org/drm/drm-misc...
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sort.h>
30 #include <linux/sched/mm.h>
31 #include <drm/drm_debugfs.h>
32 #include <drm/drm_fourcc.h>
33 #include "intel_drv.h"
34 #include "intel_guc_submission.h"
35
36 #include "i915_reset.h"
37
38 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
39 {
40         return to_i915(node->minor->dev);
41 }
42
/*
 * i915_capabilities - debugfs dump of static device capabilities
 *
 * Prints the GPU generation, platform name, PCH type, device info
 * flags, runtime info, driver caps and the current module parameters.
 * Always returns 0.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
63
/* '*' when the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
68
69 static char get_pin_flag(struct drm_i915_gem_object *obj)
70 {
71         return obj->pin_global ? 'p' : ' ';
72 }
73
74 static char get_tiling_flag(struct drm_i915_gem_object *obj)
75 {
76         switch (i915_gem_object_get_tiling(obj)) {
77         default:
78         case I915_TILING_NONE: return ' ';
79         case I915_TILING_X: return 'X';
80         case I915_TILING_Y: return 'Y';
81         }
82 }
83
84 static char get_global_flag(struct drm_i915_gem_object *obj)
85 {
86         return obj->userfault_count ? 'g' : ' ';
87 }
88
89 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
90 {
91         return obj->mm.mapping ? 'M' : ' ';
92 }
93
94 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
95 {
96         u64 size = 0;
97         struct i915_vma *vma;
98
99         for_each_ggtt_vma(vma, obj) {
100                 if (drm_mm_node_allocated(&vma->node))
101                         size += vma->node.size;
102         }
103
104         return size;
105 }
106
107 static const char *
108 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
109 {
110         size_t x = 0;
111
112         switch (page_sizes) {
113         case 0:
114                 return "";
115         case I915_GTT_PAGE_SIZE_4K:
116                 return "4K";
117         case I915_GTT_PAGE_SIZE_64K:
118                 return "64K";
119         case I915_GTT_PAGE_SIZE_2M:
120                 return "2M";
121         default:
122                 if (!buf)
123                         return "M";
124
125                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
126                         x += snprintf(buf + x, len - x, "2M, ");
127                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
128                         x += snprintf(buf + x, len - x, "64K, ");
129                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
130                         x += snprintf(buf + x, len - x, "4K, ");
131                 buf[x-2] = '\0';
132
133                 return buf;
134         }
135 }
136
/*
 * describe_obj - print a detailed one-line description of a GEM object
 *
 * Emits the object's flag characters, size, read/write domains, cache
 * level, dirty/purgeable state, every vma with allocated GTT space
 * (including GGTT view details and fence), its stolen offset, the last
 * engine that wrote it, and any frontbuffer bits.
 *
 * Caller must hold struct_mutex, as asserted below.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned vma across all of the object's address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe each vma that actually has GTT space allocated. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
224
225 static int obj_rank_by_stolen(const void *A, const void *B)
226 {
227         const struct drm_i915_gem_object *a =
228                 *(const struct drm_i915_gem_object **)A;
229         const struct drm_i915_gem_object *b =
230                 *(const struct drm_i915_gem_object **)B;
231
232         if (a->stolen->start < b->stolen->start)
233                 return -1;
234         if (a->stolen->start > b->stolen->start)
235                 return 1;
236         return 0;
237 }
238
/*
 * i915_gem_stolen_list_info - debugfs dump of stolen-memory objects
 *
 * Snapshots pointers to all stolen-backed objects into a temporary
 * array while holding mm.obj_lock (a spinlock, under which
 * describe_obj() must not be called), then sorts them by stolen offset
 * and describes each under struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Unlocked snapshot: only an upper bound on how many we collect. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects contribute no GTT size. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
301
/* Accumulator for per-client / per-context GEM object memory statistics. */
struct file_stats {
	struct i915_address_space *vm; /* ppGTT vma are counted only if they belong to this vm */
	unsigned long count; /* number of objects visited */
	u64 total, unbound; /* total object size; size of objects with no binding */
	u64 global, shared; /* GGTT-bound size; size of named/dma-buf objects */
	u64 active, inactive; /* bound size split by GPU activity */
	u64 closed; /* size of vma already closed but not yet destroyed */
};
310
/*
 * per_file_stats - accumulate one object's memory usage into file_stats
 * @id: idr handle, unused
 * @ptr: the drm_i915_gem_object to account
 * @data: the struct file_stats accumulator
 *
 * idr_for_each() callback (also called directly with id == 0). Always
 * returns 0 so iteration continues. Caller must hold struct_mutex.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Skip ppGTT vma that belong to some other vm. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
348
/*
 * Emit one summary line for @stats prefixed with @name; prints nothing
 * when no objects were counted. NOTE: @stats is expanded several times,
 * so pass a plain struct, not an expression with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
362
/*
 * Accumulate and print the memory used by every engine's batch-pool
 * cache lists as a single "[k]batch pool" summary line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
383
/*
 * Print per-context object statistics: kernel-owned context state and
 * ring buffers are folded into a single "[k]contexts" line, while each
 * client context's objects are summarized under the owning task's comm.
 *
 * Caller must hold struct_mutex (asserted inside per_file_stats()).
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_context *ce;

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Count only vma in this context's ppGTT. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			/* table_lock guards the client's object idr. */
			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* RCU protects the pid -> task lookup. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
422
/*
 * i915_gem_object_info - debugfs summary of GEM object memory usage
 *
 * Walks the unbound and bound object lists under mm.obj_lock and
 * prints aggregate counts/sizes (unbound, bound, purgeable, mapped,
 * huge-paged, display-pinned), the GGTT totals, then — under
 * struct_mutex — the batch-pool and per-context breakdowns.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	/* Unlocked read; the totals below are only a consistent-ish snapshot. */
	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Anything above the base page size counts as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Purgeable/mapped/huge keep accumulating across both lists. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	/* The per-context walk requires struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
526
527 static int i915_gem_gtt_info(struct seq_file *m, void *data)
528 {
529         struct drm_info_node *node = m->private;
530         struct drm_i915_private *dev_priv = node_to_i915(node);
531         struct drm_device *dev = &dev_priv->drm;
532         struct drm_i915_gem_object **objects;
533         struct drm_i915_gem_object *obj;
534         u64 total_obj_size, total_gtt_size;
535         unsigned long nobject, n;
536         int count, ret;
537
538         nobject = READ_ONCE(dev_priv->mm.object_count);
539         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
540         if (!objects)
541                 return -ENOMEM;
542
543         ret = mutex_lock_interruptible(&dev->struct_mutex);
544         if (ret)
545                 return ret;
546
547         count = 0;
548         spin_lock(&dev_priv->mm.obj_lock);
549         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
550                 objects[count++] = obj;
551                 if (count == nobject)
552                         break;
553         }
554         spin_unlock(&dev_priv->mm.obj_lock);
555
556         total_obj_size = total_gtt_size = 0;
557         for (n = 0;  n < count; n++) {
558                 obj = objects[n];
559
560                 seq_puts(m, "   ");
561                 describe_obj(m, obj);
562                 seq_putc(m, '\n');
563                 total_obj_size += obj->base.size;
564                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
565         }
566
567         mutex_unlock(&dev->struct_mutex);
568
569         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
570                    count, total_obj_size, total_gtt_size);
571         kvfree(objects);
572
573         return 0;
574 }
575
/*
 * i915_gem_batch_pool_info - debugfs dump of the engines' batch pools
 *
 * For each engine and each of its batch-pool cache lists, prints the
 * number of pooled objects and then a full description of each one,
 * followed by a grand total. Requires struct_mutex for describe_obj().
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: just count the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second pass: describe each pooled object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
620
/*
 * Dump the gen8+ display interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is off), the DE port and misc
 * registers, and the PCU interrupt registers. Shared by the gen8 and
 * gen11 branches of i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/*
		 * Only touch pipe registers while the pipe's power well
		 * is up; reading them powered-down would be invalid.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
672
673 static int i915_interrupt_info(struct seq_file *m, void *data)
674 {
675         struct drm_i915_private *dev_priv = node_to_i915(m->private);
676         struct intel_engine_cs *engine;
677         enum intel_engine_id id;
678         intel_wakeref_t wakeref;
679         int i, pipe;
680
681         wakeref = intel_runtime_pm_get(dev_priv);
682
683         if (IS_CHERRYVIEW(dev_priv)) {
684                 intel_wakeref_t pref;
685
686                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
687                            I915_READ(GEN8_MASTER_IRQ));
688
689                 seq_printf(m, "Display IER:\t%08x\n",
690                            I915_READ(VLV_IER));
691                 seq_printf(m, "Display IIR:\t%08x\n",
692                            I915_READ(VLV_IIR));
693                 seq_printf(m, "Display IIR_RW:\t%08x\n",
694                            I915_READ(VLV_IIR_RW));
695                 seq_printf(m, "Display IMR:\t%08x\n",
696                            I915_READ(VLV_IMR));
697                 for_each_pipe(dev_priv, pipe) {
698                         enum intel_display_power_domain power_domain;
699
700                         power_domain = POWER_DOMAIN_PIPE(pipe);
701                         pref = intel_display_power_get_if_enabled(dev_priv,
702                                                                   power_domain);
703                         if (!pref) {
704                                 seq_printf(m, "Pipe %c power disabled\n",
705                                            pipe_name(pipe));
706                                 continue;
707                         }
708
709                         seq_printf(m, "Pipe %c stat:\t%08x\n",
710                                    pipe_name(pipe),
711                                    I915_READ(PIPESTAT(pipe)));
712
713                         intel_display_power_put(dev_priv, power_domain, pref);
714                 }
715
716                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
717                 seq_printf(m, "Port hotplug:\t%08x\n",
718                            I915_READ(PORT_HOTPLUG_EN));
719                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
720                            I915_READ(VLV_DPFLIPSTAT));
721                 seq_printf(m, "DPINVGTT:\t%08x\n",
722                            I915_READ(DPINVGTT));
723                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
724
725                 for (i = 0; i < 4; i++) {
726                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
727                                    i, I915_READ(GEN8_GT_IMR(i)));
728                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
729                                    i, I915_READ(GEN8_GT_IIR(i)));
730                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
731                                    i, I915_READ(GEN8_GT_IER(i)));
732                 }
733
734                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
735                            I915_READ(GEN8_PCU_IMR));
736                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
737                            I915_READ(GEN8_PCU_IIR));
738                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
739                            I915_READ(GEN8_PCU_IER));
740         } else if (INTEL_GEN(dev_priv) >= 11) {
741                 seq_printf(m, "Master Interrupt Control:  %08x\n",
742                            I915_READ(GEN11_GFX_MSTR_IRQ));
743
744                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
745                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
746                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
747                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
748                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
749                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
750                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
751                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
752                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
753                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
754                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
755                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
756
757                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
758                            I915_READ(GEN11_DISPLAY_INT_CTL));
759
760                 gen8_display_interrupt_info(m);
761         } else if (INTEL_GEN(dev_priv) >= 8) {
762                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
763                            I915_READ(GEN8_MASTER_IRQ));
764
765                 for (i = 0; i < 4; i++) {
766                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
767                                    i, I915_READ(GEN8_GT_IMR(i)));
768                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
769                                    i, I915_READ(GEN8_GT_IIR(i)));
770                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
771                                    i, I915_READ(GEN8_GT_IER(i)));
772                 }
773
774                 gen8_display_interrupt_info(m);
775         } else if (IS_VALLEYVIEW(dev_priv)) {
776                 seq_printf(m, "Display IER:\t%08x\n",
777                            I915_READ(VLV_IER));
778                 seq_printf(m, "Display IIR:\t%08x\n",
779                            I915_READ(VLV_IIR));
780                 seq_printf(m, "Display IIR_RW:\t%08x\n",
781                            I915_READ(VLV_IIR_RW));
782                 seq_printf(m, "Display IMR:\t%08x\n",
783                            I915_READ(VLV_IMR));
784                 for_each_pipe(dev_priv, pipe) {
785                         enum intel_display_power_domain power_domain;
786                         intel_wakeref_t pref;
787
788                         power_domain = POWER_DOMAIN_PIPE(pipe);
789                         pref = intel_display_power_get_if_enabled(dev_priv,
790                                                                   power_domain);
791                         if (!pref) {
792                                 seq_printf(m, "Pipe %c power disabled\n",
793                                            pipe_name(pipe));
794                                 continue;
795                         }
796
797                         seq_printf(m, "Pipe %c stat:\t%08x\n",
798                                    pipe_name(pipe),
799                                    I915_READ(PIPESTAT(pipe)));
800                         intel_display_power_put(dev_priv, power_domain, pref);
801                 }
802
803                 seq_printf(m, "Master IER:\t%08x\n",
804                            I915_READ(VLV_MASTER_IER));
805
806                 seq_printf(m, "Render IER:\t%08x\n",
807                            I915_READ(GTIER));
808                 seq_printf(m, "Render IIR:\t%08x\n",
809                            I915_READ(GTIIR));
810                 seq_printf(m, "Render IMR:\t%08x\n",
811                            I915_READ(GTIMR));
812
813                 seq_printf(m, "PM IER:\t\t%08x\n",
814                            I915_READ(GEN6_PMIER));
815                 seq_printf(m, "PM IIR:\t\t%08x\n",
816                            I915_READ(GEN6_PMIIR));
817                 seq_printf(m, "PM IMR:\t\t%08x\n",
818                            I915_READ(GEN6_PMIMR));
819
820                 seq_printf(m, "Port hotplug:\t%08x\n",
821                            I915_READ(PORT_HOTPLUG_EN));
822                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
823                            I915_READ(VLV_DPFLIPSTAT));
824                 seq_printf(m, "DPINVGTT:\t%08x\n",
825                            I915_READ(DPINVGTT));
826
827         } else if (!HAS_PCH_SPLIT(dev_priv)) {
828                 seq_printf(m, "Interrupt enable:    %08x\n",
829                            I915_READ(IER));
830                 seq_printf(m, "Interrupt identity:  %08x\n",
831                            I915_READ(IIR));
832                 seq_printf(m, "Interrupt mask:      %08x\n",
833                            I915_READ(IMR));
834                 for_each_pipe(dev_priv, pipe)
835                         seq_printf(m, "Pipe %c stat:         %08x\n",
836                                    pipe_name(pipe),
837                                    I915_READ(PIPESTAT(pipe)));
838         } else {
839                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
840                            I915_READ(DEIER));
841                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
842                            I915_READ(DEIIR));
843                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
844                            I915_READ(DEIMR));
845                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
846                            I915_READ(SDEIER));
847                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
848                            I915_READ(SDEIIR));
849                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
850                            I915_READ(SDEIMR));
851                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
852                            I915_READ(GTIER));
853                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
854                            I915_READ(GTIIR));
855                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
856                            I915_READ(GTIMR));
857         }
858
859         if (INTEL_GEN(dev_priv) >= 11) {
860                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
861                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
862                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
863                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
864                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
865                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
866                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
867                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
868                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
869                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
870                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
871                            I915_READ(GEN11_GUC_SG_INTR_MASK));
872                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
873                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
874                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
875                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
876                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
877                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
878
879         } else if (INTEL_GEN(dev_priv) >= 6) {
880                 for_each_engine(engine, dev_priv, id) {
881                         seq_printf(m,
882                                    "Graphics Interrupt mask (%s):       %08x\n",
883                                    engine->name, ENGINE_READ(engine, RING_IMR));
884                 }
885         }
886
887         intel_runtime_pm_put(dev_priv, wakeref);
888
889         return 0;
890 }
891
892 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
893 {
894         struct drm_i915_private *dev_priv = node_to_i915(m->private);
895         struct drm_device *dev = &dev_priv->drm;
896         int i, ret;
897
898         ret = mutex_lock_interruptible(&dev->struct_mutex);
899         if (ret)
900                 return ret;
901
902         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
903         for (i = 0; i < dev_priv->num_fence_regs; i++) {
904                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
905
906                 seq_printf(m, "Fence %d, pin count = %d, object = ",
907                            i, dev_priv->fence_regs[i].pin_count);
908                 if (!vma)
909                         seq_puts(m, "unused");
910                 else
911                         describe_obj(m, vma->obj);
912                 seq_putc(m, '\n');
913         }
914
915         mutex_unlock(&dev->struct_mutex);
916         return 0;
917 }
918
919 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
920 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
921                               size_t count, loff_t *pos)
922 {
923         struct i915_gpu_state *error;
924         ssize_t ret;
925         void *buf;
926
927         error = file->private_data;
928         if (!error)
929                 return 0;
930
931         /* Bounce buffer required because of kernfs __user API convenience. */
932         buf = kmalloc(count, GFP_KERNEL);
933         if (!buf)
934                 return -ENOMEM;
935
936         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
937         if (ret <= 0)
938                 goto out;
939
940         if (!copy_to_user(ubuf, buf, ret))
941                 *pos += ret;
942         else
943                 ret = -EFAULT;
944
945 out:
946         kfree(buf);
947         return ret;
948 }
949
/* Drop the GPU-state reference taken by the matching open(). */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
955
/*
 * i915_gpu_info_open - capture a fresh GPU state snapshot on open().
 *
 * The snapshot is stashed in file->private_data for gpu_state_read()
 * and released by gpu_state_release().
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	/* Hold a runtime-pm wakeref while sampling the hardware state. */
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}
971
/* i915_gpu_info: capture a snapshot on open, dump it via read. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
979
/*
 * i915_error_state_write - clear the stashed error state.
 *
 * Writing anything at all to the i915_error_state file discards the
 * currently captured error state; the written payload is ignored.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	/* Consume the whole write so userspace does not retry. */
	return cnt;
}
996
997 static int i915_error_state_open(struct inode *inode, struct file *file)
998 {
999         struct i915_gpu_state *error;
1000
1001         error = i915_first_error_state(inode->i_private);
1002         if (IS_ERR(error))
1003                 return PTR_ERR(error);
1004
1005         file->private_data  = error;
1006         return 0;
1007 }
1008
/* i915_error_state: read the stashed error state; any write clears it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1017 #endif
1018
/*
 * i915_frequency_info - debugfs dump of GPU frequency / RPS state.
 *
 * Reports requested, actual and limit GPU frequencies plus the raw RPS
 * (render P-state) register contents, using the generation-appropriate
 * register layout.  All hardware sampling happens under a runtime-pm
 * wakeref so the device is guaranteed awake.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: legacy MEMSWCTL/MEMSTAT P-state interface. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* Punit mailbox accesses are serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		/* Bits 15:8 of the punit status carry the current ratio. */
		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton/Geminilake expose the RP caps in BXT_* registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* Extract the requested frequency ratio from RPNSWREQ;
		 * the field position moved across generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		/* The PM interrupt registers moved across generations too. */
		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN (lowest) ratio; BXT keeps it in the low byte. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1241
1242 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1243                                struct seq_file *m,
1244                                struct intel_instdone *instdone)
1245 {
1246         int slice;
1247         int subslice;
1248
1249         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1250                    instdone->instdone);
1251
1252         if (INTEL_GEN(dev_priv) <= 3)
1253                 return;
1254
1255         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1256                    instdone->slice_common);
1257
1258         if (INTEL_GEN(dev_priv) <= 6)
1259                 return;
1260
1261         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1262                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1263                            slice, subslice, instdone->sampler[slice][subslice]);
1264
1265         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1266                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1267                            slice, subslice, instdone->row[slice][subslice]);
1268 }
1269
/*
 * i915_hangcheck_info - debugfs summary of the GPU hangcheck state.
 *
 * Prints the reset flags, the hangcheck timer/work status, and for each
 * engine the seqno/ACTHD progress recorded by the hangcheck worker next
 * to a freshly sampled value, plus the INSTDONE registers for the
 * render engine.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live hardware state under a runtime-pm wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* Only the render engine gets the INSTDONE breakdown. */
		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1338
1339 static int i915_reset_info(struct seq_file *m, void *unused)
1340 {
1341         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1342         struct i915_gpu_error *error = &dev_priv->gpu_error;
1343         struct intel_engine_cs *engine;
1344         enum intel_engine_id id;
1345
1346         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1347
1348         for_each_engine(engine, dev_priv, id) {
1349                 seq_printf(m, "%s = %u\n", engine->name,
1350                            i915_reset_engine_count(error, engine));
1351         }
1352
1353         return 0;
1354 }
1355
/*
 * ironlake_drpc_info - dump Ironlake render-standby (DRPC) state.
 *
 * Decodes the MEMMODECTL boost/P-state configuration, the CRSTANDVID
 * standby voltage IDs, and the current render-standby state from
 * RSTDBYCTL into human readable form.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	/* Translate the RSX status field into the standby-state name. */
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1412
1413 static int i915_forcewake_domains(struct seq_file *m, void *data)
1414 {
1415         struct drm_i915_private *i915 = node_to_i915(m->private);
1416         struct intel_uncore *uncore = &i915->uncore;
1417         struct intel_uncore_forcewake_domain *fw_domain;
1418         unsigned int tmp;
1419
1420         seq_printf(m, "user.bypass_count = %u\n",
1421                    uncore->user_forcewake.count);
1422
1423         for_each_fw_domain(fw_domain, uncore, tmp)
1424                 seq_printf(m, "%s.wake_count = %u\n",
1425                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1426                            READ_ONCE(fw_domain->wake_count));
1427
1428         return 0;
1429 }
1430
/*
 * print_rc6_res - print one RC6 residency counter.
 * @title: label emitted before the value.
 * @reg: residency register to sample.
 *
 * Shows both the raw register value and its conversion to microseconds.
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1441
/*
 * vlv_drpc_info - dump Valleyview/Cherryview RC6 and power-well state.
 *
 * Reports whether RC6 is enabled, the render/media power well status,
 * the RC6 residency counters since boot, and finally the forcewake
 * domain counts.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Append the forcewake domain counts to the same report. */
	return i915_forcewake_domains(m, NULL);
}
1463
/*
 * gen6+ render C-state (RC6) status: enable bits from GEN6_RC_CONTROL,
 * gen9 power-gating state, the current RC state decoded from
 * GEN6_GT_CORE_STATUS, residency counters and (gen6/gen7 only) the RC6
 * voltage IDs read from the PCU. Caller must hold a runtime-pm wakeref.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read: raw access without forcewake; trace it by hand */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs are only exposed via pcode on gen6/gen7 */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state field of the core status register */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* Three packed 8-bit VIDs: RC6, RC6+, RC6++ */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1551
/*
 * debugfs entry point for RC6/DRPC status: takes a runtime-pm wakeref
 * and dispatches to the platform-specific dumper. Returns -ENODEV only
 * if the wakeref could not be taken (the with_ body never ran).
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		/* VLV/CHV must be checked first: they are also gen >= 6 */
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
1569
1570 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1571 {
1572         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1573
1574         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1575                    dev_priv->fb_tracking.busy_bits);
1576
1577         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1578                    dev_priv->fb_tracking.flip_bits);
1579
1580         return 0;
1581 }
1582
/*
 * Report whether FBC (framebuffer compression) is active, the reason it
 * is disabled if not, and — when active — whether the hardware is
 * currently compressing. Holds fbc->lock and a runtime-pm wakeref
 * across the register reads.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* Per-generation status register/mask for "is compressing" */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		/* Any non-zero segment count means compression is happening */
		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1623
1624 static int i915_fbc_false_color_get(void *data, u64 *val)
1625 {
1626         struct drm_i915_private *dev_priv = data;
1627
1628         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1629                 return -ENODEV;
1630
1631         *val = dev_priv->fbc.false_color;
1632
1633         return 0;
1634 }
1635
1636 static int i915_fbc_false_color_set(void *data, u64 val)
1637 {
1638         struct drm_i915_private *dev_priv = data;
1639         u32 reg;
1640
1641         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1642                 return -ENODEV;
1643
1644         mutex_lock(&dev_priv->fbc.lock);
1645
1646         reg = I915_READ(ILK_DPFC_CONTROL);
1647         dev_priv->fbc.false_color = val;
1648
1649         I915_WRITE(ILK_DPFC_CONTROL, val ?
1650                    (reg | FBC_CTL_FALSE_COLOR) :
1651                    (reg & ~FBC_CTL_FALSE_COLOR));
1652
1653         mutex_unlock(&dev_priv->fbc.lock);
1654         return 0;
1655 }
1656
/* debugfs file ops pairing the false-colour get/set above; "%llu\n" format */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1660
/*
 * Report IPS (Intermediate Pixel Storage) state: the module-parameter
 * setting and, where readable, the current hardware enable bit.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		/* IPS_CTL cannot be read reliably on BDW+ */
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1687
/*
 * Report whether panel self-refresh is enabled, probing the
 * platform-specific enable bit. The platform checks are ordered most
 * specific first; gen9+ deliberately reports nothing global.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1716
1717 static int i915_emon_status(struct seq_file *m, void *unused)
1718 {
1719         struct drm_i915_private *i915 = node_to_i915(m->private);
1720         intel_wakeref_t wakeref;
1721
1722         if (!IS_GEN(i915, 5))
1723                 return -ENODEV;
1724
1725         with_intel_runtime_pm(i915, wakeref) {
1726                 unsigned long temp, chipset, gfx;
1727
1728                 temp = i915_mch_val(i915);
1729                 chipset = i915_chipset_val(i915);
1730                 gfx = i915_gfx_val(i915);
1731
1732                 seq_printf(m, "GMCH temp: %ld\n", temp);
1733                 seq_printf(m, "Chipset power: %ld\n", chipset);
1734                 seq_printf(m, "GFX power: %ld\n", gfx);
1735                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1736         }
1737
1738         return 0;
1739 }
1740
/*
 * Print the GPU-frequency → effective CPU/ring frequency mapping that
 * pcode maintains for LLC-sharing platforms. Requires HAS_LLC; takes a
 * runtime-pm wakeref and the pcu_lock (interruptibly) while querying
 * GEN6_PCODE_READ_MIN_FREQ_TABLE for each GPU frequency step.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* in: GPU freq step; out: packed CPU (bits 0-7) and ring
		 * (bits 8-15) frequencies in units of 100MHz */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1789
1790 static int i915_opregion(struct seq_file *m, void *unused)
1791 {
1792         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1793         struct drm_device *dev = &dev_priv->drm;
1794         struct intel_opregion *opregion = &dev_priv->opregion;
1795         int ret;
1796
1797         ret = mutex_lock_interruptible(&dev->struct_mutex);
1798         if (ret)
1799                 goto out;
1800
1801         if (opregion->header)
1802                 seq_write(m, opregion->header, OPREGION_SIZE);
1803
1804         mutex_unlock(&dev->struct_mutex);
1805
1806 out:
1807         return 0;
1808 }
1809
1810 static int i915_vbt(struct seq_file *m, void *unused)
1811 {
1812         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1813
1814         if (opregion->vbt)
1815                 seq_write(m, opregion->vbt, opregion->vbt_size);
1816
1817         return 0;
1818 }
1819
/*
 * List every framebuffer: the fbdev/fbcon one first (when fbdev
 * emulation is built in), then all user-created framebuffers. Takes
 * struct_mutex (interruptibly, for describe_obj) and nests
 * mode_config.fb_lock inside it to walk the fb list safely.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* skip the fbdev fb: already reported above */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1869
/* One-line summary of a context ringbuffer: space/head/tail/emit. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1875
/*
 * Dump every GEM context: hw_id/pin count, owning task (or whether it
 * is a kernel or already-deleted context), remap flag, and the state
 * object plus ringbuffer of each engine the context is active on.
 * Walks contexts.list under struct_mutex (taken interruptibly).
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* hw_id only valid while linked on the hw_id list */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* take a task ref so ->comm/->pid stay valid */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = slice remap pending, 'r' = not */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1928
/*
 * Map an I915_BIT_6_SWIZZLE_* value to a human-readable name.
 * Returns "bug" for values outside the known enum range.
 */
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
1952
/*
 * Report the detected bit-6 swizzle modes for X/Y tiling plus the
 * per-generation DRAM/arbitration registers that determine them.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* NOTE(review): labels say "DDC"/"DDC2" but the registers
		 * read are DCC/DCC2 — looks like a typo in the debugfs
		 * output; confirm before changing the user-visible text. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2000
2001 static const char *rps_power_to_str(unsigned int power)
2002 {
2003         static const char * const strings[] = {
2004                 [LOW_POWER] = "low power",
2005                 [BETWEEN] = "mixed",
2006                 [HIGH_POWER] = "high power",
2007         };
2008
2009         if (power >= ARRAY_SIZE(strings) || !strings[power])
2010                 return "unknown";
2011
2012         return strings[power];
2013 }
2014
/*
 * Dump RPS (render power state / frequency scaling) status: requested
 * vs actual frequency, the soft/hard limits, boost statistics, and —
 * when the GPU is busy — the up/down autotuning counters. The actual
 * frequency is only read if a runtime-pm reference is already held
 * (if_in_use), so an idle device is not woken just to read it.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* punit access requires pcu_lock on VLV/CHV */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* read all four EI counters under one forcewake grab */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2083
2084 static int i915_llc(struct seq_file *m, void *data)
2085 {
2086         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2087         const bool edram = INTEL_GEN(dev_priv) > 8;
2088
2089         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2090         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2091                    intel_uncore_edram_size(dev_priv)/1024/1024);
2092
2093         return 0;
2094 }
2095
/*
 * Dump the HuC firmware descriptor and, with a runtime-pm wakeref,
 * the hardware HUC_STATUS2 register.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2113
/*
 * Dump the GuC firmware descriptor plus, under a runtime-pm wakeref,
 * the decoded GUC_STATUS fields and the 16 soft-scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2146
2147 static const char *
2148 stringify_guc_log_type(enum guc_log_buffer_type type)
2149 {
2150         switch (type) {
2151         case GUC_ISR_LOG_BUFFER:
2152                 return "ISR";
2153         case GUC_DPC_LOG_BUFFER:
2154                 return "DPC";
2155         case GUC_CRASH_DUMP_LOG_BUFFER:
2156                 return "CRASH";
2157         default:
2158                 MISSING_CASE(type);
2159         }
2160
2161         return "";
2162 }
2163
/*
 * Print GuC log-relay statistics (full count plus per-buffer-type
 * flush/overflow counters), or a one-liner if the relay is disabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2187
2188 static void i915_guc_client_info(struct seq_file *m,
2189                                  struct drm_i915_private *dev_priv,
2190                                  struct intel_guc_client *client)
2191 {
2192         struct intel_engine_cs *engine;
2193         enum intel_engine_id id;
2194         u64 tot = 0;
2195
2196         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2197                 client->priority, client->stage_id, client->proc_desc_offset);
2198         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2199                 client->doorbell_id, client->doorbell_offset);
2200
2201         for_each_engine(engine, dev_priv, id) {
2202                 u64 submissions = client->submissions[id];
2203                 tot += submissions;
2204                 seq_printf(m, "\tSubmissions: %llu %s\n",
2205                                 submissions, engine->name);
2206         }
2207         seq_printf(m, "\tTotal: %llu\n", tot);
2208 }
2209
2210 static int i915_guc_info(struct seq_file *m, void *data)
2211 {
2212         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2213         const struct intel_guc *guc = &dev_priv->guc;
2214
2215         if (!USES_GUC(dev_priv))
2216                 return -ENODEV;
2217
2218         i915_guc_log_info(m, dev_priv);
2219
2220         if (!USES_GUC_SUBMISSION(dev_priv))
2221                 return 0;
2222
2223         GEM_BUG_ON(!guc->execbuf_client);
2224
2225         seq_printf(m, "\nDoorbell map:\n");
2226         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2227         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2228
2229         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2230         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2231         if (guc->preempt_client) {
2232                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2233                            guc->preempt_client);
2234                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2235         }
2236
2237         /* Add more as required ... */
2238
2239         return 0;
2240 }
2241
/*
 * Dump every active entry in the GuC stage descriptor pool, including
 * the per-engine execlist context (LRC) state for the engines used by
 * the execbuf client.
 *
 * Returns 0 on success, -ENODEV when GuC submission is not in use.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip pool slots that are not marked active. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* One LRC entry per engine in the client's engine mask. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2295
2296 static int i915_guc_log_dump(struct seq_file *m, void *data)
2297 {
2298         struct drm_info_node *node = m->private;
2299         struct drm_i915_private *dev_priv = node_to_i915(node);
2300         bool dump_load_err = !!node->info_ent->data;
2301         struct drm_i915_gem_object *obj = NULL;
2302         u32 *log;
2303         int i = 0;
2304
2305         if (!HAS_GUC(dev_priv))
2306                 return -ENODEV;
2307
2308         if (dump_load_err)
2309                 obj = dev_priv->guc.load_err_log;
2310         else if (dev_priv->guc.log.vma)
2311                 obj = dev_priv->guc.log.vma->obj;
2312
2313         if (!obj)
2314                 return 0;
2315
2316         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2317         if (IS_ERR(log)) {
2318                 DRM_DEBUG("Failed to pin object\n");
2319                 seq_puts(m, "(log data unaccessible)\n");
2320                 return PTR_ERR(log);
2321         }
2322
2323         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2324                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2325                            *(log + i), *(log + i + 1),
2326                            *(log + i + 2), *(log + i + 3));
2327
2328         seq_putc(m, '\n');
2329
2330         i915_gem_object_unpin_map(obj);
2331
2332         return 0;
2333 }
2334
2335 static int i915_guc_log_level_get(void *data, u64 *val)
2336 {
2337         struct drm_i915_private *dev_priv = data;
2338
2339         if (!USES_GUC(dev_priv))
2340                 return -ENODEV;
2341
2342         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2343
2344         return 0;
2345 }
2346
2347 static int i915_guc_log_level_set(void *data, u64 val)
2348 {
2349         struct drm_i915_private *dev_priv = data;
2350
2351         if (!USES_GUC(dev_priv))
2352                 return -ENODEV;
2353
2354         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2355 }
2356
/* debugfs file exposing the GuC log level as a decimal value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2360
2361 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2362 {
2363         struct drm_i915_private *dev_priv = inode->i_private;
2364
2365         if (!USES_GUC(dev_priv))
2366                 return -ENODEV;
2367
2368         file->private_data = &dev_priv->guc.log;
2369
2370         return intel_guc_log_relay_open(&dev_priv->guc.log);
2371 }
2372
2373 static ssize_t
2374 i915_guc_log_relay_write(struct file *filp,
2375                          const char __user *ubuf,
2376                          size_t cnt,
2377                          loff_t *ppos)
2378 {
2379         struct intel_guc_log *log = filp->private_data;
2380
2381         intel_guc_log_relay_flush(log);
2382
2383         return cnt;
2384 }
2385
2386 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2387 {
2388         struct drm_i915_private *dev_priv = inode->i_private;
2389
2390         intel_guc_log_relay_close(&dev_priv->guc.log);
2391
2392         return 0;
2393 }
2394
/* debugfs hooks for streaming/flushing the GuC log through relayfs. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2401
2402 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2403 {
2404         u8 val;
2405         static const char * const sink_status[] = {
2406                 "inactive",
2407                 "transition to active, capture and display",
2408                 "active, display from RFB",
2409                 "active, capture and display on sink device timings",
2410                 "transition to inactive, capture and display, timing re-sync",
2411                 "reserved",
2412                 "reserved",
2413                 "sink internal error",
2414         };
2415         struct drm_connector *connector = m->private;
2416         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2417         struct intel_dp *intel_dp =
2418                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2419         int ret;
2420
2421         if (!CAN_PSR(dev_priv)) {
2422                 seq_puts(m, "PSR Unsupported\n");
2423                 return -ENODEV;
2424         }
2425
2426         if (connector->status != connector_status_connected)
2427                 return -ENODEV;
2428
2429         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2430
2431         if (ret == 1) {
2432                 const char *str = "unknown";
2433
2434                 val &= DP_PSR_SINK_STATE_MASK;
2435                 if (val < ARRAY_SIZE(sink_status))
2436                         str = sink_status[val];
2437                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2438         } else {
2439                 return ret;
2440         }
2441
2442         return 0;
2443 }
2444 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2445
/*
 * Print the live source-side PSR state machine status, decoding the
 * EDP_PSR2_STATUS or EDP_PSR_STATUS register depending on which PSR
 * version is currently enabled.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* State names indexed by the EDP_PSR2_STATUS state field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		/* Out-of-range values keep the "unknown" fallback. */
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* State names indexed by the EDP_PSR_STATUS state field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2491
/*
 * Dump full PSR status: sink support, current PSR mode, source-side
 * control/status registers and, when PSR2 is active, the selective
 * update block counts per frame.
 *
 * Returns 0 on success, -ENODEV when PSR hardware is absent.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	/*
	 * Register reads below need the device awake; psr->lock guards
	 * the software PSR state dumped here.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* IRQ timestamps are only tracked when PSR IRQ debug is on. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		/* Each status register packs the counts for 3 frames. */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2580
2581 static int
2582 i915_edp_psr_debug_set(void *data, u64 val)
2583 {
2584         struct drm_i915_private *dev_priv = data;
2585         intel_wakeref_t wakeref;
2586         int ret;
2587
2588         if (!CAN_PSR(dev_priv))
2589                 return -ENODEV;
2590
2591         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2592
2593         wakeref = intel_runtime_pm_get(dev_priv);
2594
2595         ret = intel_psr_debug_set(dev_priv, val);
2596
2597         intel_runtime_pm_put(dev_priv, wakeref);
2598
2599         return ret;
2600 }
2601
2602 static int
2603 i915_edp_psr_debug_get(void *data, u64 *val)
2604 {
2605         struct drm_i915_private *dev_priv = data;
2606
2607         if (!CAN_PSR(dev_priv))
2608                 return -ENODEV;
2609
2610         *val = READ_ONCE(dev_priv->psr.debug);
2611         return 0;
2612 }
2613
/* debugfs file exposing the PSR debug mask as a decimal value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2617
/*
 * Report accumulated GPU energy consumption in microjoules, scaling the
 * MCH_SECP_NRG_STTS counter by the RAPL energy status units.
 *
 * Returns 0 on success, -ENODEV on pre-gen6 hardware or when the RAPL
 * MSR cannot be read.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* Energy status units: bits 12:8 of MSR_RAPL_POWER_UNIT. */
	units = (power & 0x1f00) >> 8;
	/* 'power' is reused: from here on it holds the raw energy count. */
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2640
2641 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2642 {
2643         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2644         struct pci_dev *pdev = dev_priv->drm.pdev;
2645
2646         if (!HAS_RUNTIME_PM(dev_priv))
2647                 seq_puts(m, "Runtime power management not supported\n");
2648
2649         seq_printf(m, "Runtime power status: %s\n",
2650                    enableddisabled(!dev_priv->power_domains.wakeref));
2651
2652         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2653         seq_printf(m, "IRQs disabled: %s\n",
2654                    yesno(!intel_irqs_enabled(dev_priv)));
2655 #ifdef CONFIG_PM
2656         seq_printf(m, "Usage count: %d\n",
2657                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2658 #else
2659         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2660 #endif
2661         seq_printf(m, "PCI device power state: %s [%d]\n",
2662                    pci_power_name(pdev->current_state),
2663                    pdev->current_state);
2664
2665         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2666                 struct drm_printer p = drm_seq_file_printer(m);
2667
2668                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2669         }
2670
2671         return 0;
2672 }
2673
2674 static int i915_power_domain_info(struct seq_file *m, void *unused)
2675 {
2676         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2677         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2678         int i;
2679
2680         mutex_lock(&power_domains->lock);
2681
2682         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2683         for (i = 0; i < power_domains->power_well_count; i++) {
2684                 struct i915_power_well *power_well;
2685                 enum intel_display_power_domain power_domain;
2686
2687                 power_well = &power_domains->power_wells[i];
2688                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2689                            power_well->count);
2690
2691                 for_each_power_domain(power_domain, power_well->desc->domains)
2692                         seq_printf(m, "  %-23s %d\n",
2693                                  intel_display_power_domain_str(power_domain),
2694                                  power_domains->domain_use_count[power_domain]);
2695         }
2696
2697         mutex_unlock(&power_domains->lock);
2698
2699         return 0;
2700 }
2701
/*
 * Dump DMC/CSR firmware state: load status, firmware path and version,
 * the DC-state transition counters and the raw CSR base registers.
 *
 * Returns 0 on success, -ENODEV on hardware without CSR firmware.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers below apply. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The counter registers below are only defined up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2743
2744 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2745                                  struct drm_display_mode *mode)
2746 {
2747         int i;
2748
2749         for (i = 0; i < tabs; i++)
2750                 seq_putc(m, '\t');
2751
2752         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2753 }
2754
2755 static void intel_encoder_info(struct seq_file *m,
2756                                struct intel_crtc *intel_crtc,
2757                                struct intel_encoder *intel_encoder)
2758 {
2759         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2760         struct drm_device *dev = &dev_priv->drm;
2761         struct drm_crtc *crtc = &intel_crtc->base;
2762         struct intel_connector *intel_connector;
2763         struct drm_encoder *encoder;
2764
2765         encoder = &intel_encoder->base;
2766         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2767                    encoder->base.id, encoder->name);
2768         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2769                 struct drm_connector *connector = &intel_connector->base;
2770                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2771                            connector->base.id,
2772                            connector->name,
2773                            drm_get_connector_status_name(connector->status));
2774                 if (connector->status == connector_status_connected) {
2775                         struct drm_display_mode *mode = &crtc->mode;
2776                         seq_printf(m, ", mode:\n");
2777                         intel_seq_print_mode(m, 2, mode);
2778                 } else {
2779                         seq_putc(m, '\n');
2780                 }
2781         }
2782 }
2783
2784 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2785 {
2786         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2787         struct drm_device *dev = &dev_priv->drm;
2788         struct drm_crtc *crtc = &intel_crtc->base;
2789         struct intel_encoder *intel_encoder;
2790         struct drm_plane_state *plane_state = crtc->primary->state;
2791         struct drm_framebuffer *fb = plane_state->fb;
2792
2793         if (fb)
2794                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2795                            fb->base.id, plane_state->src_x >> 16,
2796                            plane_state->src_y >> 16, fb->width, fb->height);
2797         else
2798                 seq_puts(m, "\tprimary plane disabled\n");
2799         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2800                 intel_encoder_info(m, intel_crtc, intel_encoder);
2801 }
2802
2803 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2804 {
2805         struct drm_display_mode *mode = panel->fixed_mode;
2806
2807         seq_printf(m, "\tfixed mode:\n");
2808         intel_seq_print_mode(m, 2, mode);
2809 }
2810
2811 static void intel_dp_info(struct seq_file *m,
2812                           struct intel_connector *intel_connector)
2813 {
2814         struct intel_encoder *intel_encoder = intel_connector->encoder;
2815         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2816
2817         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2818         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2819         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2820                 intel_panel_info(m, &intel_connector->panel);
2821
2822         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2823                                 &intel_dp->aux);
2824 }
2825
2826 static void intel_dp_mst_info(struct seq_file *m,
2827                           struct intel_connector *intel_connector)
2828 {
2829         struct intel_encoder *intel_encoder = intel_connector->encoder;
2830         struct intel_dp_mst_encoder *intel_mst =
2831                 enc_to_mst(&intel_encoder->base);
2832         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2833         struct intel_dp *intel_dp = &intel_dig_port->dp;
2834         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2835                                         intel_connector->port);
2836
2837         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2838 }
2839
2840 static void intel_hdmi_info(struct seq_file *m,
2841                             struct intel_connector *intel_connector)
2842 {
2843         struct intel_encoder *intel_encoder = intel_connector->encoder;
2844         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2845
2846         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2847 }
2848
/* For LVDS the only connector-specific detail is the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2854
2855 static void intel_connector_info(struct seq_file *m,
2856                                  struct drm_connector *connector)
2857 {
2858         struct intel_connector *intel_connector = to_intel_connector(connector);
2859         struct intel_encoder *intel_encoder = intel_connector->encoder;
2860         struct drm_display_mode *mode;
2861
2862         seq_printf(m, "connector %d: type %s, status: %s\n",
2863                    connector->base.id, connector->name,
2864                    drm_get_connector_status_name(connector->status));
2865
2866         if (connector->status == connector_status_disconnected)
2867                 return;
2868
2869         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2870                    connector->display_info.width_mm,
2871                    connector->display_info.height_mm);
2872         seq_printf(m, "\tsubpixel order: %s\n",
2873                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2874         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2875
2876         if (!intel_encoder)
2877                 return;
2878
2879         switch (connector->connector_type) {
2880         case DRM_MODE_CONNECTOR_DisplayPort:
2881         case DRM_MODE_CONNECTOR_eDP:
2882                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2883                         intel_dp_mst_info(m, intel_connector);
2884                 else
2885                         intel_dp_info(m, intel_connector);
2886                 break;
2887         case DRM_MODE_CONNECTOR_LVDS:
2888                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2889                         intel_lvds_info(m, intel_connector);
2890                 break;
2891         case DRM_MODE_CONNECTOR_HDMIA:
2892                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2893                     intel_encoder->type == INTEL_OUTPUT_DDI)
2894                         intel_hdmi_info(m, intel_connector);
2895                 break;
2896         default:
2897                 break;
2898         }
2899
2900         seq_printf(m, "\tmodes:\n");
2901         list_for_each_entry(mode, &connector->modes, head)
2902                 intel_seq_print_mode(m, 2, mode);
2903 }
2904
/* Map a drm_plane_type to the short tag used in the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	/* Reached only for values outside the enum (e.g. corruption). */
	return "unknown";
}
2922
2923 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2924 {
2925         /*
2926          * According to doc only one DRM_MODE_ROTATE_ is allowed but this
2927          * will print them all to visualize if the values are misused
2928          */
2929         snprintf(buf, bufsize,
2930                  "%s%s%s%s%s%s(0x%08x)",
2931                  (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2932                  (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2933                  (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2934                  (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2935                  (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2936                  (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2937                  rotation);
2938 }
2939
/*
 * Dump the state of every plane on the given CRTC: type, CRTC position
 * and size, source window, pixel format and rotation flags.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* are 16.16 fixed point: the integer part is the
		 * value >> 16, and the fractional part is scaled to a
		 * decimal via (frac * 15625) >> 10 == frac * 10^6 / 2^16.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2985
2986 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2987 {
2988         struct intel_crtc_state *pipe_config;
2989         int num_scalers = intel_crtc->num_scalers;
2990         int i;
2991
2992         pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2993
2994         /* Not all platformas have a scaler */
2995         if (num_scalers) {
2996                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2997                            num_scalers,
2998                            pipe_config->scaler_state.scaler_users,
2999                            pipe_config->scaler_state.scaler_id);
3000
3001                 for (i = 0; i < num_scalers; i++) {
3002                         struct intel_scaler *sc =
3003                                         &pipe_config->scaler_state.scalers[i];
3004
3005                         seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3006                                    i, yesno(sc->in_use), sc->mode);
3007                 }
3008                 seq_puts(m, "\n");
3009         } else {
3010                 seq_puts(m, "\tNo scalers available on this platform\n");
3011         }
3012 }
3013
3014 static int i915_display_info(struct seq_file *m, void *unused)
3015 {
3016         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3017         struct drm_device *dev = &dev_priv->drm;
3018         struct intel_crtc *crtc;
3019         struct drm_connector *connector;
3020         struct drm_connector_list_iter conn_iter;
3021         intel_wakeref_t wakeref;
3022
3023         wakeref = intel_runtime_pm_get(dev_priv);
3024
3025         seq_printf(m, "CRTC info\n");
3026         seq_printf(m, "---------\n");
3027         for_each_intel_crtc(dev, crtc) {
3028                 struct intel_crtc_state *pipe_config;
3029
3030                 drm_modeset_lock(&crtc->base.mutex, NULL);
3031                 pipe_config = to_intel_crtc_state(crtc->base.state);
3032
3033                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3034                            crtc->base.base.id, pipe_name(crtc->pipe),
3035                            yesno(pipe_config->base.active),
3036                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3037                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3038
3039                 if (pipe_config->base.active) {
3040                         struct intel_plane *cursor =
3041                                 to_intel_plane(crtc->base.cursor);
3042
3043                         intel_crtc_info(m, crtc);
3044
3045                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3046                                    yesno(cursor->base.state->visible),
3047                                    cursor->base.state->crtc_x,
3048                                    cursor->base.state->crtc_y,
3049                                    cursor->base.state->crtc_w,
3050                                    cursor->base.state->crtc_h,
3051                                    cursor->cursor.base);
3052                         intel_scaler_info(m, crtc);
3053                         intel_plane_info(m, crtc);
3054                 }
3055
3056                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3057                            yesno(!crtc->cpu_fifo_underrun_disabled),
3058                            yesno(!crtc->pch_fifo_underrun_disabled));
3059                 drm_modeset_unlock(&crtc->base.mutex);
3060         }
3061
3062         seq_printf(m, "\n");
3063         seq_printf(m, "Connector info\n");
3064         seq_printf(m, "--------------\n");
3065         mutex_lock(&dev->mode_config.mutex);
3066         drm_connector_list_iter_begin(dev, &conn_iter);
3067         drm_for_each_connector_iter(connector, &conn_iter)
3068                 intel_connector_info(m, connector);
3069         drm_connector_list_iter_end(&conn_iter);
3070         mutex_unlock(&dev->mode_config.mutex);
3071
3072         intel_runtime_pm_put(dev_priv, wakeref);
3073
3074         return 0;
3075 }
3076
3077 static int i915_engine_info(struct seq_file *m, void *unused)
3078 {
3079         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3080         struct intel_engine_cs *engine;
3081         intel_wakeref_t wakeref;
3082         enum intel_engine_id id;
3083         struct drm_printer p;
3084
3085         wakeref = intel_runtime_pm_get(dev_priv);
3086
3087         seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
3088         seq_printf(m, "Global active requests: %d\n",
3089                    dev_priv->gt.active_requests);
3090         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3091                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3092
3093         p = drm_seq_file_printer(m);
3094         for_each_engine(engine, dev_priv, id)
3095                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3096
3097         intel_runtime_pm_put(dev_priv, wakeref);
3098
3099         return 0;
3100 }
3101
3102 static int i915_rcs_topology(struct seq_file *m, void *unused)
3103 {
3104         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3105         struct drm_printer p = drm_seq_file_printer(m);
3106
3107         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3108
3109         return 0;
3110 }
3111
3112 static int i915_shrinker_info(struct seq_file *m, void *unused)
3113 {
3114         struct drm_i915_private *i915 = node_to_i915(m->private);
3115
3116         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3117         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3118
3119         return 0;
3120 }
3121
/*
 * i915_shared_dplls_info - dump the software-tracked state of every shared
 * display PLL: name/id, the crtc/active masks and on state, plus the cached
 * hardware register values for each PLL flavour in the hw_state union.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		/*
		 * mg_* fields below belong to the MG PLL variant of the
		 * hw_state union; they are only meaningful when this PLL is
		 * an MG PLL — NOTE(review): assumed, confirm against the
		 * intel_dpll_hw_state definition.
		 */
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3170
3171 static int i915_wa_registers(struct seq_file *m, void *unused)
3172 {
3173         struct drm_i915_private *i915 = node_to_i915(m->private);
3174         const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
3175         struct i915_wa *wa;
3176         unsigned int i;
3177
3178         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3179         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3180                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3181                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3182
3183         return 0;
3184 }
3185
3186 static int i915_ipc_status_show(struct seq_file *m, void *data)
3187 {
3188         struct drm_i915_private *dev_priv = m->private;
3189
3190         seq_printf(m, "Isochronous Priority Control: %s\n",
3191                         yesno(dev_priv->ipc_enabled));
3192         return 0;
3193 }
3194
3195 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3196 {
3197         struct drm_i915_private *dev_priv = inode->i_private;
3198
3199         if (!HAS_IPC(dev_priv))
3200                 return -ENODEV;
3201
3202         return single_open(file, i915_ipc_status_show, dev_priv);
3203 }
3204
/*
 * Toggle Isochronous Priority Control from userspace.  Accepts any
 * kstrtobool-compatible string ("0"/"1", "y"/"n", ...).
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hardware is poked below, so hold a runtime PM wakeref throughout. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/* Force watermark recomputation on the next commit. */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3228
/* <debugfs>/i915_ipc_status: read current IPC state, write to toggle it. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3237
3238 static int i915_ddb_info(struct seq_file *m, void *unused)
3239 {
3240         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3241         struct drm_device *dev = &dev_priv->drm;
3242         struct skl_ddb_entry *entry;
3243         struct intel_crtc *crtc;
3244
3245         if (INTEL_GEN(dev_priv) < 9)
3246                 return -ENODEV;
3247
3248         drm_modeset_lock_all(dev);
3249
3250         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3251
3252         for_each_intel_crtc(&dev_priv->drm, crtc) {
3253                 struct intel_crtc_state *crtc_state =
3254                         to_intel_crtc_state(crtc->base.state);
3255                 enum pipe pipe = crtc->pipe;
3256                 enum plane_id plane_id;
3257
3258                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3259
3260                 for_each_plane_id_on_crtc(crtc, plane_id) {
3261                         entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3262                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3263                                    entry->start, entry->end,
3264                                    skl_ddb_entry_size(entry));
3265                 }
3266
3267                 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3268                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3269                            entry->end, skl_ddb_entry_size(entry));
3270         }
3271
3272         drm_modeset_unlock_all(dev);
3273
3274         return 0;
3275 }
3276
/*
 * drrs_status_per_crtc - print DRRS state for one CRTC: the name of each
 * connector driven by it, the DRRS type reported by the VBT, and — when
 * the current CRTC state has DRRS — the live refresh-rate state.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently attached to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* DRRS capability as described by the VBT. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->dp and refresh_rate_type are read under drrs->mutex. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Report which refresh rate is active and its vrefresh. */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3351
3352 static int i915_drrs_status(struct seq_file *m, void *unused)
3353 {
3354         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3355         struct drm_device *dev = &dev_priv->drm;
3356         struct intel_crtc *intel_crtc;
3357         int active_crtc_cnt = 0;
3358
3359         drm_modeset_lock_all(dev);
3360         for_each_intel_crtc(dev, intel_crtc) {
3361                 if (intel_crtc->base.state->active) {
3362                         active_crtc_cnt++;
3363                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3364
3365                         drrs_status_per_crtc(m, dev, intel_crtc);
3366                 }
3367         }
3368         drm_modeset_unlock_all(dev);
3369
3370         if (!active_crtc_cnt)
3371                 seq_puts(m, "No active crtc found\n");
3372
3373         return 0;
3374 }
3375
3376 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3377 {
3378         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3379         struct drm_device *dev = &dev_priv->drm;
3380         struct intel_encoder *intel_encoder;
3381         struct intel_digital_port *intel_dig_port;
3382         struct drm_connector *connector;
3383         struct drm_connector_list_iter conn_iter;
3384
3385         drm_connector_list_iter_begin(dev, &conn_iter);
3386         drm_for_each_connector_iter(connector, &conn_iter) {
3387                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3388                         continue;
3389
3390                 intel_encoder = intel_attached_encoder(connector);
3391                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3392                         continue;
3393
3394                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3395                 if (!intel_dig_port->dp.can_mst)
3396                         continue;
3397
3398                 seq_printf(m, "MST Source Port %c\n",
3399                            port_name(intel_dig_port->base.port));
3400                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3401         }
3402         drm_connector_list_iter_end(&conn_iter);
3403
3404         return 0;
3405 }
3406
/*
 * Arm or disarm DP compliance test handling from userspace.  The written
 * value is parsed with kstrtoint(); only an exact value of 1 sets
 * compliance.test_active on each connected (non-MST) DP connector, any
 * other value clears it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* NUL-terminated kernel copy of the user buffer for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		/* Only DP connectors take part in compliance testing here. */
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3465
3466 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3467 {
3468         struct drm_i915_private *dev_priv = m->private;
3469         struct drm_device *dev = &dev_priv->drm;
3470         struct drm_connector *connector;
3471         struct drm_connector_list_iter conn_iter;
3472         struct intel_dp *intel_dp;
3473
3474         drm_connector_list_iter_begin(dev, &conn_iter);
3475         drm_for_each_connector_iter(connector, &conn_iter) {
3476                 struct intel_encoder *encoder;
3477
3478                 if (connector->connector_type !=
3479                     DRM_MODE_CONNECTOR_DisplayPort)
3480                         continue;
3481
3482                 encoder = to_intel_encoder(connector->encoder);
3483                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3484                         continue;
3485
3486                 if (encoder && connector->status == connector_status_connected) {
3487                         intel_dp = enc_to_intel_dp(&encoder->base);
3488                         if (intel_dp->compliance.test_active)
3489                                 seq_puts(m, "1");
3490                         else
3491                                 seq_puts(m, "0");
3492                 } else
3493                         seq_puts(m, "0");
3494         }
3495         drm_connector_list_iter_end(&conn_iter);
3496
3497         return 0;
3498 }
3499
3500 static int i915_displayport_test_active_open(struct inode *inode,
3501                                              struct file *file)
3502 {
3503         return single_open(file, i915_displayport_test_active_show,
3504                            inode->i_private);
3505 }
3506
/* <debugfs>/i915_dp_test_active: read/arm DP compliance test handling. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3515
/*
 * Show cached DP compliance test data per connected DP connector: the
 * stored EDID value for EDID-read tests, or hdisplay/vdisplay/bpc for
 * video-pattern tests.  Disconnected connectors print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST-typed encoders are skipped. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3559
/*
 * Show the pending DP compliance test type (hex) for each connected DP
 * connector; "0" for disconnected ones.  MST-typed encoders are skipped.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3591
/*
 * wm_latency_show - print one watermark latency table: the raw per-level
 * value followed by the same value scaled to microseconds (one decimal).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of watermark levels is platform-dependent. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* 'latency' is now in units of 0.1 us for printing. */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3631
3632 static int pri_wm_latency_show(struct seq_file *m, void *data)
3633 {
3634         struct drm_i915_private *dev_priv = m->private;
3635         const u16 *latencies;
3636
3637         if (INTEL_GEN(dev_priv) >= 9)
3638                 latencies = dev_priv->wm.skl_latency;
3639         else
3640                 latencies = dev_priv->wm.pri_latency;
3641
3642         wm_latency_show(m, latencies);
3643
3644         return 0;
3645 }
3646
3647 static int spr_wm_latency_show(struct seq_file *m, void *data)
3648 {
3649         struct drm_i915_private *dev_priv = m->private;
3650         const u16 *latencies;
3651
3652         if (INTEL_GEN(dev_priv) >= 9)
3653                 latencies = dev_priv->wm.skl_latency;
3654         else
3655                 latencies = dev_priv->wm.spr_latency;
3656
3657         wm_latency_show(m, latencies);
3658
3659         return 0;
3660 }
3661
3662 static int cur_wm_latency_show(struct seq_file *m, void *data)
3663 {
3664         struct drm_i915_private *dev_priv = m->private;
3665         const u16 *latencies;
3666
3667         if (INTEL_GEN(dev_priv) >= 9)
3668                 latencies = dev_priv->wm.skl_latency;
3669         else
3670                 latencies = dev_priv->wm.cur_latency;
3671
3672         wm_latency_show(m, latencies);
3673
3674         return 0;
3675 }
3676
3677 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3678 {
3679         struct drm_i915_private *dev_priv = inode->i_private;
3680
3681         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3682                 return -ENODEV;
3683
3684         return single_open(file, pri_wm_latency_show, dev_priv);
3685 }
3686
3687 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3688 {
3689         struct drm_i915_private *dev_priv = inode->i_private;
3690
3691         if (HAS_GMCH(dev_priv))
3692                 return -ENODEV;
3693
3694         return single_open(file, spr_wm_latency_show, dev_priv);
3695 }
3696
3697 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3698 {
3699         struct drm_i915_private *dev_priv = inode->i_private;
3700
3701         if (HAS_GMCH(dev_priv))
3702                 return -ENODEV;
3703
3704         return single_open(file, cur_wm_latency_show, dev_priv);
3705 }
3706
/*
 * wm_latency_write - common helper to overwrite a watermark latency table
 * from a space-separated list of per-level values written by userspace.
 * Exactly num_levels values must be supplied or the write fails.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Platform-dependent level count; mirrors wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the terminating NUL added below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Reject input unless exactly num_levels values were parsed. */
	if (ret != num_levels)
		return -EINVAL;

	/* Publish the new table under the modeset locks. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3751
3752
3753 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3754                                     size_t len, loff_t *offp)
3755 {
3756         struct seq_file *m = file->private_data;
3757         struct drm_i915_private *dev_priv = m->private;
3758         u16 *latencies;
3759
3760         if (INTEL_GEN(dev_priv) >= 9)
3761                 latencies = dev_priv->wm.skl_latency;
3762         else
3763                 latencies = dev_priv->wm.pri_latency;
3764
3765         return wm_latency_write(file, ubuf, len, offp, latencies);
3766 }
3767
3768 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3769                                     size_t len, loff_t *offp)
3770 {
3771         struct seq_file *m = file->private_data;
3772         struct drm_i915_private *dev_priv = m->private;
3773         u16 *latencies;
3774
3775         if (INTEL_GEN(dev_priv) >= 9)
3776                 latencies = dev_priv->wm.skl_latency;
3777         else
3778                 latencies = dev_priv->wm.spr_latency;
3779
3780         return wm_latency_write(file, ubuf, len, offp, latencies);
3781 }
3782
3783 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3784                                     size_t len, loff_t *offp)
3785 {
3786         struct seq_file *m = file->private_data;
3787         struct drm_i915_private *dev_priv = m->private;
3788         u16 *latencies;
3789
3790         if (INTEL_GEN(dev_priv) >= 9)
3791                 latencies = dev_priv->wm.skl_latency;
3792         else
3793                 latencies = dev_priv->wm.cur_latency;
3794
3795         return wm_latency_write(file, ubuf, len, offp, latencies);
3796 }
3797
/* File ops for i915_pri_wm_latency: seq_file-backed read, custom write. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3806
/* File ops for i915_spr_wm_latency: seq_file-backed read, custom write. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3815
/* File ops for i915_cur_wm_latency: seq_file-backed read, custom write. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3824
3825 static int
3826 i915_wedged_get(void *data, u64 *val)
3827 {
3828         int ret = i915_terminally_wedged(data);
3829
3830         switch (ret) {
3831         case -EIO:
3832                 *val = 1;
3833                 return 0;
3834         case 0:
3835                 *val = 0;
3836                 return 0;
3837         default:
3838                 return ret;
3839         }
3840 }
3841
/*
 * Manually declare engines hung from debugfs. @val is a mask of engines
 * to report as hung to i915_handle_error(), which captures state and
 * kicks off reset handling.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3855
/* i915_wedged debugfs file: read = wedged status, write = engine mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3859
/*
 * Flag bits accepted by the i915_gem_drop_caches debugfs file; see
 * i915_drop_caches_set() for what each one triggers.
 */
#define DROP_UNBOUND	BIT(0)	/* shrink objects not bound into the GTT */
#define DROP_BOUND	BIT(1)	/* shrink objects bound into the GTT */
#define DROP_RETIRE	BIT(2)	/* retire completed requests */
#define DROP_ACTIVE	BIT(3)	/* wait for the GPU to go idle first */
#define DROP_FREED	BIT(4)	/* drain the deferred-free object list */
#define DROP_SHRINK_ALL	BIT(5)	/* invoke the full shrinker */
#define DROP_IDLE	BIT(6)	/* flush retire/idle workers until asleep */
#define DROP_RESET_ACTIVE	BIT(7)	/* reset (or wedge) busy engines */
#define DROP_RESET_SEQNO	BIT(8)	/* take struct_mutex (seqno path) */
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* Report the full mask of supported DROP_* flags back to userspace. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3885
/*
 * Perform the cache-dropping actions selected by the DROP_* bits in @val.
 * Ordering is significant: idle/reset handling first, then retirement under
 * struct_mutex, then shrinking under the fs_reclaim annotation, and finally
 * the idle/freed-object drains.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* If the engines refuse to idle in time, declare the GPU wedged. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		/*
		 * NOTE(review): a failure here (interrupted wait) is not
		 * propagated; we still retire and carry on best-effort.
		 */
		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* If we wedged above, attempt a reset now to recover the device. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Annotate the shrink calls as happening under memory reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	/* Keep flushing the retire/idle workers until the GT goes to sleep. */
	if (val & DROP_IDLE) {
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}
3946
/* i915_gem_drop_caches debugfs file: read = DROP_ALL mask, write = flags. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
3950
/*
 * Read the current MBC snoop/cache-sharing policy field from
 * GEN6_MBCUNIT_SNPCR. Only meaningful on gen6/7 hardware.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Register read requires the device to be awake. */
	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3968
/*
 * Write a new MBC snoop/cache-sharing policy (0-3) into
 * GEN6_MBCUNIT_SNPCR via read-modify-write. Gen6/7 only.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Only a 2-bit field in the register. */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3994
/* i915_cache_sharing debugfs file: read/write the MBC snoop policy. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
3998
/*
 * Fill @sseu with the runtime slice/subslice/EU power state on Cherryview
 * by decoding the CHV_POWER_SS*_SIG1/2 power-gate signal registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each PG_ENABLE bit gates a pair of EUs; count enabled pairs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4031
/*
 * Fill @sseu with the runtime slice/subslice/EU power state on gen10+
 * by decoding the per-slice PGCTL ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) and odd (SSB) subslice EU groups. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4087
/*
 * Fill @sseu with the runtime slice/subslice/EU power state on gen9
 * by decoding the per-slice PGCTL ACK registers. On gen9 big-core the
 * subslice mask comes from fuse data; on gen9 LP it is read from the
 * per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) and odd (SSB) subslice EU groups. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit represents two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4143
/*
 * Fill @sseu with the runtime slice state on Broadwell. Only the slice
 * mask is read from hardware (GEN8_GT_SLICE_INFO); subslice and EU counts
 * are taken from fuse data, then corrected for fused-off EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4171
4172 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4173                                  const struct sseu_dev_info *sseu)
4174 {
4175         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4176         const char *type = is_available_info ? "Available" : "Enabled";
4177         int s;
4178
4179         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4180                    sseu->slice_mask);
4181         seq_printf(m, "  %s Slice Total: %u\n", type,
4182                    hweight8(sseu->slice_mask));
4183         seq_printf(m, "  %s Subslice Total: %u\n", type,
4184                    sseu_subslice_total(sseu));
4185         for (s = 0; s < fls(sseu->slice_mask); s++) {
4186                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4187                            s, hweight8(sseu->subslice_mask[s]));
4188         }
4189         seq_printf(m, "  %s EU Total: %u\n", type,
4190                    sseu->eu_total);
4191         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4192                    sseu->eu_per_subslice);
4193
4194         if (!is_available_info)
4195                 return;
4196
4197         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4198         if (HAS_POOLED_EU(dev_priv))
4199                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4200
4201         seq_printf(m, "  Has Slice Power Gating: %s\n",
4202                    yesno(sseu->has_slice_pg));
4203         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4204                    yesno(sseu->has_subslice_pg));
4205         seq_printf(m, "  Has EU Power Gating: %s\n",
4206                    yesno(sseu->has_eu_pg));
4207 }
4208
/*
 * i915_sseu_status debugfs entry: print both the fused ("Device Info")
 * and current runtime ("Device Status") SSEU configuration, using the
 * platform-specific status reader. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Carry over the topology limits; the status readers fill the rest. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* Register reads below require the device to be awake. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4243
/*
 * Opening i915_forcewake_user grabs a runtime-pm wakeref plus user
 * forcewake for the lifetime of the file. The wakeref cookie is stashed
 * in file->private_data so the release hook can return it.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* No forcewake before gen6; open becomes a no-op. */
	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}
4256
/*
 * Counterpart to i915_forcewake_open(): drop the user forcewake and
 * return the wakeref cookie that was stashed in file->private_data.
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* Open took no references on pre-gen6; nothing to release. */
	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(i915,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}
4270
/* i915_forcewake_user: forcewake held while the file is open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4276
/*
 * Show the current HPD storm detection threshold and whether a storm is
 * currently being handled (i.e. the re-enable work is still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4295
4296 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4297                                         const char __user *ubuf, size_t len,
4298                                         loff_t *offp)
4299 {
4300         struct seq_file *m = file->private_data;
4301         struct drm_i915_private *dev_priv = m->private;
4302         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4303         unsigned int new_threshold;
4304         int i;
4305         char *newline;
4306         char tmp[16];
4307
4308         if (len >= sizeof(tmp))
4309                 return -EINVAL;
4310
4311         if (copy_from_user(tmp, ubuf, len))
4312                 return -EFAULT;
4313
4314         tmp[len] = '\0';
4315
4316         /* Strip newline, if any */
4317         newline = strchr(tmp, '\n');
4318         if (newline)
4319                 *newline = '\0';
4320
4321         if (strcmp(tmp, "reset") == 0)
4322                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4323         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4324                 return -EINVAL;
4325
4326         if (new_threshold > 0)
4327                 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4328                               new_threshold);
4329         else
4330                 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4331
4332         spin_lock_irq(&dev_priv->irq_lock);
4333         hotplug->hpd_storm_threshold = new_threshold;
4334         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4335         for_each_hpd_pin(i)
4336                 hotplug->stats[i].count = 0;
4337         spin_unlock_irq(&dev_priv->irq_lock);
4338
4339         /* Re-enable hpd immediately if we were in an irq storm */
4340         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4341
4342         return len;
4343 }
4344
/* Bind the seq_file show routine to the device's debugfs inode. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4349
/* File ops for i915_hpd_storm_ctl: seq_file read, custom write. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4358
/* Show whether short-pulse HPD storm detection is currently enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4368
/* Bind the seq_file show routine to the device's debugfs inode. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4375
4376 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4377                                               const char __user *ubuf,
4378                                               size_t len, loff_t *offp)
4379 {
4380         struct seq_file *m = file->private_data;
4381         struct drm_i915_private *dev_priv = m->private;
4382         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4383         char *newline;
4384         char tmp[16];
4385         int i;
4386         bool new_state;
4387
4388         if (len >= sizeof(tmp))
4389                 return -EINVAL;
4390
4391         if (copy_from_user(tmp, ubuf, len))
4392                 return -EFAULT;
4393
4394         tmp[len] = '\0';
4395
4396         /* Strip newline, if any */
4397         newline = strchr(tmp, '\n');
4398         if (newline)
4399                 *newline = '\0';
4400
4401         /* Reset to the "default" state for this system */
4402         if (strcmp(tmp, "reset") == 0)
4403                 new_state = !HAS_DP_MST(dev_priv);
4404         else if (kstrtobool(tmp, &new_state) != 0)
4405                 return -EINVAL;
4406
4407         DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4408                       new_state ? "En" : "Dis");
4409
4410         spin_lock_irq(&dev_priv->irq_lock);
4411         hotplug->hpd_short_storm_enabled = new_state;
4412         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4413         for_each_hpd_pin(i)
4414                 hotplug->stats[i].count = 0;
4415         spin_unlock_irq(&dev_priv->irq_lock);
4416
4417         /* Re-enable hpd immediately if we were in an irq storm */
4418         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4419
4420         return len;
4421 }
4422
/* File ops for i915_hpd_short_storm_ctl: seq_file read, custom write. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4431
/*
 * Manually enable (@val != 0) or disable (@val == 0) DRRS on every active
 * CRTC that supports it. Each CRTC is locked individually; any pending
 * commit is waited on (hw_done) before touching the eDP connector(s)
 * attached to that CRTC. Gen7+ only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret is 0 here, so these early exits continue the loop. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any in-flight commit to reach the hardware. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			/* DRRS is an eDP-only feature. */
			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4499
/* i915_drrs_ctl debugfs file: write-only DRRS enable/disable control. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4501
/*
 * Writing a truthy value to i915_fifo_underrun_reset re-arms FIFO underrun
 * reporting on every active CRTC (it is disarmed after the first underrun
 * to avoid an interrupt storm), then resets the FBC underrun counter.
 * Each CRTC waits for any pending commit (hw_done and flip_done) before
 * re-arming.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A falsy write is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Always unlock before propagating a wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4555
/* File ops for i915_fifo_underrun_reset: write-only control file. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4562
4563 static const struct drm_info_list i915_debugfs_list[] = {
4564         {"i915_capabilities", i915_capabilities, 0},
4565         {"i915_gem_objects", i915_gem_object_info, 0},
4566         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4567         {"i915_gem_stolen", i915_gem_stolen_list_info },
4568         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4569         {"i915_gem_interrupt", i915_interrupt_info, 0},
4570         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4571         {"i915_guc_info", i915_guc_info, 0},
4572         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4573         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4574         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4575         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4576         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4577         {"i915_frequency_info", i915_frequency_info, 0},
4578         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4579         {"i915_reset_info", i915_reset_info, 0},
4580         {"i915_drpc_info", i915_drpc_info, 0},
4581         {"i915_emon_status", i915_emon_status, 0},
4582         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4583         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4584         {"i915_fbc_status", i915_fbc_status, 0},
4585         {"i915_ips_status", i915_ips_status, 0},
4586         {"i915_sr_status", i915_sr_status, 0},
4587         {"i915_opregion", i915_opregion, 0},
4588         {"i915_vbt", i915_vbt, 0},
4589         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4590         {"i915_context_status", i915_context_status, 0},
4591         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4592         {"i915_swizzle_info", i915_swizzle_info, 0},
4593         {"i915_llc", i915_llc, 0},
4594         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4595         {"i915_energy_uJ", i915_energy_uJ, 0},
4596         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4597         {"i915_power_domain_info", i915_power_domain_info, 0},
4598         {"i915_dmc_info", i915_dmc_info, 0},
4599         {"i915_display_info", i915_display_info, 0},
4600         {"i915_engine_info", i915_engine_info, 0},
4601         {"i915_rcs_topology", i915_rcs_topology, 0},
4602         {"i915_shrinker_info", i915_shrinker_info, 0},
4603         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4604         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4605         {"i915_wa_registers", i915_wa_registers, 0},
4606         {"i915_ddb_info", i915_ddb_info, 0},
4607         {"i915_sseu_status", i915_sseu_status, 0},
4608         {"i915_drrs_status", i915_drrs_status, 0},
4609         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4610 };
4611 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4612
/*
 * Write-able debugfs files.  Unlike the read-only drm_info_list
 * entries, each of these is created individually with
 * debugfs_create_file() and carries its own file_operations.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/* Error capture files only exist when error capture is built in. */
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4640
4641 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4642 {
4643         struct drm_minor *minor = dev_priv->drm.primary;
4644         struct dentry *ent;
4645         int i;
4646
4647         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4648                                   minor->debugfs_root, to_i915(minor->dev),
4649                                   &i915_forcewake_fops);
4650         if (!ent)
4651                 return -ENOMEM;
4652
4653         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4654                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4655                                           S_IRUGO | S_IWUSR,
4656                                           minor->debugfs_root,
4657                                           to_i915(minor->dev),
4658                                           i915_debugfs_files[i].fops);
4659                 if (!ent)
4660                         return -ENOMEM;
4661         }
4662
4663         return drm_debugfs_create_files(i915_debugfs_list,
4664                                         I915_DEBUGFS_ENTRIES,
4665                                         minor->debugfs_root, minor);
4666 }
4667
/* One contiguous range of DPCD registers to dump via i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4678
/*
 * DPCD register ranges dumped by the per-connector i915_dpcd debugfs
 * file.  Each range must fit in the 16-byte read buffer used by
 * i915_dpcd_show().
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4691
/*
 * Dump the DPCD register ranges listed in i915_dpcd_debug[] over the
 * connector's AUX channel, one hexdump line per range.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end (inclusive) wins over .size; a bare offset reads 1 byte. */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/* Print only the bytes actually read (err may be short). */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4726
4727 static int i915_panel_show(struct seq_file *m, void *data)
4728 {
4729         struct drm_connector *connector = m->private;
4730         struct intel_dp *intel_dp =
4731                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4732
4733         if (connector->status != connector_status_connected)
4734                 return -ENODEV;
4735
4736         seq_printf(m, "Panel power up delay: %d\n",
4737                    intel_dp->panel_power_up_delay);
4738         seq_printf(m, "Panel power down delay: %d\n",
4739                    intel_dp->panel_power_down_delay);
4740         seq_printf(m, "Backlight on delay: %d\n",
4741                    intel_dp->backlight_on_delay);
4742         seq_printf(m, "Backlight off delay: %d\n",
4743                    intel_dp->backlight_off_delay);
4744
4745         return 0;
4746 }
4747 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4748
4749 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4750 {
4751         struct drm_connector *connector = m->private;
4752         struct intel_connector *intel_connector = to_intel_connector(connector);
4753
4754         if (connector->status != connector_status_connected)
4755                 return -ENODEV;
4756
4757         /* HDCP is supported by connector */
4758         if (!intel_connector->hdcp.shim)
4759                 return -EINVAL;
4760
4761         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4762                    connector->base.id);
4763         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4764                    "None" : "HDCP1.4");
4765         seq_puts(m, "\n");
4766
4767         return 0;
4768 }
4769 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4770
/*
 * Report DSC enable/sink-support and (for non-eDP) FEC sink support for
 * the connector's active CRTC.  Takes connection_mutex and the CRTC
 * lock under a modeset acquire context, retrying on -EDEADLK.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Interrupted while waiting for the connection lock. */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Lock contention with another acquire ctx: back off,
			 * then retry the whole sequence from the top.
			 */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC is only advertised for DP, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4824
4825 static ssize_t i915_dsc_fec_support_write(struct file *file,
4826                                           const char __user *ubuf,
4827                                           size_t len, loff_t *offp)
4828 {
4829         bool dsc_enable = false;
4830         int ret;
4831         struct drm_connector *connector =
4832                 ((struct seq_file *)file->private_data)->private;
4833         struct intel_encoder *encoder = intel_attached_encoder(connector);
4834         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4835
4836         if (len == 0)
4837                 return 0;
4838
4839         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4840                          len);
4841
4842         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4843         if (ret < 0)
4844                 return ret;
4845
4846         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4847                          (dsc_enable) ? "true" : "false");
4848         intel_dp->force_dsc_en = dsc_enable;
4849
4850         *offp += len;
4851         return len;
4852 }
4853
/* Open hook: bind i915_dsc_fec_support_show to the connector stashed
 * in inode->i_private by i915_debugfs_connector_add().
 */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4860
/* Reads show DSC/FEC state; writes force DSC via force_dsc_en. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4869
4870 /**
4871  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4872  * @connector: pointer to a registered drm_connector
4873  *
4874  * Cleanup will be done by drm_connector_unregister() through a call to
4875  * drm_debugfs_connector_remove().
4876  *
4877  * Returns 0 on success, negative error codes on error.
4878  */
4879 int i915_debugfs_connector_add(struct drm_connector *connector)
4880 {
4881         struct dentry *root = connector->debugfs_entry;
4882         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4883
4884         /* The connector must have been registered beforehands. */
4885         if (!root)
4886                 return -ENODEV;
4887
4888         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4889             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4890                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4891                                     connector, &i915_dpcd_fops);
4892
4893         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4894                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4895                                     connector, &i915_panel_fops);
4896                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4897                                     connector, &i915_psr_sink_status_fops);
4898         }
4899
4900         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4901             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4902             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4903                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4904                                     connector, &i915_hdcp_sink_capability_fops);
4905         }
4906
4907         if (INTEL_GEN(dev_priv) >= 10 &&
4908             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4909              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4910                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4911                                     connector, &i915_dsc_fec_support_fops);
4912
4913         return 0;
4914 }