1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77
78 #include <drm/drm_drv.h>
79
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87
88 #define AMDGPU_RESUME_MS                2000
89 #define AMDGPU_MAX_RETRY_LIMIT          2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91
92 const char *amdgpu_asic_name[] = {
93         "TAHITI",
94         "PITCAIRN",
95         "VERDE",
96         "OLAND",
97         "HAINAN",
98         "BONAIRE",
99         "KAVERI",
100         "KABINI",
101         "HAWAII",
102         "MULLINS",
103         "TOPAZ",
104         "TONGA",
105         "FIJI",
106         "CARRIZO",
107         "STONEY",
108         "POLARIS10",
109         "POLARIS11",
110         "POLARIS12",
111         "VEGAM",
112         "VEGA10",
113         "VEGA12",
114         "VEGA20",
115         "RAVEN",
116         "ARCTURUS",
117         "RENOIR",
118         "ALDEBARAN",
119         "NAVI10",
120         "CYAN_SKILLFISH",
121         "NAVI14",
122         "NAVI12",
123         "SIENNA_CICHLID",
124         "NAVY_FLOUNDER",
125         "VANGOGH",
126         "DIMGREY_CAVEFISH",
127         "BEIGE_GOBY",
128         "YELLOW_CARP",
129         "IP DISCOVERY",
130         "LAST",
131 };
132
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs)
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143                 struct device_attribute *attr, char *buf)
144 {
145         struct drm_device *ddev = dev_get_drvdata(dev);
146         struct amdgpu_device *adev = drm_to_adev(ddev);
147         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148
149         return sysfs_emit(buf, "%llu\n", cnt);
150 }
151
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153                 amdgpu_device_get_pcie_replay_count, NULL);
154
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device
162  * The file product_name is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168                 struct device_attribute *attr, char *buf)
169 {
170         struct drm_device *ddev = dev_get_drvdata(dev);
171         struct amdgpu_device *adev = drm_to_adev(ddev);
172
173         return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175
176 static DEVICE_ATTR(product_name, S_IRUGO,
177                 amdgpu_device_get_product_name, NULL);
178
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device
184  * The file product_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190                 struct device_attribute *attr, char *buf)
191 {
192         struct drm_device *ddev = dev_get_drvdata(dev);
193         struct amdgpu_device *adev = drm_to_adev(ddev);
194
195         return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197
198 static DEVICE_ATTR(product_number, S_IRUGO,
199                 amdgpu_device_get_product_number, NULL);
200
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212                 struct device_attribute *attr, char *buf)
213 {
214         struct drm_device *ddev = dev_get_drvdata(dev);
215         struct amdgpu_device *adev = drm_to_adev(ddev);
216
217         return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221                 amdgpu_device_get_serial_number, NULL);
222
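/*
 * Illustration only (not part of the driver): the sysfs attributes above
 * (pcie_replay_count, product_name, product_number, serial_number) show up
 * as plain files under the PCI device node, e.g.
 * /sys/class/drm/card0/device/serial_number -- the card index is an
 * assumption and varies by system.  A minimal userspace sketch
 * (needs <fcntl.h> and <unistd.h>):
 *
 *   char buf[64] = {0};
 *   int fd = open("/sys/class/drm/card0/device/serial_number", O_RDONLY);
 *
 *   if (fd >= 0) {
 *           read(fd, buf, sizeof(buf) - 1);
 *           close(fd);
 *   }
 */
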
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise return false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233         struct amdgpu_device *adev = drm_to_adev(dev);
234
235         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236                 return true;
237         return false;
238 }
239
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise return false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250         struct amdgpu_device *adev = drm_to_adev(dev);
251
252         if (adev->has_pr3 ||
253             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254                 return true;
255         return false;
256 }
257
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supports BACO,
264  * otherwise return false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268         struct amdgpu_device *adev = drm_to_adev(dev);
269
270         return amdgpu_asic_supports_baco(adev);
271 }
272
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284         return (amdgpu_device_supports_boco(dev) &&
285                 amdgpu_acpi_is_power_shift_control_supported());
286 }
287
288 /*
289  * VRAM access helper functions
290  */
291
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size, sizeof(@buf) must be >= @size
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302                              void *buf, size_t size, bool write)
303 {
304         unsigned long flags;
305         uint32_t hi = ~0, tmp = 0;
306         uint32_t *data = buf;
307         uint64_t last;
308         int idx;
309
310         if (!drm_dev_enter(adev_to_drm(adev), &idx))
311                 return;
312
313         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314
315         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316         for (last = pos + size; pos < last; pos += 4) {
317                 tmp = pos >> 31;
318
319                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320                 if (tmp != hi) {
321                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322                         hi = tmp;
323                 }
324                 if (write)
325                         WREG32_NO_KIQ(mmMM_DATA, *data++);
326                 else
327                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
328         }
329
330         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331         drm_dev_exit(idx);
332 }
333
334 /**
335  * amdgpu_device_aper_access - access vram through the vram aperture
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size, sizeof(@buf) must be >= @size
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * Returns the number of bytes that have been transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346                                  void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349         void __iomem *addr;
350         size_t count = 0;
351         uint64_t last;
352
353         if (!adev->mman.aper_base_kaddr)
354                 return 0;
355
356         last = min(pos + size, adev->gmc.visible_vram_size);
357         if (last > pos) {
358                 addr = adev->mman.aper_base_kaddr + pos;
359                 count = last - pos;
360
361                 if (write) {
362                         memcpy_toio(addr, buf, count);
363                         mb();
364                         amdgpu_device_flush_hdp(adev, NULL);
365                 } else {
366                         amdgpu_device_invalidate_hdp(adev, NULL);
367                         mb();
368                         memcpy_fromio(buf, addr, count);
369                 }
370
371         }
372
373         return count;
374 #else
375         return 0;
376 #endif
377 }
378
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size, sizeof(@buf) must be >= @size
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389                                void *buf, size_t size, bool write)
390 {
391         size_t count;
392
393         /* try using the VRAM aperture to access VRAM first */
394         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395         size -= count;
396         if (size) {
397                 /* use MM access for the rest of the VRAM */
398                 pos += count;
399                 buf += count;
400                 amdgpu_device_mm_access(adev, pos, buf, size, write);
401         }
402 }
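
/*
 * Usage sketch (hypothetical caller, illustration only): read one dword at
 * VRAM offset 0 through the helper above.  The visible-VRAM aperture path
 * is tried first and the MM_INDEX/MM_DATA fallback covers whatever remains;
 * pos and size must be 4-byte aligned.
 *
 *   uint32_t val;
 *
 *   amdgpu_device_vram_access(adev, 0, &val, sizeof(val), false);
 */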
403
404 /*
405  * register access helper functions.
406  */
407
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411         if (adev->no_hw_access)
412                 return true;
413
414 #ifdef CONFIG_LOCKDEP
415         /*
416          * This is a bit complicated to understand, so worth a comment. What we assert
417          * here is that the GPU reset is not running on another thread in parallel.
418          *
419  * For this we trylock the read side of the reset semaphore; if that succeeds
420  * we know that the reset is not running in parallel.
421          *
422          * If the trylock fails we assert that we are either already holding the read
423          * side of the lock or are the reset thread itself and hold the write side of
424          * the lock.
425          */
426         if (in_task()) {
427                 if (down_read_trylock(&adev->reset_domain->sem))
428                         up_read(&adev->reset_domain->sem);
429                 else
430                         lockdep_assert_held(&adev->reset_domain->sem);
431         }
432 #endif
433         return false;
434 }
435
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446                             uint32_t reg, uint32_t acc_flags)
447 {
448         uint32_t ret;
449
450         if (amdgpu_device_skip_hw_access(adev))
451                 return 0;
452
453         if ((reg * 4) < adev->rmmio_size) {
454                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455                     amdgpu_sriov_runtime(adev) &&
456                     down_read_trylock(&adev->reset_domain->sem)) {
457                         ret = amdgpu_kiq_rreg(adev, reg);
458                         up_read(&adev->reset_domain->sem);
459                 } else {
460                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461                 }
462         } else {
463                 ret = adev->pcie_rreg(adev, reg * 4);
464         }
465
466         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467
468         return ret;
469 }
470
471 /*
472  * MMIO byte register read helper function
473  * @offset: byte offset from MMIO start
474  *
475  */
476
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487         if (amdgpu_device_skip_hw_access(adev))
488                 return 0;
489
490         if (offset < adev->rmmio_size)
491                 return (readb(adev->rmmio + offset));
492         BUG();
493 }
494
495 /*
496  * MMIO byte register write helper function
497  * @offset: byte offset from MMIO start
498  * @value: the value to be written to the register
499  *
500  */
501 /**
502  * amdgpu_mm_wreg8 - write a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512         if (amdgpu_device_skip_hw_access(adev))
513                 return;
514
515         if (offset < adev->rmmio_size)
516                 writeb(value, adev->rmmio + offset);
517         else
518                 BUG();
519 }
520
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532                         uint32_t reg, uint32_t v,
533                         uint32_t acc_flags)
534 {
535         if (amdgpu_device_skip_hw_access(adev))
536                 return;
537
538         if ((reg * 4) < adev->rmmio_size) {
539                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540                     amdgpu_sriov_runtime(adev) &&
541                     down_read_trylock(&adev->reset_domain->sem)) {
542                         amdgpu_kiq_wreg(adev, reg, v);
543                         up_read(&adev->reset_domain->sem);
544                 } else {
545                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546                 }
547         } else {
548                 adev->pcie_wreg(adev, reg * 4, v);
549         }
550
551         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
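
/*
 * Illustrative read-modify-write built on the two helpers above; reg, mask
 * and val are placeholders, not offsets defined in this file:
 *
 *   uint32_t tmp = amdgpu_device_rreg(adev, reg, 0);
 *
 *   tmp &= ~mask;
 *   tmp |= val & mask;
 *   amdgpu_device_wreg(adev, reg, tmp, 0);
 */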
553
554 /**
555  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * this function is invoked only for the debugfs register access
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564                              uint32_t reg, uint32_t v)
565 {
566         if (amdgpu_device_skip_hw_access(adev))
567                 return;
568
569         if (amdgpu_sriov_fullaccess(adev) &&
570             adev->gfx.rlc.funcs &&
571             adev->gfx.rlc.funcs->is_rlcg_access_range) {
572                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574         } else if ((reg * 4) >= adev->rmmio_size) {
575                 adev->pcie_wreg(adev, reg * 4, v);
576         } else {
577                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578         }
579 }
580
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592         if (amdgpu_device_skip_hw_access(adev))
593                 return 0;
594
595         if (index < adev->doorbell.num_doorbells) {
596                 return readl(adev->doorbell.ptr + index);
597         } else {
598                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599                 return 0;
600         }
601 }
602
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615         if (amdgpu_device_skip_hw_access(adev))
616                 return;
617
618         if (index < adev->doorbell.num_doorbells) {
619                 writel(v, adev->doorbell.ptr + index);
620         } else {
621                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622         }
623 }
624
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636         if (amdgpu_device_skip_hw_access(adev))
637                 return 0;
638
639         if (index < adev->doorbell.num_doorbells) {
640                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641         } else {
642                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643                 return 0;
644         }
645 }
646
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659         if (amdgpu_device_skip_hw_access(adev))
660                 return;
661
662         if (index < adev->doorbell.num_doorbells) {
663                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664         } else {
665                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666         }
667 }
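
/*
 * Usage sketch (illustration only): a ring with a valid doorbell slot is
 * typically kicked by writing its write pointer, in whatever unit the
 * engine expects, to that slot.  "ring" is a hypothetical amdgpu_ring here.
 *
 *   if (ring->use_doorbell)
 *           amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */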
668
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680                                 u32 pcie_index, u32 pcie_data,
681                                 u32 reg_addr)
682 {
683         unsigned long flags;
684         u32 r;
685         void __iomem *pcie_index_offset;
686         void __iomem *pcie_data_offset;
687
688         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691
692         writel(reg_addr, pcie_index_offset);
693         readl(pcie_index_offset);
694         r = readl(pcie_data_offset);
695         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696
697         return r;
698 }
699
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711                                   u32 pcie_index, u32 pcie_data,
712                                   u32 reg_addr)
713 {
714         unsigned long flags;
715         u64 r;
716         void __iomem *pcie_index_offset;
717         void __iomem *pcie_data_offset;
718
719         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722
723         /* read low 32 bits */
724         writel(reg_addr, pcie_index_offset);
725         readl(pcie_index_offset);
726         r = readl(pcie_data_offset);
727         /* read high 32 bits */
728         writel(reg_addr + 4, pcie_index_offset);
729         readl(pcie_index_offset);
730         r |= ((u64)readl(pcie_data_offset) << 32);
731         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732
733         return r;
734 }
735
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747                                  u32 pcie_index, u32 pcie_data,
748                                  u32 reg_addr, u32 reg_data)
749 {
750         unsigned long flags;
751         void __iomem *pcie_index_offset;
752         void __iomem *pcie_data_offset;
753
754         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757
758         writel(reg_addr, pcie_index_offset);
759         readl(pcie_index_offset);
760         writel(reg_data, pcie_data_offset);
761         readl(pcie_data_offset);
762         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776                                    u32 pcie_index, u32 pcie_data,
777                                    u32 reg_addr, u64 reg_data)
778 {
779         unsigned long flags;
780         void __iomem *pcie_index_offset;
781         void __iomem *pcie_data_offset;
782
783         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786
787         /* write low 32 bits */
788         writel(reg_addr, pcie_index_offset);
789         readl(pcie_index_offset);
790         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791         readl(pcie_data_offset);
792         /* write high 32 bits */
793         writel(reg_addr + 4, pcie_index_offset);
794         readl(pcie_index_offset);
795         writel((u32)(reg_data >> 32), pcie_data_offset);
796         readl(pcie_data_offset);
797         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
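
/*
 * Sketch of the index/data access pattern the helpers above implement
 * (pcie_index, pcie_data and reg_addr are placeholders supplied by the
 * ASIC-specific code, not values defined here):
 *
 *   u32 v = amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data,
 *                                       reg_addr);
 *
 *   amdgpu_device_indirect_wreg(adev, pcie_index, pcie_data, reg_addr,
 *                               v | 0x1);
 */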
799
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register write function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830                   reg, v);
831         BUG();
832 }
833
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847         BUG();
848         return 0;
849 }
850
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register write function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864                   reg, v);
865         BUG();
866 }
867
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880                                           uint32_t block, uint32_t reg)
881 {
882         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883                   reg, block);
884         BUG();
885         return 0;
886 }
887
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register write function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900                                       uint32_t block,
901                                       uint32_t reg, uint32_t v)
902 {
903         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904                   reg, block, v);
905         BUG();
906 }
907
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917         amdgpu_asic_pre_asic_init(adev);
918
919         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920                 return amdgpu_atomfirmware_asic_init(adev, true);
921         else
922                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937                                        &adev->vram_scratch.robj,
938                                        &adev->vram_scratch.gpu_addr,
939                                        (void **)&adev->vram_scratch.ptr);
940 }
941
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array of registers with AND and OR masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965                                              const u32 *registers,
966                                              const u32 array_size)
967 {
968         u32 tmp, reg, and_mask, or_mask;
969         int i;
970
971         if (array_size % 3)
972                 return;
973
974         for (i = 0; i < array_size; i += 3) {
975                 reg = registers[i + 0];
976                 and_mask = registers[i + 1];
977                 or_mask = registers[i + 2];
978
979                 if (and_mask == 0xffffffff) {
980                         tmp = or_mask;
981                 } else {
982                         tmp = RREG32(reg);
983                         tmp &= ~and_mask;
984                         if (adev->family >= AMDGPU_FAMILY_AI)
985                                 tmp |= (or_mask & and_mask);
986                         else
987                                 tmp |= or_mask;
988                 }
989                 WREG32(reg, tmp);
990         }
991 }
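
/*
 * Golden register arrays are flat {offset, and_mask, or_mask} triplets; an
 * and_mask of 0xffffffff means "overwrite with or_mask".  A hedged example
 * with made-up offsets and masks (only the layout matters):
 *
 *   static const u32 example_golden_regs[] = {
 *           0x1234, 0xffffffff, 0x00000001,
 *           0x1238, 0x0000000f, 0x00000002,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, example_golden_regs,
 *                                           ARRAY_SIZE(example_golden_regs));
 */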
992
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015         return pci_reset_function(adev->pdev);
1016 }
1017
1018 /*
1019  * GPU doorbell aperture helper functions.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031
1032         /* No doorbell on SI hardware generation */
1033         if (adev->asic_type < CHIP_BONAIRE) {
1034                 adev->doorbell.base = 0;
1035                 adev->doorbell.size = 0;
1036                 adev->doorbell.num_doorbells = 0;
1037                 adev->doorbell.ptr = NULL;
1038                 return 0;
1039         }
1040
1041         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042                 return -EINVAL;
1043
1044         amdgpu_asic_init_doorbell_index(adev);
1045
1046         /* doorbell bar mapping */
1047         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049
1050         if (adev->enable_mes) {
1051                 adev->doorbell.num_doorbells =
1052                         adev->doorbell.size / sizeof(u32);
1053         } else {
1054                 adev->doorbell.num_doorbells =
1055                         min_t(u32, adev->doorbell.size / sizeof(u32),
1056                               adev->doorbell_index.max_assignment+1);
1057                 if (adev->doorbell.num_doorbells == 0)
1058                         return -EINVAL;
1059
1060                 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1061                  * paging queue doorbell use the second page. The
1062                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063                  * doorbells are in the first page. So with paging queue enabled,
1064                  * the max num_doorbells should + 1 page (0x400 in dword)
1065                  */
1066                 if (adev->asic_type >= CHIP_VEGA10)
1067                         adev->doorbell.num_doorbells += 0x400;
1068         }
1069
1070         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071                                      adev->doorbell.num_doorbells *
1072                                      sizeof(u32));
1073         if (adev->doorbell.ptr == NULL)
1074                 return -ENOMEM;
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088         iounmap(adev->doorbell.ptr);
1089         adev->doorbell.ptr = NULL;
1090 }
1091
1092
1093
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers, etc.).
1098  */
1099
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110         if (adev->wb.wb_obj) {
1111                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112                                       &adev->wb.gpu_addr,
1113                                       (void **)&adev->wb.wb);
1114                 adev->wb.wb_obj = NULL;
1115         }
1116 }
1117
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or a negative error code on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129         int r;
1130
1131         if (adev->wb.wb_obj == NULL) {
1132                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136                                             (void **)&adev->wb.wb);
1137                 if (r) {
1138                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139                         return r;
1140                 }
1141
1142                 adev->wb.num_wb = AMDGPU_MAX_WB;
1143                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144
1145                 /* clear wb memory */
1146                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147         }
1148
1149         return 0;
1150 }
1151
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164
1165         if (offset < adev->wb.num_wb) {
1166                 __set_bit(offset, adev->wb.used);
1167                 *wb = offset << 3; /* convert to dw offset */
1168                 return 0;
1169         } else {
1170                 return -EINVAL;
1171         }
1172 }
1173
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184         wb >>= 3;
1185         if (wb < adev->wb.num_wb)
1186                 __clear_bit(wb, adev->wb.used);
1187 }
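
/*
 * Typical writeback slot lifetime (illustration only, error handling
 * trimmed): the index returned by amdgpu_device_wb_get() is a dword offset
 * into adev->wb.wb, and the matching GPU address is adev->wb.gpu_addr plus
 * that offset times 4.
 *
 *   u32 wb_idx;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *           u64 gpu_addr = adev->wb.gpu_addr + wb_idx * 4;
 *
 *           adev->wb.wb[wb_idx] = 0;
 *           ... hand gpu_addr to the fence/ring user ...
 *           amdgpu_device_wb_free(adev, wb_idx);
 *   }
 */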
1188
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the resize we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201         struct pci_bus *root;
1202         struct resource *res;
1203         unsigned i;
1204         u16 cmd;
1205         int r;
1206
1207         /* Bypass for VF */
1208         if (amdgpu_sriov_vf(adev))
1209                 return 0;
1210
1211         /* skip if the bios has already enabled large BAR */
1212         if (adev->gmc.real_vram_size &&
1213             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214                 return 0;
1215
1216         /* Check if the root BUS has 64bit memory resources */
1217         root = adev->pdev->bus;
1218         while (root->parent)
1219                 root = root->parent;
1220
1221         pci_bus_for_each_resource(root, res, i) {
1222                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223                     res->start > 0x100000000ull)
1224                         break;
1225         }
1226
1227         /* Trying to resize is pointless without a root hub window above 4GB */
1228         if (!res)
1229                 return 0;
1230
1231         /* Limit the BAR size to what is available */
1232         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233                         rbar_size);
1234
1235         /* Disable memory decoding while we change the BAR addresses and size */
1236         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237         pci_write_config_word(adev->pdev, PCI_COMMAND,
1238                               cmd & ~PCI_COMMAND_MEMORY);
1239
1240         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241         amdgpu_device_doorbell_fini(adev);
1242         if (adev->asic_type >= CHIP_BONAIRE)
1243                 pci_release_resource(adev->pdev, 2);
1244
1245         pci_release_resource(adev->pdev, 0);
1246
1247         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248         if (r == -ENOSPC)
1249                 DRM_INFO("Not enough PCI address space for a large BAR.");
1250         else if (r && r != -ENOTSUPP)
1251                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252
1253         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254
1255         /* When the doorbell or fb BAR isn't available we have no chance of
1256          * using the device.
1257          */
1258         r = amdgpu_device_doorbell_init(adev);
1259         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260                 return -ENODEV;
1261
1262         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263
1264         return 0;
1265 }
1266
1267 /*
1268  * GPU helper functions.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw need post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup,
1276  * or if post is needed because a hw reset was performed.
1277  * Returns true if post is needed, false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281         uint32_t reg;
1282
1283         if (amdgpu_sriov_vf(adev))
1284                 return false;
1285
1286         if (amdgpu_passthrough(adev)) {
1287                 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1288                  * reboot some old SMC firmware still needs the driver to do vPost or the
1289                  * GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
1290                  * force vPost for SMC versions below 22.15.
1291                  */
1292                 if (adev->asic_type == CHIP_FIJI) {
1293                         int err;
1294                         uint32_t fw_ver;
1295                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296                         /* force vPost if an error occurred */
1297                         if (err)
1298                                 return true;
1299
1300                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301                         if (fw_ver < 0x00160e00)
1302                                 return true;
1303                 }
1304         }
1305
1306         /* Don't post if we need to reset whole hive on init */
1307         if (adev->gmc.xgmi.pending_reset)
1308                 return false;
1309
1310         if (adev->has_hw_reset) {
1311                 adev->has_hw_reset = false;
1312                 return true;
1313         }
1314
1315         /* bios scratch used on CIK+ */
1316         if (adev->asic_type >= CHIP_BONAIRE)
1317                 return amdgpu_atombios_scratch_need_asic_init(adev);
1318
1319         /* check MEM_SIZE for older asics */
1320         reg = amdgpu_asic_get_config_memsize(adev);
1321
1322         if ((reg != 0) && (reg != 0xffffffff))
1323                 return false;
1324
1325         return true;
1326 }
1327
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340         switch (amdgpu_aspm) {
1341         case -1:
1342                 break;
1343         case 0:
1344                 return false;
1345         case 1:
1346                 return true;
1347         default:
1348                 return false;
1349         }
1350         return pcie_aspm_enabled(adev->pdev);
1351 }
1352
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364                 bool state)
1365 {
1366         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367         amdgpu_asic_set_vga_state(adev, state);
1368         if (state)
1369                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371         else
1372                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines number of bits in page table versus page directory,
1382  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383  * page table and the remaining bits are in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387         /* defines number of bits in page table versus page directory,
1388          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389          * page table and the remaining bits are in the page directory */
1390         if (amdgpu_vm_block_size == -1)
1391                 return;
1392
1393         if (amdgpu_vm_block_size < 9) {
1394                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395                          amdgpu_vm_block_size);
1396                 amdgpu_vm_block_size = -1;
1397         }
1398 }
1399
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410         /* no need to check the default value */
1411         if (amdgpu_vm_size == -1)
1412                 return;
1413
1414         if (amdgpu_vm_size < 1) {
1415                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416                          amdgpu_vm_size);
1417                 amdgpu_vm_size = -1;
1418         }
1419 }
1420
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423         struct sysinfo si;
1424         bool is_os_64 = (sizeof(void *) == 8);
1425         uint64_t total_memory;
1426         uint64_t dram_size_seven_GB = 0x1B8000000;
1427         uint64_t dram_size_three_GB = 0xB8000000;
1428
1429         if (amdgpu_smu_memory_pool_size == 0)
1430                 return;
1431
1432         if (!is_os_64) {
1433                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434                 goto def_value;
1435         }
1436         si_meminfo(&si);
1437         total_memory = (uint64_t)si.totalram * si.mem_unit;
1438
1439         if ((amdgpu_smu_memory_pool_size == 1) ||
1440                 (amdgpu_smu_memory_pool_size == 2)) {
1441                 if (total_memory < dram_size_three_GB)
1442                         goto def_value1;
1443         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444                 (amdgpu_smu_memory_pool_size == 8)) {
1445                 if (total_memory < dram_size_seven_GB)
1446                         goto def_value1;
1447         } else {
1448                 DRM_WARN("Smu memory pool size not supported\n");
1449                 goto def_value;
1450         }
1451         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452
1453         return;
1454
1455 def_value1:
1456         DRM_WARN("Not enough system memory\n");
1457 def_value:
1458         adev->pm.smu_prv_buffer_size = 0;
1459 }
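
/*
 * For reference: amdgpu_smu_memory_pool_size is given in units of 256 MB,
 * so the shift by 28 above converts it to bytes.  For example, a module
 * parameter of 4 yields a 1 GiB (0x40000000 byte) pool, subject to the
 * ~3 GB / ~7 GB system memory checks in this function.
 */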
1460
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463         if (!(adev->flags & AMD_IS_APU) ||
1464             adev->asic_type < CHIP_RAVEN)
1465                 return 0;
1466
1467         switch (adev->asic_type) {
1468         case CHIP_RAVEN:
1469                 if (adev->pdev->device == 0x15dd)
1470                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1471                 if (adev->pdev->device == 0x15d8)
1472                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1473                 break;
1474         case CHIP_RENOIR:
1475                 if ((adev->pdev->device == 0x1636) ||
1476                     (adev->pdev->device == 0x164c))
1477                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1478                 else
1479                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480                 break;
1481         case CHIP_VANGOGH:
1482                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483                 break;
1484         case CHIP_YELLOW_CARP:
1485                 break;
1486         case CHIP_CYAN_SKILLFISH:
1487                 if ((adev->pdev->device == 0x13FE) ||
1488                     (adev->pdev->device == 0x143F))
1489                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490                 break;
1491         default:
1492                 break;
1493         }
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508         if (amdgpu_sched_jobs < 4) {
1509                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510                          amdgpu_sched_jobs);
1511                 amdgpu_sched_jobs = 4;
1512         } else if (!is_power_of_2(amdgpu_sched_jobs)){
1513                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514                          amdgpu_sched_jobs);
1515                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516         }
1517
1518         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519                 /* gart size must be greater or equal to 32M */
1520                 dev_warn(adev->dev, "gart size (%d) too small\n",
1521                          amdgpu_gart_size);
1522                 amdgpu_gart_size = -1;
1523         }
1524
1525         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526                 /* gtt size must be greater or equal to 32M */
1527                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1528                                  amdgpu_gtt_size);
1529                 amdgpu_gtt_size = -1;
1530         }
1531
1532         /* valid range is between 4 and 9 inclusive */
1533         if (amdgpu_vm_fragment_size != -1 &&
1534             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536                 amdgpu_vm_fragment_size = -1;
1537         }
1538
1539         if (amdgpu_sched_hw_submission < 2) {
1540                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541                          amdgpu_sched_hw_submission);
1542                 amdgpu_sched_hw_submission = 2;
1543         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545                          amdgpu_sched_hw_submission);
1546                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547         }
1548
1549         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551                 amdgpu_reset_method = -1;
1552         }
1553
1554         amdgpu_device_check_smu_prv_buffer_size(adev);
1555
1556         amdgpu_device_check_vm_size(adev);
1557
1558         amdgpu_device_check_block_size(adev);
1559
1560         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561
1562         return 0;
1563 }
1564
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes
1572  * the asics before or after they are powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575                                         enum vga_switcheroo_state state)
1576 {
1577         struct drm_device *dev = pci_get_drvdata(pdev);
1578         int r;
1579
1580         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581                 return;
1582
1583         if (state == VGA_SWITCHEROO_ON) {
1584                 pr_info("switched on\n");
1585                 /* don't suspend or resume card normally */
1586                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588                 pci_set_power_state(pdev, PCI_D0);
1589                 amdgpu_device_load_pci_state(pdev);
1590                 r = pci_enable_device(pdev);
1591                 if (r)
1592                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1593                 amdgpu_device_resume(dev, true);
1594
1595                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596         } else {
1597                 pr_info("switched off\n");
1598                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599                 amdgpu_device_suspend(dev, true);
1600                 amdgpu_device_cache_pci_state(pdev);
1601                 /* Shut down the device */
1602                 pci_disable_device(pdev);
1603                 pci_set_power_state(pdev, PCI_D3cold);
1604                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605         }
1606 }
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Checks whether the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623          * locking inversion with the driver load path. And the access here is
1624          * completely racy anyway. So don't bother with locking for now.
1625         */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
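
/*
 * Editor's note (illustrative sketch): these ops are what the driver hands to
 * the vga_switcheroo framework elsewhere in the driver, roughly along the
 * lines of the call below (the exact call site and the runtime-PM flag are
 * assumptions, not quoted from this excerpt):
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 *
 * where 'px' indicates whether the ASIC supports PX/hybrid graphics power
 * control.
 */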
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
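
/*
 * Example (editor's illustrative sketch): an IP-independent caller that wants
 * to gate clocks on every GFX instance could do something like:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "GFX clockgating failed (%d)\n", r);
 *
 * Only the error from the last matching instance is reported, as noted above.
 */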
1668
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1752
1753 }
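
/*
 * Example (editor's illustrative sketch): callers typically pair the idle
 * helpers with a specific block type, e.g. waiting for the GMC before
 * touching its state:
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (r)
 *		return r;
 */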
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1776
1777 }
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
1800
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * Returns 0 if the IP block version is equal to or greater than the one given,
1810  * 1 if it is smaller or the ip_block doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
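
/*
 * Example (editor's illustrative sketch): the 0-on-success convention makes
 * this read like a feature test, e.g. "is GMC v8.1 or newer present?":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC,
 *					       8, 1) == 0) {
 *		// GMC 8.1 or newer is available
 *	}
 */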
1825
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
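
/*
 * Example (editor's illustrative sketch): the per-ASIC helpers such as
 * si_set_ip_blocks() and vi_set_ip_blocks() referenced later in this file
 * build the IP list with repeated calls of the form
 *
 *	amdgpu_device_ip_block_add(adev, &some_ip_block_version);
 *
 * where &some_ip_block_version stands in for the exported
 * amdgpu_ip_block_version structures of the individual IP drivers.
 */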
1861
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
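
/*
 * Editor's note (derived from the parsing above; the module parameter name is
 * taken from the comment, the example PCI address is hypothetical): the
 * virtual_display string is a ';'-separated list of entries of the form
 * "<pci address>[,<num_crtc>]", with "all" matching every device, e.g.
 *
 *	amdgpu.virtual_display=0000:04:00.0,2
 *
 * which enables two virtual crtcs on the device at that PCI address; num_crtc
 * is clamped to the 1..6 range and defaults to 1 when omitted.
 */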
1917
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated in discovery table,
2035                  * we always need to parse it from gpu info firmware if needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
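
/*
 * Editor's note (illustrative): the firmware file name is built as
 * "amdgpu/<chip>_gpu_info.bin" from the chip_name chosen above, so e.g. a
 * Vega10 board requests "amdgpu/vega10_gpu_info.bin" from the firmware
 * loader; ASICs whose parameters come entirely from the IP discovery table
 * (other than Navi12) skip this step.
 */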
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at the earliest possible time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw == true)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to set up the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 /* need to do gmc hw init early so we can allocate gpu mem */
2369                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2370                         /* Try to reserve bad pages early */
2371                         if (amdgpu_sriov_vf(adev))
2372                                 amdgpu_virt_exchange_data(adev);
2373
2374                         r = amdgpu_device_vram_scratch_init(adev);
2375                         if (r) {
2376                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2377                                 goto init_failed;
2378                         }
2379                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2380                         if (r) {
2381                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2382                                 goto init_failed;
2383                         }
2384                         r = amdgpu_device_wb_init(adev);
2385                         if (r) {
2386                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2387                                 goto init_failed;
2388                         }
2389                         adev->ip_blocks[i].status.hw = true;
2390
2391                         /* right after GMC hw init, we create CSA */
2392                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2393                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2394                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2395                                                                 AMDGPU_CSA_SIZE);
2396                                 if (r) {
2397                                         DRM_ERROR("allocate CSA failed %d\n", r);
2398                                         goto init_failed;
2399                                 }
2400                         }
2401                 }
2402         }
2403
2404         if (amdgpu_sriov_vf(adev))
2405                 amdgpu_virt_init_data_exchange(adev);
2406
2407         r = amdgpu_ib_pool_init(adev);
2408         if (r) {
2409                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2410                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2411                 goto init_failed;
2412         }
2413
2414         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2415         if (r)
2416                 goto init_failed;
2417
2418         r = amdgpu_device_ip_hw_init_phase1(adev);
2419         if (r)
2420                 goto init_failed;
2421
2422         r = amdgpu_device_fw_loading(adev);
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase2(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         /*
2431          * Retired pages will be loaded from eeprom and reserved here.
2432          * This should be called after amdgpu_device_ip_hw_init_phase2 since
2433          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2434          * functional for I2C communication, which is only true at this point.
2435          *
2436          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2437          * about failures caused by a bad GPU state and stop the amdgpu init
2438          * process accordingly.  For other failures it still releases all the
2439          * resources and prints an error message rather than returning a
2440          * negative value to the caller.
2441          *
2442          * Note: in theory this should be called before all vram allocations
2443          * to protect retired pages from being abused.
2444          */
2445         r = amdgpu_ras_recovery_init(adev);
2446         if (r)
2447                 goto init_failed;
2448
2449         /*
2450          * In case of XGMI, grab an extra reference on the reset domain for this device
2451          */
2452         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2453                 if (amdgpu_xgmi_add_device(adev) == 0) {
2454                         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2455
2456                         if (!hive->reset_domain ||
2457                             !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2458                                 r = -ENOENT;
2459                                 goto init_failed;
2460                         }
2461
2462                         /* Drop the early temporary reset domain we created for device */
2463                         amdgpu_reset_put_reset_domain(adev->reset_domain);
2464                         adev->reset_domain = hive->reset_domain;
2465                 }
2466         }
2467
2468         r = amdgpu_device_init_schedulers(adev);
2469         if (r)
2470                 goto init_failed;
2471
2472         /* Don't init kfd if the whole hive needs to be reset during init */
2473         if (!adev->gmc.xgmi.pending_reset)
2474                 amdgpu_amdkfd_device_init(adev);
2475
2476         amdgpu_fru_get_product_info(adev);
2477
2478 init_failed:
2479         if (amdgpu_sriov_vf(adev))
2480                 amdgpu_virt_release_full_gpu(adev, true);
2481
2482         return r;
2483 }
2484
2485 /**
2486  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2487  *
2488  * @adev: amdgpu_device pointer
2489  *
2490  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2491  * this function before a GPU reset.  If the value is retained after a
2492  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2493  */
2494 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2495 {
2496         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2497 }
2498
2499 /**
2500  * amdgpu_device_check_vram_lost - check if vram is valid
2501  *
2502  * @adev: amdgpu_device pointer
2503  *
2504  * Checks the reset magic value written to the gart pointer in VRAM.
2505  * The driver calls this after a GPU reset to see if the contents of
2506  * VRAM have been lost or not.
2507  * Returns true if vram is lost, false if not.
2508  */
2509 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2510 {
2511         if (memcmp(adev->gart.ptr, adev->reset_magic,
2512                         AMDGPU_RESET_MAGIC_NUM))
2513                 return true;
2514
2515         if (!amdgpu_in_reset(adev))
2516                 return false;
2517
2518         /*
2519          * For all ASICs with baco/mode1 reset, the VRAM is
2520          * always assumed to be lost.
2521          */
2522         switch (amdgpu_asic_reset_method(adev)) {
2523         case AMD_RESET_METHOD_BACO:
2524         case AMD_RESET_METHOD_MODE1:
2525                 return true;
2526         default:
2527                 return false;
2528         }
2529 }
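
/*
 * Example (editor's illustrative sketch): reset paths later in the driver use
 * this check to decide whether buffer contents must be restored, along the
 * lines of:
 *
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *	if (vram_lost)
 *		amdgpu_inc_vram_lost(adev);
 *
 * where amdgpu_inc_vram_lost() is a counter helper assumed to exist in the
 * wider driver (it is not defined in this excerpt).
 */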
2530
2531 /**
2532  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2533  *
2534  * @adev: amdgpu_device pointer
2535  * @state: clockgating state (gate or ungate)
2536  *
2537  * The list of all the hardware IPs that make up the asic is walked and the
2538  * set_clockgating_state callbacks are run.
2539  * The late init pass enables clockgating for hardware IPs; the fini and
2540  * suspend passes disable it.
2541  * Returns 0 on success, negative error code on failure.
2542  */
2543
2544 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2545                                enum amd_clockgating_state state)
2546 {
2547         int i, j, r;
2548
2549         if (amdgpu_emu_mode == 1)
2550                 return 0;
2551
2552         for (j = 0; j < adev->num_ip_blocks; j++) {
2553                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2554                 if (!adev->ip_blocks[i].status.late_initialized)
2555                         continue;
2556                 /* skip CG for GFX on S0ix */
2557                 if (adev->in_s0ix &&
2558                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2559                         continue;
2560                 /* skip CG for VCE/UVD, it's handled specially */
2561                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2562                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2563                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2564                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2565                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2566                         /* enable clockgating to save power */
2567                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2568                                                                                      state);
2569                         if (r) {
2570                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2571                                           adev->ip_blocks[i].version->funcs->name, r);
2572                                 return r;
2573                         }
2574                 }
2575         }
2576
2577         return 0;
2578 }
2579
2580 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2581                                enum amd_powergating_state state)
2582 {
2583         int i, j, r;
2584
2585         if (amdgpu_emu_mode == 1)
2586                 return 0;
2587
2588         for (j = 0; j < adev->num_ip_blocks; j++) {
2589                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2590                 if (!adev->ip_blocks[i].status.late_initialized)
2591                         continue;
2592                 /* skip PG for GFX on S0ix */
2593                 if (adev->in_s0ix &&
2594                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2595                         continue;
2596                 /* skip PG for VCE/UVD, it's handled specially */
2597                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2598                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2599                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2600                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2601                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2602                         /* enable powergating to save power */
2603                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2604                                                                                         state);
2605                         if (r) {
2606                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2607                                           adev->ip_blocks[i].version->funcs->name, r);
2608                                 return r;
2609                         }
2610                 }
2611         }
2612         return 0;
2613 }
2614
2615 static int amdgpu_device_enable_mgpu_fan_boost(void)
2616 {
2617         struct amdgpu_gpu_instance *gpu_ins;
2618         struct amdgpu_device *adev;
2619         int i, ret = 0;
2620
2621         mutex_lock(&mgpu_info.mutex);
2622
2623         /*
2624          * MGPU fan boost feature should be enabled
2625          * only when there are two or more dGPUs in
2626          * the system
2627          */
2628         if (mgpu_info.num_dgpu < 2)
2629                 goto out;
2630
2631         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2632                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2633                 adev = gpu_ins->adev;
2634                 if (!(adev->flags & AMD_IS_APU) &&
2635                     !gpu_ins->mgpu_fan_enabled) {
2636                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2637                         if (ret)
2638                                 break;
2639
2640                         gpu_ins->mgpu_fan_enabled = 1;
2641                 }
2642         }
2643
2644 out:
2645         mutex_unlock(&mgpu_info.mutex);
2646
2647         return ret;
2648 }
2649
2650 /**
2651  * amdgpu_device_ip_late_init - run late init for hardware IPs
2652  *
2653  * @adev: amdgpu_device pointer
2654  *
2655  * Late initialization pass for hardware IPs.  The list of all the hardware
2656  * IPs that make up the asic is walked and the late_init callbacks are run.
2657  * late_init covers any special initialization that an IP requires
2658  * after all of the IPs have been initialized or something that needs to happen
2659  * late in the init process.
2660  * Returns 0 on success, negative error code on failure.
2661  */
2662 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2663 {
2664         struct amdgpu_gpu_instance *gpu_instance;
2665         int i = 0, r;
2666
2667         for (i = 0; i < adev->num_ip_blocks; i++) {
2668                 if (!adev->ip_blocks[i].status.hw)
2669                         continue;
2670                 if (adev->ip_blocks[i].version->funcs->late_init) {
2671                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2672                         if (r) {
2673                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2674                                           adev->ip_blocks[i].version->funcs->name, r);
2675                                 return r;
2676                         }
2677                 }
2678                 adev->ip_blocks[i].status.late_initialized = true;
2679         }
2680
2681         r = amdgpu_ras_late_init(adev);
2682         if (r) {
2683                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2684                 return r;
2685         }
2686
2687         amdgpu_ras_set_error_query_ready(adev, true);
2688
2689         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2690         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2691
2692         amdgpu_device_fill_reset_magic(adev);
2693
2694         r = amdgpu_device_enable_mgpu_fan_boost();
2695         if (r)
2696                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2697
2698         /* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
2699         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2700                                adev->asic_type == CHIP_ALDEBARAN))
2701                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2702
2703         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2704                 mutex_lock(&mgpu_info.mutex);
2705
2706                 /*
2707                  * Reset device p-state to low as this was booted with high.
2708                  *
2709                  * This should be performed only after all devices from the same
2710                  * hive get initialized.
2711                  *
2712                  * However, the number of devices in a hive is not known in
2713                  * advance; it is counted one by one as the devices initialize.
2714                  *
2715                  * So we wait until all XGMI interlinked devices are initialized.
2716                  * This may bring some delay as those devices may come from
2717                  * different hives. But that should be OK.
2718                  */
2719                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2720                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2721                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2722                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2723                                         continue;
2724
2725                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2726                                                 AMDGPU_XGMI_PSTATE_MIN);
2727                                 if (r) {
2728                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2729                                         break;
2730                                 }
2731                         }
2732                 }
2733
2734                 mutex_unlock(&mgpu_info.mutex);
2735         }
2736
2737         return 0;
2738 }
2739
2740 /**
2741  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2742  *
2743  * @adev: amdgpu_device pointer
2744  *
2745  * For ASICs that need to disable the SMC first
2746  */
2747 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2748 {
2749         int i, r;
2750
2751         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2752                 return;
2753
2754         for (i = 0; i < adev->num_ip_blocks; i++) {
2755                 if (!adev->ip_blocks[i].status.hw)
2756                         continue;
2757                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2758                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2759                         /* XXX handle errors */
2760                         if (r) {
2761                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2762                                           adev->ip_blocks[i].version->funcs->name, r);
2763                         }
2764                         adev->ip_blocks[i].status.hw = false;
2765                         break;
2766                 }
2767         }
2768 }
2769
2770 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2771 {
2772         int i, r;
2773
2774         for (i = 0; i < adev->num_ip_blocks; i++) {
2775                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2776                         continue;
2777
2778                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2779                 if (r) {
2780                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2781                                   adev->ip_blocks[i].version->funcs->name, r);
2782                 }
2783         }
2784
2785         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2786         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2787
2788         amdgpu_amdkfd_suspend(adev, false);
2789
2790         /* Workaround for ASICs that need to disable the SMC first */
2791         amdgpu_device_smu_fini_early(adev);
2792
2793         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2794                 if (!adev->ip_blocks[i].status.hw)
2795                         continue;
2796
2797                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2798                 /* XXX handle errors */
2799                 if (r) {
2800                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2801                                   adev->ip_blocks[i].version->funcs->name, r);
2802                 }
2803
2804                 adev->ip_blocks[i].status.hw = false;
2805         }
2806
2807         if (amdgpu_sriov_vf(adev)) {
2808                 if (amdgpu_virt_release_full_gpu(adev, false))
2809                         DRM_ERROR("failed to release exclusive mode on fini\n");
2810         }
2811
2812         return 0;
2813 }
2814
2815 /**
2816  * amdgpu_device_ip_fini - run fini for hardware IPs
2817  *
2818  * @adev: amdgpu_device pointer
2819  *
2820  * Main teardown pass for hardware IPs.  The list of all the hardware
2821  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2822  * are run.  hw_fini tears down the hardware associated with each IP
2823  * and sw_fini tears down any software state associated with each IP.
2824  * Returns 0 on success, negative error code on failure.
2825  */
2826 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2827 {
2828         int i, r;
2829
2830         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2831                 amdgpu_virt_release_ras_err_handler_data(adev);
2832
2833         if (adev->gmc.xgmi.num_physical_nodes > 1)
2834                 amdgpu_xgmi_remove_device(adev);
2835
2836         amdgpu_amdkfd_device_fini_sw(adev);
2837
2838         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2839                 if (!adev->ip_blocks[i].status.sw)
2840                         continue;
2841
2842                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2843                         amdgpu_ucode_free_bo(adev);
2844                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2845                         amdgpu_device_wb_fini(adev);
2846                         amdgpu_device_vram_scratch_fini(adev);
2847                         amdgpu_ib_pool_fini(adev);
2848                 }
2849
2850                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2851                 /* XXX handle errors */
2852                 if (r) {
2853                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2854                                   adev->ip_blocks[i].version->funcs->name, r);
2855                 }
2856                 adev->ip_blocks[i].status.sw = false;
2857                 adev->ip_blocks[i].status.valid = false;
2858         }
2859
2860         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2861                 if (!adev->ip_blocks[i].status.late_initialized)
2862                         continue;
2863                 if (adev->ip_blocks[i].version->funcs->late_fini)
2864                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2865                 adev->ip_blocks[i].status.late_initialized = false;
2866         }
2867
2868         amdgpu_ras_fini(adev);
2869
2870         return 0;
2871 }
2872
2873 /**
2874  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2875  *
2876  * @work: work_struct.
2877  */
2878 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2879 {
2880         struct amdgpu_device *adev =
2881                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2882         int r;
2883
2884         r = amdgpu_ib_ring_tests(adev);
2885         if (r)
2886                 DRM_ERROR("ib ring test failed (%d).\n", r);
2887 }
2888
2889 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2890 {
2891         struct amdgpu_device *adev =
2892                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2893
2894         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2895         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2896
2897         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2898                 adev->gfx.gfx_off_state = true;
2899 }
2900
2901 /**
2902  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2903  *
2904  * @adev: amdgpu_device pointer
2905  *
2906  * Main suspend function for hardware IPs.  The list of all the hardware
2907  * IPs that make up the asic is walked, clockgating is disabled and the
2908  * suspend callbacks are run.  suspend puts the hardware and software state
2909  * in each IP into a state suitable for suspend.
2910  * Returns 0 on success, negative error code on failure.
2911  */
2912 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2913 {
2914         int i, r;
2915
2916         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2917         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2918
2919         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2920                 if (!adev->ip_blocks[i].status.valid)
2921                         continue;
2922
2923                 /* displays are handled separately */
2924                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2925                         continue;
2926
2928                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2929                 /* XXX handle errors */
2930                 if (r) {
2931                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2932                                   adev->ip_blocks[i].version->funcs->name, r);
2933                         return r;
2934                 }
2935
2936                 adev->ip_blocks[i].status.hw = false;
2937         }
2938
2939         return 0;
2940 }
2941
2942 /**
2943  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2944  *
2945  * @adev: amdgpu_device pointer
2946  *
2947  * Main suspend function for hardware IPs (phase 2).  The list of all the
2948  * hardware IPs that make up the asic is walked and the suspend callbacks
2949  * are run for every block except the display (DCE) blocks, which were
2950  * already handled in phase 1.  suspend puts each IP into a state suitable for suspend.
2951  * Returns 0 on success, negative error code on failure.
2952  */
2953 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2954 {
2955         int i, r;
2956
2957         if (adev->in_s0ix)
2958                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2959
2960         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2961                 if (!adev->ip_blocks[i].status.valid)
2962                         continue;
2963                 /* displays are handled in phase1 */
2964                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2965                         continue;
2966                 /* PSP lost connection when err_event_athub occurs */
2967                 if (amdgpu_ras_intr_triggered() &&
2968                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2969                         adev->ip_blocks[i].status.hw = false;
2970                         continue;
2971                 }
2972
2973                 /* skip unnecessary suspends for blocks we have not initialized yet */
2974                 if (adev->gmc.xgmi.pending_reset &&
2975                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2976                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2977                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2978                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2979                         adev->ip_blocks[i].status.hw = false;
2980                         continue;
2981                 }
2982
2983                 /* skip suspend of gfx and psp for S0ix
2984                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2985                  * like at runtime. PSP is also part of the always on hardware
2986                  * so no need to suspend it.
2987                  */
2988                 if (adev->in_s0ix &&
2989                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2990                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2991                         continue;
2992
2994                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2995                 /* XXX handle errors */
2996                 if (r) {
2997                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2998                                   adev->ip_blocks[i].version->funcs->name, r);
2999                 }
3000                 adev->ip_blocks[i].status.hw = false;
3001                 /* handle putting the SMC in the appropriate state */
3002                 if (!amdgpu_sriov_vf(adev)) {
3003                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3004                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3005                                 if (r) {
3006                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3007                                                         adev->mp1_state, r);
3008                                         return r;
3009                                 }
3010                         }
3011                 }
3012         }
3013
3014         return 0;
3015 }
3016
3017 /**
3018  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3019  *
3020  * @adev: amdgpu_device pointer
3021  *
3022  * Main suspend function for hardware IPs.  The list of all the hardware
3023  * IPs that make up the asic is walked, clockgating is disabled and the
3024  * suspend callbacks are run.  suspend puts the hardware and software state
3025  * in each IP into a state suitable for suspend.
3026  * Returns 0 on success, negative error code on failure.
3027  */
3028 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3029 {
3030         int r;
3031
3032         if (amdgpu_sriov_vf(adev)) {
3033                 amdgpu_virt_fini_data_exchange(adev);
3034                 amdgpu_virt_request_full_gpu(adev, false);
3035         }
3036
3037         r = amdgpu_device_ip_suspend_phase1(adev);
3038         if (r)
3039                 return r;
3040         r = amdgpu_device_ip_suspend_phase2(adev);
3041
3042         if (amdgpu_sriov_vf(adev))
3043                 amdgpu_virt_release_full_gpu(adev, false);
3044
3045         return r;
3046 }
3047
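     /**
      * amdgpu_device_ip_reinit_early_sriov - reinit early hardware IPs after an SR-IOV reset
      *
      * @adev: amdgpu_device pointer
      *
      * Re-runs hw_init for the GMC, COMMON, PSP and IH blocks so that basic
      * device access works again after a virtual function reset; the remaining
      * blocks are brought back by amdgpu_device_ip_reinit_late_sriov().
      * Returns 0 on success, negative error code on failure.
      */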
3048 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3049 {
3050         int i, r;
3051
3052         static enum amd_ip_block_type ip_order[] = {
3053                 AMD_IP_BLOCK_TYPE_GMC,
3054                 AMD_IP_BLOCK_TYPE_COMMON,
3055                 AMD_IP_BLOCK_TYPE_PSP,
3056                 AMD_IP_BLOCK_TYPE_IH,
3057         };
3058
3059         for (i = 0; i < adev->num_ip_blocks; i++) {
3060                 int j;
3061                 struct amdgpu_ip_block *block;
3062
3063                 block = &adev->ip_blocks[i];
3064                 block->status.hw = false;
3065
3066                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3067
3068                         if (block->version->type != ip_order[j] ||
3069                                 !block->status.valid)
3070                                 continue;
3071
3072                         r = block->version->funcs->hw_init(adev);
3073                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3074                         if (r)
3075                                 return r;
3076                         block->status.hw = true;
3077                 }
3078         }
3079
3080         return 0;
3081 }
3082
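     /**
      * amdgpu_device_ip_reinit_late_sriov - reinit remaining hardware IPs after an SR-IOV reset
      *
      * @adev: amdgpu_device pointer
      *
      * Brings the SMC, display, GFX, SDMA and multimedia blocks back up in the
      * fixed order given by ip_order.  The SMC is resumed rather than
      * re-initialized; everything else goes through hw_init.
      * Returns 0 on success, negative error code on failure.
      */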
3083 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3084 {
3085         int i, r;
3086
3087         static enum amd_ip_block_type ip_order[] = {
3088                 AMD_IP_BLOCK_TYPE_SMC,
3089                 AMD_IP_BLOCK_TYPE_DCE,
3090                 AMD_IP_BLOCK_TYPE_GFX,
3091                 AMD_IP_BLOCK_TYPE_SDMA,
3092                 AMD_IP_BLOCK_TYPE_UVD,
3093                 AMD_IP_BLOCK_TYPE_VCE,
3094                 AMD_IP_BLOCK_TYPE_VCN
3095         };
3096
3097         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3098                 int j;
3099                 struct amdgpu_ip_block *block;
3100
3101                 for (j = 0; j < adev->num_ip_blocks; j++) {
3102                         block = &adev->ip_blocks[j];
3103
3104                         if (block->version->type != ip_order[i] ||
3105                                 !block->status.valid ||
3106                                 block->status.hw)
3107                                 continue;
3108
3109                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3110                                 r = block->version->funcs->resume(adev);
3111                         else
3112                                 r = block->version->funcs->hw_init(adev);
3113
3114                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3115                         if (r)
3116                                 return r;
3117                         block->status.hw = true;
3118                 }
3119         }
3120
3121         return 0;
3122 }
3123
3124 /**
3125  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3126  *
3127  * @adev: amdgpu_device pointer
3128  *
3129  * First resume function for hardware IPs.  The list of all the hardware
3130  * IPs that make up the asic is walked and the resume callbacks are run for
3131  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3132  * after a suspend and updates the software state as necessary.  This
3133  * function is also used for restoring the GPU after a GPU reset.
3134  * Returns 0 on success, negative error code on failure.
3135  */
3136 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3137 {
3138         int i, r;
3139
3140         for (i = 0; i < adev->num_ip_blocks; i++) {
3141                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3142                         continue;
3143                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3144                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3145                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3146
3147                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3148                         if (r) {
3149                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3150                                           adev->ip_blocks[i].version->funcs->name, r);
3151                                 return r;
3152                         }
3153                         adev->ip_blocks[i].status.hw = true;
3154                 }
3155         }
3156
3157         return 0;
3158 }
3159
3160 /**
3161  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3162  *
3163  * @adev: amdgpu_device pointer
3164  *
3165  * Second resume function for hardware IPs.  The list of all the hardware
3166  * IPs that make up the asic is walked and the resume callbacks are run for
3167  * all blocks except COMMON, GMC, IH and PSP.  resume puts the hardware into a
3168  * functional state after a suspend and updates the software state as
3169  * necessary.  This function is also used for restoring the GPU after a GPU
3170  * reset.
3171  * Returns 0 on success, negative error code on failure.
3172  */
3173 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3174 {
3175         int i, r;
3176
3177         for (i = 0; i < adev->num_ip_blocks; i++) {
3178                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3179                         continue;
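                     /* COMMON, GMC and IH were already resumed in phase 1, and PSP
                      * is handled by amdgpu_device_fw_loading() between the two phases.
                      */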
3180                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3181                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3182                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3183                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3184                         continue;
3185                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3186                 if (r) {
3187                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3188                                   adev->ip_blocks[i].version->funcs->name, r);
3189                         return r;
3190                 }
3191                 adev->ip_blocks[i].status.hw = true;
3192         }
3193
3194         return 0;
3195 }
3196
3197 /**
3198  * amdgpu_device_ip_resume - run resume for hardware IPs
3199  *
3200  * @adev: amdgpu_device pointer
3201  *
3202  * Main resume function for hardware IPs.  The hardware IPs
3203  * are split into two resume functions because they are
3204  * also used in recovering from a GPU reset and some additional
3205  * steps need to be taken between them.  In this case (S3/S4) they are
3206  * run sequentially.
3207  * Returns 0 on success, negative error code on failure.
3208  */
3209 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3210 {
3211         int r;
3212
3213         r = amdgpu_amdkfd_resume_iommu(adev);
3214         if (r)
3215                 return r;
3216
3217         r = amdgpu_device_ip_resume_phase1(adev);
3218         if (r)
3219                 return r;
3220
3221         r = amdgpu_device_fw_loading(adev);
3222         if (r)
3223                 return r;
3224
3225         r = amdgpu_device_ip_resume_phase2(adev);
3226
3227         return r;
3228 }
3229
3230 /**
3231  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3232  *
3233  * @adev: amdgpu_device pointer
3234  *
3235  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3236  */
3237 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3238 {
3239         if (amdgpu_sriov_vf(adev)) {
3240                 if (adev->is_atom_fw) {
3241                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3242                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3243                 } else {
3244                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3245                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3246                 }
3247
3248                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3249                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3250         }
3251 }
3252
3253 /**
3254  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3255  *
3256  * @asic_type: AMD asic type
3257  *
3258  * Check if there is DC (the new display/modesetting infrastructure) support for an asic.
3259  * Returns true if DC has support, false if not.
3260  */
3261 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3262 {
3263         switch (asic_type) {
3264 #ifdef CONFIG_DRM_AMDGPU_SI
3265         case CHIP_HAINAN:
3266 #endif
3267         case CHIP_TOPAZ:
3268                 /* chips with no display hardware */
3269                 return false;
3270 #if defined(CONFIG_DRM_AMD_DC)
3271         case CHIP_TAHITI:
3272         case CHIP_PITCAIRN:
3273         case CHIP_VERDE:
3274         case CHIP_OLAND:
3275                 /*
3276                  * We have systems in the wild with these ASICs that require
3277                  * LVDS and VGA support which is not supported with DC.
3278                  *
3279                  * Fallback to the non-DC driver here by default so as not to
3280                  * cause regressions.
3281                  */
3282 #if defined(CONFIG_DRM_AMD_DC_SI)
3283                 return amdgpu_dc > 0;
3284 #else
3285                 return false;
3286 #endif
3287         case CHIP_BONAIRE:
3288         case CHIP_KAVERI:
3289         case CHIP_KABINI:
3290         case CHIP_MULLINS:
3291                 /*
3292                  * We have systems in the wild with these ASICs that require
3293                  * VGA support which is not supported with DC.
3294                  *
3295                  * Fallback to the non-DC driver here by default so as not to
3296                  * cause regressions.
3297                  */
3298                 return amdgpu_dc > 0;
3299         default:
3300                 return amdgpu_dc != 0;
3301 #else
3302         default:
3303                 if (amdgpu_dc > 0)
3304                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3305                                          "but isn't supported by ASIC, ignoring\n");
3306                 return false;
3307 #endif
3308         }
3309 }
3310
3311 /**
3312  * amdgpu_device_has_dc_support - check if dc is supported
3313  *
3314  * @adev: amdgpu_device pointer
3315  *
3316  * Returns true for supported, false for not supported
3317  */
3318 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3319 {
3320         if (amdgpu_sriov_vf(adev) ||
3321             adev->enable_virtual_display ||
3322             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3323                 return false;
3324
3325         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3326 }
3327
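     /**
      * amdgpu_device_xgmi_reset_func - per-device reset work for an XGMI hive
      *
      * @__work: work_struct.
      *
      * Resets one device of an XGMI hive.  Task barriers keep the BACO
      * enter/exit sequence (or the full ASIC reset) of all hive members in
      * lockstep.
      */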
3328 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3329 {
3330         struct amdgpu_device *adev =
3331                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3332         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3333
3334         /* It's a bug to not have a hive within this function */
3335         if (WARN_ON(!hive))
3336                 return;
3337
3338         /*
3339          * Use task barrier to synchronize all xgmi reset works across the
3340          * hive. task_barrier_enter and task_barrier_exit will block
3341          * until all the threads running the xgmi reset works reach
3342          * those points. task_barrier_full will do both blocks.
3343          */
3344         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3345
3346                 task_barrier_enter(&hive->tb);
3347                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3348
3349                 if (adev->asic_reset_res)
3350                         goto fail;
3351
3352                 task_barrier_exit(&hive->tb);
3353                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3354
3355                 if (adev->asic_reset_res)
3356                         goto fail;
3357
3358                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3359                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3360                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3361         } else {
3362
3363                 task_barrier_full(&hive->tb);
3364                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3365         }
3366
3367 fail:
3368         if (adev->asic_reset_res)
3369                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3370                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3371         amdgpu_put_xgmi_hive(hive);
3372 }
3373
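     /**
      * amdgpu_device_get_job_timeout_settings - parse the lockup timeout parameter
      *
      * @adev: amdgpu_device pointer
      *
      * Parses the amdgpu_lockup_timeout module parameter (gfx,compute,sdma,video
      * order) and fills in the per-engine job timeouts, falling back to the
      * defaults described in the comment below for unspecified values.
      * Returns 0 on success, negative error code on failure.
      */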
3374 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3375 {
3376         char *input = amdgpu_lockup_timeout;
3377         char *timeout_setting = NULL;
3378         int index = 0;
3379         long timeout;
3380         int ret = 0;
3381
3382         /*
3383          * By default the timeout for non-compute jobs is 10000 ms
3384          * and 60000 ms for compute jobs.
3385          * In SR-IOV or passthrough mode, the timeout for compute
3386          * jobs is 60000 ms by default.
3387          */
3388         adev->gfx_timeout = msecs_to_jiffies(10000);
3389         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3390         if (amdgpu_sriov_vf(adev))
3391                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3392                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3393         else
3394                 adev->compute_timeout =  msecs_to_jiffies(60000);
3395
3396         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3397                 while ((timeout_setting = strsep(&input, ",")) &&
3398                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3399                         ret = kstrtol(timeout_setting, 0, &timeout);
3400                         if (ret)
3401                                 return ret;
3402
3403                         if (timeout == 0) {
3404                                 index++;
3405                                 continue;
3406                         } else if (timeout < 0) {
3407                                 timeout = MAX_SCHEDULE_TIMEOUT;
3408                                 dev_warn(adev->dev, "lockup timeout disabled");
3409                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3410                         } else {
3411                                 timeout = msecs_to_jiffies(timeout);
3412                         }
3413
3414                         switch (index++) {
3415                         case 0:
3416                                 adev->gfx_timeout = timeout;
3417                                 break;
3418                         case 1:
3419                                 adev->compute_timeout = timeout;
3420                                 break;
3421                         case 2:
3422                                 adev->sdma_timeout = timeout;
3423                                 break;
3424                         case 3:
3425                                 adev->video_timeout = timeout;
3426                                 break;
3427                         default:
3428                                 break;
3429                         }
3430                 }
3431                 /*
3432                  * There is only one value specified and
3433                  * it should apply to all non-compute jobs.
3434                  */
3435                 if (index == 1) {
3436                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3437                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3438                                 adev->compute_timeout = adev->gfx_timeout;
3439                 }
3440         }
3441
3442         return ret;
3443 }
3444
3445 /**
3446  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3447  *
3448  * @adev: amdgpu_device pointer
3449  *
3450  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough (identity) mode.
3451  */
3452 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3453 {
3454         struct iommu_domain *domain;
3455
3456         domain = iommu_get_domain_for_dev(adev->dev);
3457         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3458                 adev->ram_is_direct_mapped = true;
3459 }
3460
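     /* per-device sysfs attributes, created in amdgpu_device_init() and
      * removed again in amdgpu_device_fini_hw()
      */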
3461 static const struct attribute *amdgpu_dev_attributes[] = {
3462         &dev_attr_product_name.attr,
3463         &dev_attr_product_number.attr,
3464         &dev_attr_serial_number.attr,
3465         &dev_attr_pcie_replay_count.attr,
3466         NULL
3467 };
3468
3469 /**
3470  * amdgpu_device_init - initialize the driver
3471  *
3472  * @adev: amdgpu_device pointer
3473  * @flags: driver flags
3474  *
3475  * Initializes the driver info and hw (all asics).
3476  * Returns 0 for success or an error on failure.
3477  * Called at driver startup.
3478  */
3479 int amdgpu_device_init(struct amdgpu_device *adev,
3480                        uint32_t flags)
3481 {
3482         struct drm_device *ddev = adev_to_drm(adev);
3483         struct pci_dev *pdev = adev->pdev;
3484         int r, i;
3485         bool px = false;
3486         u32 max_MBps;
3487
3488         adev->shutdown = false;
3489         adev->flags = flags;
3490
3491         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3492                 adev->asic_type = amdgpu_force_asic_type;
3493         else
3494                 adev->asic_type = flags & AMD_ASIC_MASK;
3495
3496         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3497         if (amdgpu_emu_mode == 1)
3498                 adev->usec_timeout *= 10;
3499         adev->gmc.gart_size = 512 * 1024 * 1024;
3500         adev->accel_working = false;
3501         adev->num_rings = 0;
3502         adev->mman.buffer_funcs = NULL;
3503         adev->mman.buffer_funcs_ring = NULL;
3504         adev->vm_manager.vm_pte_funcs = NULL;
3505         adev->vm_manager.vm_pte_num_scheds = 0;
3506         adev->gmc.gmc_funcs = NULL;
3507         adev->harvest_ip_mask = 0x0;
3508         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3509         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3510
3511         adev->smc_rreg = &amdgpu_invalid_rreg;
3512         adev->smc_wreg = &amdgpu_invalid_wreg;
3513         adev->pcie_rreg = &amdgpu_invalid_rreg;
3514         adev->pcie_wreg = &amdgpu_invalid_wreg;
3515         adev->pciep_rreg = &amdgpu_invalid_rreg;
3516         adev->pciep_wreg = &amdgpu_invalid_wreg;
3517         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3518         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3519         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3520         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3521         adev->didt_rreg = &amdgpu_invalid_rreg;
3522         adev->didt_wreg = &amdgpu_invalid_wreg;
3523         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3524         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3525         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3526         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3527
3528         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3529                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3530                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3531
3532         /* mutex initialization is all done here so we
3533          * can recall functions without having locking issues */
3534         mutex_init(&adev->firmware.mutex);
3535         mutex_init(&adev->pm.mutex);
3536         mutex_init(&adev->gfx.gpu_clock_mutex);
3537         mutex_init(&adev->srbm_mutex);
3538         mutex_init(&adev->gfx.pipe_reserve_mutex);
3539         mutex_init(&adev->gfx.gfx_off_mutex);
3540         mutex_init(&adev->grbm_idx_mutex);
3541         mutex_init(&adev->mn_lock);
3542         mutex_init(&adev->virt.vf_errors.lock);
3543         hash_init(adev->mn_hash);
3544         mutex_init(&adev->psp.mutex);
3545         mutex_init(&adev->notifier_lock);
3546         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3547         mutex_init(&adev->benchmark_mutex);
3548
3549         amdgpu_device_init_apu_flags(adev);
3550
3551         r = amdgpu_device_check_arguments(adev);
3552         if (r)
3553                 return r;
3554
3555         spin_lock_init(&adev->mmio_idx_lock);
3556         spin_lock_init(&adev->smc_idx_lock);
3557         spin_lock_init(&adev->pcie_idx_lock);
3558         spin_lock_init(&adev->uvd_ctx_idx_lock);
3559         spin_lock_init(&adev->didt_idx_lock);
3560         spin_lock_init(&adev->gc_cac_idx_lock);
3561         spin_lock_init(&adev->se_cac_idx_lock);
3562         spin_lock_init(&adev->audio_endpt_idx_lock);
3563         spin_lock_init(&adev->mm_stats.lock);
3564
3565         INIT_LIST_HEAD(&adev->shadow_list);
3566         mutex_init(&adev->shadow_list_lock);
3567
3568         INIT_LIST_HEAD(&adev->reset_list);
3569
3570         INIT_LIST_HEAD(&adev->ras_list);
3571
3572         INIT_DELAYED_WORK(&adev->delayed_init_work,
3573                           amdgpu_device_delayed_init_work_handler);
3574         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3575                           amdgpu_device_delay_enable_gfx_off);
3576
3577         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3578
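             /* GFXOFF is disallowed by default (req_count > 0) until the GFX IP
              * explicitly allows it via amdgpu_gfx_off_ctrl()
              */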
3579         adev->gfx.gfx_off_req_count = 1;
3580         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3581
3582         atomic_set(&adev->throttling_logging_enabled, 1);
3583         /*
3584          * If throttling continues, logging will be performed every minute
3585          * to avoid log flooding. "-1" is subtracted since the thermal
3586          * throttling interrupt comes every second. Thus, the total logging
3587          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3588          * for throttling interrupt) = 60 seconds.
3589          */
3590         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3591         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3592
3593         /* Registers mapping */
3594         /* TODO: block userspace mapping of io register */
3595         if (adev->asic_type >= CHIP_BONAIRE) {
3596                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3597                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3598         } else {
3599                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3600                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3601         }
3602
3603         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3604                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3605
3606         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3607         if (!adev->rmmio)
3608                 return -ENOMEM;
3610         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3611         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3612
3613         amdgpu_device_get_pcie_info(adev);
3614
3615         if (amdgpu_mcbp)
3616                 DRM_INFO("MCBP is enabled\n");
3617
3618         /*
3619          * The reset domain needs to be present early, before the XGMI hive is
3620          * discovered (if any) and initialized, so the reset semaphore and in_gpu
3621          * reset flag can be used early during init and before any RREG32 call.
3622          */
3623         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3624         if (!adev->reset_domain)
3625                 return -ENOMEM;
3626
3627         /* detect hw virtualization here */
3628         amdgpu_detect_virtualization(adev);
3629
3630         r = amdgpu_device_get_job_timeout_settings(adev);
3631         if (r) {
3632                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3633                 return r;
3634         }
3635
3636         /* early init functions */
3637         r = amdgpu_device_ip_early_init(adev);
3638         if (r)
3639                 return r;
3640
3641         /* Enable TMZ based on IP_VERSION */
3642         amdgpu_gmc_tmz_set(adev);
3643
3644         amdgpu_gmc_noretry_set(adev);
3645         /* Need to get xgmi info early to decide the reset behavior */
3646         if (adev->gmc.xgmi.supported) {
3647                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3648                 if (r)
3649                         return r;
3650         }
3651
3652         /* enable PCIE atomic ops */
3653         if (amdgpu_sriov_vf(adev))
3654                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3655                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3656                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3657         else
3658                 adev->have_atomics_support =
3659                         !pci_enable_atomic_ops_to_root(adev->pdev,
3660                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3661                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3662         if (!adev->have_atomics_support)
3663                 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3664
3665         /* doorbell bar mapping and doorbell index init */
3666         amdgpu_device_doorbell_init(adev);
3667
3668         if (amdgpu_emu_mode == 1) {
3669                 /* post the asic on emulation mode */
3670                 emu_soc_asic_init(adev);
3671                 goto fence_driver_init;
3672         }
3673
3674         amdgpu_reset_init(adev);
3675
3676         /* detect if we are with an SRIOV vbios */
3677         amdgpu_device_detect_sriov_bios(adev);
3678
3679         /* check if we need to reset the asic
3680          *  E.g., driver was not cleanly unloaded previously, etc.
3681          */
3682         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3683                 if (adev->gmc.xgmi.num_physical_nodes) {
3684                         dev_info(adev->dev, "Pending hive reset.\n");
3685                         adev->gmc.xgmi.pending_reset = true;
3686                         /* Only need to init the necessary blocks for SMU to handle the reset */
3687                         for (i = 0; i < adev->num_ip_blocks; i++) {
3688                                 if (!adev->ip_blocks[i].status.valid)
3689                                         continue;
3690                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3691                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3692                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3693                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3694                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3695                                                 adev->ip_blocks[i].version->funcs->name);
3696                                         adev->ip_blocks[i].status.hw = true;
3697                                 }
3698                         }
3699                 } else {
3700                         r = amdgpu_asic_reset(adev);
3701                         if (r) {
3702                                 dev_err(adev->dev, "asic reset on init failed\n");
3703                                 goto failed;
3704                         }
3705                 }
3706         }
3707
3708         pci_enable_pcie_error_reporting(adev->pdev);
3709
3710         /* Post card if necessary */
3711         if (amdgpu_device_need_post(adev)) {
3712                 if (!adev->bios) {
3713                         dev_err(adev->dev, "no vBIOS found\n");
3714                         r = -EINVAL;
3715                         goto failed;
3716                 }
3717                 DRM_INFO("GPU posting now...\n");
3718                 r = amdgpu_device_asic_init(adev);
3719                 if (r) {
3720                         dev_err(adev->dev, "gpu post error!\n");
3721                         goto failed;
3722                 }
3723         }
3724
3725         if (adev->is_atom_fw) {
3726                 /* Initialize clocks */
3727                 r = amdgpu_atomfirmware_get_clock_info(adev);
3728                 if (r) {
3729                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3730                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3731                         goto failed;
3732                 }
3733         } else {
3734                 /* Initialize clocks */
3735                 r = amdgpu_atombios_get_clock_info(adev);
3736                 if (r) {
3737                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3738                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3739                         goto failed;
3740                 }
3741                 /* init i2c buses */
3742                 if (!amdgpu_device_has_dc_support(adev))
3743                         amdgpu_atombios_i2c_init(adev);
3744         }
3745
3746 fence_driver_init:
3747         /* Fence driver */
3748         r = amdgpu_fence_driver_sw_init(adev);
3749         if (r) {
3750                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3751                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3752                 goto failed;
3753         }
3754
3755         /* init the mode config */
3756         drm_mode_config_init(adev_to_drm(adev));
3757
3758         r = amdgpu_device_ip_init(adev);
3759         if (r) {
3760                 /* failed in exclusive mode due to timeout */
3761                 if (amdgpu_sriov_vf(adev) &&
3762                     !amdgpu_sriov_runtime(adev) &&
3763                     amdgpu_virt_mmio_blocked(adev) &&
3764                     !amdgpu_virt_wait_reset(adev)) {
3765                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3766                         /* Don't send request since VF is inactive. */
3767                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3768                         adev->virt.ops = NULL;
3769                         r = -EAGAIN;
3770                         goto release_ras_con;
3771                 }
3772                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3773                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3774                 goto release_ras_con;
3775         }
3776
3777         amdgpu_fence_driver_hw_init(adev);
3778
3779         dev_info(adev->dev,
3780                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3781                         adev->gfx.config.max_shader_engines,
3782                         adev->gfx.config.max_sh_per_se,
3783                         adev->gfx.config.max_cu_per_sh,
3784                         adev->gfx.cu_info.number);
3785
3786         adev->accel_working = true;
3787
3788         amdgpu_vm_check_compute_bug(adev);
3789
3790         /* Initialize the buffer migration limit. */
3791         if (amdgpu_moverate >= 0)
3792                 max_MBps = amdgpu_moverate;
3793         else
3794                 max_MBps = 8; /* Allow 8 MB/s. */
3795         /* Get a log2 for easy divisions. */
3796         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3797
3798         r = amdgpu_pm_sysfs_init(adev);
3799         if (r) {
3800                 adev->pm_sysfs_en = false;
3801                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3802         } else
3803                 adev->pm_sysfs_en = true;
3804
3805         r = amdgpu_ucode_sysfs_init(adev);
3806         if (r) {
3807                 adev->ucode_sysfs_en = false;
3808                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3809         } else
3810                 adev->ucode_sysfs_en = true;
3811
3812         r = amdgpu_psp_sysfs_init(adev);
3813         if (r) {
3814                 adev->psp_sysfs_en = false;
3815                 if (!amdgpu_sriov_vf(adev))
3816                         DRM_ERROR("Creating psp sysfs failed\n");
3817         } else
3818                 adev->psp_sysfs_en = true;
3819
3820         /*
3821          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3822          * Otherwise the mgpu fan boost feature would be skipped because the
3823          * gpu instance count would be too low.
3824          */
3825         amdgpu_register_gpu_instance(adev);
3826
3827         /* enable clockgating, etc. after IB tests, since some blocks require
3828          * explicit gating rather than handling it automatically.
3829          */
3830         if (!adev->gmc.xgmi.pending_reset) {
3831                 r = amdgpu_device_ip_late_init(adev);
3832                 if (r) {
3833                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3834                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3835                         goto release_ras_con;
3836                 }
3837                 /* must succeed. */
3838                 amdgpu_ras_resume(adev);
3839                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3840                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3841         }
3842
3843         if (amdgpu_sriov_vf(adev))
3844                 flush_delayed_work(&adev->delayed_init_work);
3845
3846         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3847         if (r)
3848                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3849
3850         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3851                 r = amdgpu_pmu_init(adev);
3852                 if (r)
3853                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3854         }

3855         /* Keep a stored copy of the PCI config space at hand for restore after a sudden PCI error */
3856         if (amdgpu_device_cache_pci_state(adev->pdev))
3857                 pci_restore_state(pdev);
3858
3859         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
3860         /* this will fail for cards that aren't VGA class devices, just
3861          * ignore it */
3862         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3863                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3864
3865         if (amdgpu_device_supports_px(ddev)) {
3866                 px = true;
3867                 vga_switcheroo_register_client(adev->pdev,
3868                                                &amdgpu_switcheroo_ops, px);
3869                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3870         }
3871
3872         if (adev->gmc.xgmi.pending_reset)
3873                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3874                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3875
3876         amdgpu_device_check_iommu_direct_map(adev);
3877
3878         return 0;
3879
3880 release_ras_con:
3881         amdgpu_release_ras_context(adev);
3882
3883 failed:
3884         amdgpu_vf_error_trans_all(adev);
3885
3886         return r;
3887 }
3888
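     /**
      * amdgpu_device_unmap_mmio - unmap all MMIO ranges of the device
      *
      * @adev: amdgpu_device pointer
      *
      * Clears all CPU mappings pointing to the device and unmaps the doorbell,
      * register and VRAM apertures so a hot-unplugged device can no longer be
      * touched from the CPU.
      */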
3889 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3890 {
3891
3892         /* Clear all CPU mappings pointing to this device */
3893         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3894
3895         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3896         amdgpu_device_doorbell_fini(adev);
3897
3898         iounmap(adev->rmmio);
3899         adev->rmmio = NULL;
3900         if (adev->mman.aper_base_kaddr)
3901                 iounmap(adev->mman.aper_base_kaddr);
3902         adev->mman.aper_base_kaddr = NULL;
3903
3904         /* Memory manager related */
3905         if (!adev->gmc.xgmi.connected_to_cpu) {
3906                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3907                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3908         }
3909 }
3910
3911 /**
3912  * amdgpu_device_fini_hw - tear down the driver
3913  *
3914  * @adev: amdgpu_device pointer
3915  *
3916  * Tear down the driver info (all asics).
3917  * Called at driver shutdown.
3918  */
3919 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3920 {
3921         dev_info(adev->dev, "amdgpu: finishing device.\n");
3922         flush_delayed_work(&adev->delayed_init_work);
3923         adev->shutdown = true;
3924
3925         /* make sure IB tests have finished before entering exclusive mode
3926          * to avoid preemption of the IB tests
3927          */
3928         if (amdgpu_sriov_vf(adev)) {
3929                 amdgpu_virt_request_full_gpu(adev, false);
3930                 amdgpu_virt_fini_data_exchange(adev);
3931         }
3932
3933         /* disable all interrupts */
3934         amdgpu_irq_disable_all(adev);
3935         if (adev->mode_info.mode_config_initialized) {
3936                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3937                         drm_helper_force_disable_all(adev_to_drm(adev));
3938                 else
3939                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3940         }
3941         amdgpu_fence_driver_hw_fini(adev);
3942
3943         if (adev->mman.initialized) {
3944                 flush_delayed_work(&adev->mman.bdev.wq);
3945                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3946         }
3947
3948         if (adev->pm_sysfs_en)
3949                 amdgpu_pm_sysfs_fini(adev);
3950         if (adev->ucode_sysfs_en)
3951                 amdgpu_ucode_sysfs_fini(adev);
3952         if (adev->psp_sysfs_en)
3953                 amdgpu_psp_sysfs_fini(adev);
3954         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3955
3956         /* disable ras feature must before hw fini */
3957         amdgpu_ras_pre_fini(adev);
3958
3959         amdgpu_device_ip_fini_early(adev);
3960
3961         amdgpu_irq_fini_hw(adev);
3962
3963         if (adev->mman.initialized)
3964                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3965
3966         amdgpu_gart_dummy_page_fini(adev);
3967
3968         if (drm_dev_is_unplugged(adev_to_drm(adev)))
3969                 amdgpu_device_unmap_mmio(adev);
3970
3971 }
3972
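     /**
      * amdgpu_device_fini_sw - software tear down of the driver
      *
      * @adev: amdgpu_device pointer
      *
      * Frees the remaining software state (all asics) after the hardware side
      * has been shut down by amdgpu_device_fini_hw().
      * Called after amdgpu_device_fini_hw() when the device is released.
      */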
3973 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3974 {
3975         int idx;
3976
3977         amdgpu_fence_driver_sw_fini(adev);
3978         amdgpu_device_ip_fini(adev);
3979         release_firmware(adev->firmware.gpu_info_fw);
3980         adev->firmware.gpu_info_fw = NULL;
3981         adev->accel_working = false;
3982
3983         amdgpu_reset_fini(adev);
3984
3985         /* free i2c buses */
3986         if (!amdgpu_device_has_dc_support(adev))
3987                 amdgpu_i2c_fini(adev);
3988
3989         if (amdgpu_emu_mode != 1)
3990                 amdgpu_atombios_fini(adev);
3991
3992         kfree(adev->bios);
3993         adev->bios = NULL;
3994         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3995                 vga_switcheroo_unregister_client(adev->pdev);
3996                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3997         }
3998         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3999                 vga_client_unregister(adev->pdev);
4000
4001         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4002
4003                 iounmap(adev->rmmio);
4004                 adev->rmmio = NULL;
4005                 amdgpu_device_doorbell_fini(adev);
4006                 drm_dev_exit(idx);
4007         }
4008
4009         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4010                 amdgpu_pmu_fini(adev);
4011         if (adev->mman.discovery_bin)
4012                 amdgpu_discovery_fini(adev);
4013
4014         amdgpu_reset_put_reset_domain(adev->reset_domain);
4015         adev->reset_domain = NULL;
4016
4017         kfree(adev->pci_state);
4018
4019 }
4020
4021 /**
4022  * amdgpu_device_evict_resources - evict device resources
4023  * @adev: amdgpu device object
4024  *
4025  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4026  * of the vram memory type. Mainly used for evicting device resources
4027  * at suspend time.
4028  *
4029  */
4030 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4031 {
4032         /* No need to evict vram on APUs for suspend to ram or s2idle */
4033         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4034                 return;
4035
4036         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4037                 DRM_WARN("evicting device resources failed\n");
4038
4039 }
4040
4041 /*
4042  * Suspend & resume.
4043  */
4044 /**
4045  * amdgpu_device_suspend - initiate device suspend
4046  *
4047  * @dev: drm dev pointer
4048  * @fbcon: notify the fbdev of suspend
4049  *
4050  * Puts the hw in the suspend state (all asics).
4051  * Returns 0 for success or an error on failure.
4052  * Called at driver suspend.
4053  */
4054 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4055 {
4056         struct amdgpu_device *adev = drm_to_adev(dev);
4057
4058         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4059                 return 0;
4060
4061         adev->in_suspend = true;
4062
4063         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4064                 DRM_WARN("smart shift update failed\n");
4065
4066         drm_kms_helper_poll_disable(dev);
4067
4068         if (fbcon)
4069                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4070
4071         cancel_delayed_work_sync(&adev->delayed_init_work);
4072
4073         amdgpu_ras_suspend(adev);
4074
4075         amdgpu_device_ip_suspend_phase1(adev);
4076
4077         if (!adev->in_s0ix)
4078                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4079
4080         amdgpu_device_evict_resources(adev);
4081
4082         amdgpu_fence_driver_hw_fini(adev);
4083
4084         amdgpu_device_ip_suspend_phase2(adev);
4085
4086         return 0;
4087 }
4088
4089 /**
4090  * amdgpu_device_resume - initiate device resume
4091  *
4092  * @dev: drm dev pointer
4093  * @fbcon: notify the fbdev of resume
4094  *
4095  * Bring the hw back to operating state (all asics).
4096  * Returns 0 for success or an error on failure.
4097  * Called at driver resume.
4098  */
4099 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4100 {
4101         struct amdgpu_device *adev = drm_to_adev(dev);
4102         int r = 0;
4103
4104         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4105                 return 0;
4106
4107         if (adev->in_s0ix)
4108                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4109
4110         /* post card */
4111         if (amdgpu_device_need_post(adev)) {
4112                 r = amdgpu_device_asic_init(adev);
4113                 if (r)
4114                         dev_err(adev->dev, "amdgpu asic init failed\n");
4115         }
4116
4117         r = amdgpu_device_ip_resume(adev);
4118         if (r) {
4119                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4120                 return r;
4121         }
4122         amdgpu_fence_driver_hw_init(adev);
4123
4124         r = amdgpu_device_ip_late_init(adev);
4125         if (r)
4126                 return r;
4127
4128         queue_delayed_work(system_wq, &adev->delayed_init_work,
4129                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4130
4131         if (!adev->in_s0ix) {
4132                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4133                 if (r)
4134                         return r;
4135         }
4136
4137         /* Make sure IB tests flushed */
4138         flush_delayed_work(&adev->delayed_init_work);
4139
4140         if (fbcon)
4141                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4142
4143         drm_kms_helper_poll_enable(dev);
4144
4145         amdgpu_ras_resume(adev);
4146
4147         /*
4148          * Most of the connector probing functions try to acquire runtime pm
4149          * refs to ensure that the GPU is powered on when connector polling is
4150          * performed. Since we're calling this from a runtime PM callback,
4151          * trying to acquire rpm refs will cause us to deadlock.
4152          *
4153          * Since we're guaranteed to be holding the rpm lock, it's safe to
4154          * temporarily disable the rpm helpers so this doesn't deadlock us.
4155          */
4156 #ifdef CONFIG_PM
4157         dev->dev->power.disable_depth++;
4158 #endif
4159         if (!amdgpu_device_has_dc_support(adev))
4160                 drm_helper_hpd_irq_event(dev);
4161         else
4162                 drm_kms_helper_hotplug_event(dev);
4163 #ifdef CONFIG_PM
4164         dev->dev->power.disable_depth--;
4165 #endif
4166         adev->in_suspend = false;
4167
4168         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4169                 DRM_WARN("smart shift update failed\n");
4170
4171         return 0;
4172 }
4173
4174 /**
4175  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4176  *
4177  * @adev: amdgpu_device pointer
4178  *
4179  * The list of all the hardware IPs that make up the asic is walked and
4180  * the check_soft_reset callbacks are run.  check_soft_reset determines
4181  * if the asic is still hung or not.
4182  * Returns true if any of the IPs are still in a hung state, false if not.
4183  */
4184 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4185 {
4186         int i;
4187         bool asic_hang = false;
4188
4189         if (amdgpu_sriov_vf(adev))
4190                 return true;
4191
4192         if (amdgpu_asic_need_full_reset(adev))
4193                 return true;
4194
4195         for (i = 0; i < adev->num_ip_blocks; i++) {
4196                 if (!adev->ip_blocks[i].status.valid)
4197                         continue;
4198                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4199                         adev->ip_blocks[i].status.hang =
4200                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4201                 if (adev->ip_blocks[i].status.hang) {
4202                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4203                         asic_hang = true;
4204                 }
4205         }
4206         return asic_hang;
4207 }
4208
4209 /**
4210  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4211  *
4212  * @adev: amdgpu_device pointer
4213  *
4214  * The list of all the hardware IPs that make up the asic is walked and the
4215  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4216  * handles any IP specific hardware or software state changes that are
4217  * necessary for a soft reset to succeed.
4218  * Returns 0 on success, negative error code on failure.
4219  */
4220 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4221 {
4222         int i, r = 0;
4223
4224         for (i = 0; i < adev->num_ip_blocks; i++) {
4225                 if (!adev->ip_blocks[i].status.valid)
4226                         continue;
4227                 if (adev->ip_blocks[i].status.hang &&
4228                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4229                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4230                         if (r)
4231                                 return r;
4232                 }
4233         }
4234
4235         return 0;
4236 }
4237
4238 /**
4239  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4240  *
4241  * @adev: amdgpu_device pointer
4242  *
4243  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4244  * reset is necessary to recover.
4245  * Returns true if a full asic reset is required, false if not.
4246  */
4247 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4248 {
4249         int i;
4250
4251         if (amdgpu_asic_need_full_reset(adev))
4252                 return true;
4253
4254         for (i = 0; i < adev->num_ip_blocks; i++) {
4255                 if (!adev->ip_blocks[i].status.valid)
4256                         continue;
4257                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4258                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4259                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4260                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4261                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
4262                         if (adev->ip_blocks[i].status.hang) {
4263                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4264                                 return true;
4265                         }
4266                 }
4267         }
4268         return false;
4269 }
4270
4271 /**
4272  * amdgpu_device_ip_soft_reset - do a soft reset
4273  *
4274  * @adev: amdgpu_device pointer
4275  *
4276  * The list of all the hardware IPs that make up the asic is walked and the
4277  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4278  * IP specific hardware or software state changes that are necessary to soft
4279  * reset the IP.
4280  * Returns 0 on success, negative error code on failure.
4281  */
4282 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4283 {
4284         int i, r = 0;
4285
4286         for (i = 0; i < adev->num_ip_blocks; i++) {
4287                 if (!adev->ip_blocks[i].status.valid)
4288                         continue;
4289                 if (adev->ip_blocks[i].status.hang &&
4290                     adev->ip_blocks[i].version->funcs->soft_reset) {
4291                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4292                         if (r)
4293                                 return r;
4294                 }
4295         }
4296
4297         return 0;
4298 }
4299
4300 /**
4301  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4302  *
4303  * @adev: amdgpu_device pointer
4304  *
4305  * The list of all the hardware IPs that make up the asic is walked and the
4306  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4307  * handles any IP specific hardware or software state changes that are
4308  * necessary after the IP has been soft reset.
4309  * Returns 0 on success, negative error code on failure.
4310  */
4311 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4312 {
4313         int i, r = 0;
4314
4315         for (i = 0; i < adev->num_ip_blocks; i++) {
4316                 if (!adev->ip_blocks[i].status.valid)
4317                         continue;
4318                 if (adev->ip_blocks[i].status.hang &&
4319                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4320                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4321                 if (r)
4322                         return r;
4323         }
4324
4325         return 0;
4326 }
4327
4328 /**
4329  * amdgpu_device_recover_vram - Recover some VRAM contents
4330  *
4331  * @adev: amdgpu_device pointer
4332  *
4333  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4334  * restore things like GPUVM page tables after a GPU reset where
4335  * the contents of VRAM might be lost.
4336  *
4337  * Returns:
4338  * 0 on success, negative error code on failure.
4339  */
4340 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4341 {
4342         struct dma_fence *fence = NULL, *next = NULL;
4343         struct amdgpu_bo *shadow;
4344         struct amdgpu_bo_vm *vmbo;
4345         long r = 1, tmo;
4346
4347         if (amdgpu_sriov_runtime(adev))
4348                 tmo = msecs_to_jiffies(8000);
4349         else
4350                 tmo = msecs_to_jiffies(100);
4351
4352         dev_info(adev->dev, "recover vram bo from shadow start\n");
4353         mutex_lock(&adev->shadow_list_lock);
4354         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4355                 shadow = &vmbo->bo;
4356                 /* No need to recover an evicted BO */
4357                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4358                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4359                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4360                         continue;
4361
4362                 r = amdgpu_bo_restore_shadow(shadow, &next);
4363                 if (r)
4364                         break;
4365
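                /* Wait for the previously queued restore to finish while the
                 * latest one is already in flight, pipelining the copies. */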
4366                 if (fence) {
4367                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4368                         dma_fence_put(fence);
4369                         fence = next;
4370                         if (tmo == 0) {
4371                                 r = -ETIMEDOUT;
4372                                 break;
4373                         } else if (tmo < 0) {
4374                                 r = tmo;
4375                                 break;
4376                         }
4377                 } else {
4378                         fence = next;
4379                 }
4380         }
4381         mutex_unlock(&adev->shadow_list_lock);
4382
4383         if (fence)
4384                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4385         dma_fence_put(fence);
4386
4387         if (r < 0 || tmo <= 0) {
4388                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4389                 return -EIO;
4390         }
4391
4392         dev_info(adev->dev, "recover vram bo from shadow done\n");
4393         return 0;
4394 }
4395
4396
4397 /**
4398  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4399  *
4400  * @adev: amdgpu_device pointer
4401  * @from_hypervisor: request from hypervisor
4402  *
4403  * Do a VF FLR (function level reset) and reinitialize the ASIC.
4404  * Returns 0 on success, negative error code on failure.
4405  */
4406 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4407                                      bool from_hypervisor)
4408 {
4409         int r;
4410         struct amdgpu_hive_info *hive = NULL;
4411         int retry_limit = 0;
4412
4413 retry:
4414         amdgpu_amdkfd_pre_reset(adev);
4417
4418         if (from_hypervisor)
4419                 r = amdgpu_virt_request_full_gpu(adev, true);
4420         else
4421                 r = amdgpu_virt_reset_gpu(adev);
4422         if (r)
4423                 return r;
4424
4425         /* Resume IP prior to SMC */
4426         r = amdgpu_device_ip_reinit_early_sriov(adev);
4427         if (r)
4428                 goto error;
4429
4430         amdgpu_virt_init_data_exchange(adev);
4431
4432         r = amdgpu_device_fw_loading(adev);
4433         if (r)
4434                 return r;
4435
4436         /* now we are okay to resume SMC/CP/SDMA */
4437         r = amdgpu_device_ip_reinit_late_sriov(adev);
4438         if (r)
4439                 goto error;
4440
4441         hive = amdgpu_get_xgmi_hive(adev);
4442         /* Update PSP FW topology after reset */
4443         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4444                 r = amdgpu_xgmi_update_topology(hive, adev);
4445
4446         if (hive)
4447                 amdgpu_put_xgmi_hive(hive);
4448
4449         if (!r) {
4450                 amdgpu_irq_gpu_reset_resume_helper(adev);
4451                 r = amdgpu_ib_ring_tests(adev);
4452
4453                 amdgpu_amdkfd_post_reset(adev);
4454         }
4455
4456 error:
4457         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4458                 amdgpu_inc_vram_lost(adev);
4459                 r = amdgpu_device_recover_vram(adev);
4460         }
4461         amdgpu_virt_release_full_gpu(adev, true);
4462
4463         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4464                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4465                         retry_limit++;
4466                         goto retry;
4467                 } else
4468                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4469         }
4470
4471         return r;
4472 }
4473
4474 /**
4475  * amdgpu_device_has_job_running - check if there is any job in the pending list
4476  *
4477  * @adev: amdgpu_device pointer
4478  *
4479  * Check each ring's scheduler pending list for an unfinished job.
4480  */
4481 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4482 {
4483         int i;
4484         struct drm_sched_job *job;
4485
4486         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4487                 struct amdgpu_ring *ring = adev->rings[i];
4488
4489                 if (!ring || !ring->sched.thread)
4490                         continue;
4491
4492                 spin_lock(&ring->sched.job_list_lock);
4493                 job = list_first_entry_or_null(&ring->sched.pending_list,
4494                                                struct drm_sched_job, list);
4495                 spin_unlock(&ring->sched.job_list_lock);
4496                 if (job)
4497                         return true;
4498         }
4499         return false;
4500 }
4501
4502 /**
4503  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4504  *
4505  * @adev: amdgpu_device pointer
4506  *
4507  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4508  * a hung GPU.
4509  */
4510 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4511 {
4512         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4513                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4514                 return false;
4515         }
4516
4517         if (amdgpu_gpu_recovery == 0)
4518                 goto disabled;
4519
4520         if (amdgpu_sriov_vf(adev))
4521                 return true;
4522
4523         if (amdgpu_gpu_recovery == -1) {
4524                 switch (adev->asic_type) {
4525 #ifdef CONFIG_DRM_AMDGPU_SI
4526                 case CHIP_VERDE:
4527                 case CHIP_TAHITI:
4528                 case CHIP_PITCAIRN:
4529                 case CHIP_OLAND:
4530                 case CHIP_HAINAN:
4531 #endif
4532 #ifdef CONFIG_DRM_AMDGPU_CIK
4533                 case CHIP_KAVERI:
4534                 case CHIP_KABINI:
4535                 case CHIP_MULLINS:
4536 #endif
4537                 case CHIP_CARRIZO:
4538                 case CHIP_STONEY:
4539                 case CHIP_CYAN_SKILLFISH:
4540                         goto disabled;
4541                 default:
4542                         break;
4543                 }
4544         }
4545
4546         return true;
4547
4548 disabled:
4549         dev_info(adev->dev, "GPU recovery disabled.\n");
4550         return false;
4551 }
4552
4553 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4554 {
4555         u32 i;
4556         int ret = 0;
4557
4558         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4559
4560         dev_info(adev->dev, "GPU mode1 reset\n");
4561
4562         /* disable BM */
4563         pci_clear_master(adev->pdev);
4564
4565         amdgpu_device_cache_pci_state(adev->pdev);
4566
4567         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4568                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4569                 ret = amdgpu_dpm_mode1_reset(adev);
4570         } else {
4571                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4572                 ret = psp_gpu_reset(adev);
4573         }
4574
4575         if (ret)
4576                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4577
4578         amdgpu_device_load_pci_state(adev->pdev);
4579
4580         /* wait for asic to come out of reset */
4581         for (i = 0; i < adev->usec_timeout; i++) {
4582                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4583
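                /* memsize reads back as all 1s while the ASIC is still held
                 * in reset, so keep polling until a sane value comes back. */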
4584                 if (memsize != 0xffffffff)
4585                         break;
4586                 udelay(1);
4587         }
4588
4589         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4590         return ret;
4591 }
4592
4593 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4594                                  struct amdgpu_reset_context *reset_context)
4595 {
4596         int i, r = 0;
4597         struct amdgpu_job *job = NULL;
4598         bool need_full_reset =
4599                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4600
4601         if (reset_context->reset_req_dev == adev)
4602                 job = reset_context->job;
4603
4604         if (amdgpu_sriov_vf(adev)) {
4605                 /* stop the data exchange thread */
4606                 amdgpu_virt_fini_data_exchange(adev);
4607         }
4608
4609         amdgpu_fence_driver_isr_toggle(adev, true);
4610
4611         /* block all schedulers and reset given job's ring */
4612         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4613                 struct amdgpu_ring *ring = adev->rings[i];
4614
4615                 if (!ring || !ring->sched.thread)
4616                         continue;
4617
4618                 /* Clear job fences from the fence driver to avoid force_completion;
4619                  * leave NULL and vm flush fences in the fence driver. */
4620                 amdgpu_fence_driver_clear_job_fences(ring);
4621
4622                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4623                 amdgpu_fence_driver_force_completion(ring);
4624         }
4625
4626         amdgpu_fence_driver_isr_toggle(adev, false);
4627
4628         if (job && job->vm)
4629                 drm_sched_increase_karma(&job->base);
4630
4631         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4632         /* If reset handler not implemented, continue; otherwise return */
4633         if (r == -ENOSYS)
4634                 r = 0;
4635         else
4636                 return r;
4637
4638         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4639         if (!amdgpu_sriov_vf(adev)) {
4640
4641                 if (!need_full_reset)
4642                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4643
4644                 if (!need_full_reset) {
4645                         amdgpu_device_ip_pre_soft_reset(adev);
4646                         r = amdgpu_device_ip_soft_reset(adev);
4647                         amdgpu_device_ip_post_soft_reset(adev);
4648                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4649                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4650                                 need_full_reset = true;
4651                         }
4652                 }
4653
4654                 if (need_full_reset)
4655                         r = amdgpu_device_ip_suspend(adev);
4656                 if (need_full_reset)
4657                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4658                 else
4659                         clear_bit(AMDGPU_NEED_FULL_RESET,
4660                                   &reset_context->flags);
4661         }
4662
4663         return r;
4664 }
4665
4666 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4667 {
4668         int i;
4669
4670         lockdep_assert_held(&adev->reset_domain->sem);
4671
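        /* Snapshot the (debugfs-selected) reset dump registers so their
         * values can be reported in the devcoredump below. */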
4672         for (i = 0; i < adev->num_regs; i++) {
4673                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4674                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4675                                              adev->reset_dump_reg_value[i]);
4676         }
4677
4678         return 0;
4679 }
4680
4681 #ifdef CONFIG_DEV_COREDUMP
4682 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4683                 size_t count, void *data, size_t datalen)
4684 {
4685         struct drm_printer p;
4686         struct amdgpu_device *adev = data;
4687         struct drm_print_iterator iter;
4688         int i;
4689
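        /* Set up a printer that emits into the chunk of the coredump
         * userspace is currently reading (offset/count window). */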
4690         iter.data = buffer;
4691         iter.offset = 0;
4692         iter.start = offset;
4693         iter.remain = count;
4694
4695         p = drm_coredump_printer(&iter);
4696
4697         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4698         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4699         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4700         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4701         if (adev->reset_task_info.pid)
4702                 drm_printf(&p, "process_name: %s PID: %d\n",
4703                            adev->reset_task_info.process_name,
4704                            adev->reset_task_info.pid);
4705
4706         if (adev->reset_vram_lost)
4707                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4708         if (adev->num_regs) {
4709                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4710
4711                 for (i = 0; i < adev->num_regs; i++)
4712                         drm_printf(&p, "0x%08x: 0x%08x\n",
4713                                    adev->reset_dump_reg_list[i],
4714                                    adev->reset_dump_reg_value[i]);
4715         }
4716
4717         return count - iter.remain;
4718 }
4719
4720 static void amdgpu_devcoredump_free(void *data)
4721 {
4722 }
4723
4724 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4725 {
4726         struct drm_device *dev = adev_to_drm(adev);
4727
4728         ktime_get_ts64(&adev->reset_time);
4729         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4730                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4731 }
4732 #endif
4733
4734 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4735                          struct amdgpu_reset_context *reset_context)
4736 {
4737         struct amdgpu_device *tmp_adev = NULL;
4738         bool need_full_reset, skip_hw_reset, vram_lost = false;
4739         int r = 0;
4740
4741         /* Try reset handler method first */
4742         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4743                                     reset_list);
4744         amdgpu_reset_reg_dumps(tmp_adev);
4745
4746         reset_context->reset_device_list = device_list_handle;
4747         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4748         /* If reset handler not implemented, continue; otherwise return */
4749         if (r == -ENOSYS)
4750                 r = 0;
4751         else
4752                 return r;
4753
4754         /* Reset handler not implemented, use the default method */
4755         need_full_reset =
4756                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4757         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4758
4759         /*
4760          * ASIC reset has to be done on all XGMI hive nodes ASAP
4761          * to allow proper links negotiation in FW (within 1 sec)
4762          */
4763         if (!skip_hw_reset && need_full_reset) {
4764                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4765                         /* For XGMI run all resets in parallel to speed up the process */
4766                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4767                                 tmp_adev->gmc.xgmi.pending_reset = false;
4768                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4769                                         r = -EALREADY;
4770                         } else
4771                                 r = amdgpu_asic_reset(tmp_adev);
4772
4773                         if (r) {
4774                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4775                                          r, adev_to_drm(tmp_adev)->unique);
4776                                 break;
4777                         }
4778                 }
4779
4780                 /* For XGMI wait for all resets to complete before proceed */
4781                 if (!r) {
4782                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4783                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4784                                         flush_work(&tmp_adev->xgmi_reset_work);
4785                                         r = tmp_adev->asic_reset_res;
4786                                         if (r)
4787                                                 break;
4788                                 }
4789                         }
4790                 }
4791         }
4792
4793         if (!r && amdgpu_ras_intr_triggered()) {
4794                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4795                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4796                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4797                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4798                 }
4799
4800                 amdgpu_ras_intr_cleared();
4801         }
4802
4803         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4804                 if (need_full_reset) {
4805                         /* post card */
4806                         r = amdgpu_device_asic_init(tmp_adev);
4807                         if (r) {
4808                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4809                         } else {
4810                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4811                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4812                                 if (r)
4813                                         goto out;
4814
4815                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4816                                 if (r)
4817                                         goto out;
4818
4819                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4820 #ifdef CONFIG_DEV_COREDUMP
4821                                 tmp_adev->reset_vram_lost = vram_lost;
4822                                 memset(&tmp_adev->reset_task_info, 0,
4823                                                 sizeof(tmp_adev->reset_task_info));
4824                                 if (reset_context->job && reset_context->job->vm)
4825                                         tmp_adev->reset_task_info =
4826                                                 reset_context->job->vm->task_info;
4827                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4828 #endif
4829                                 if (vram_lost) {
4830                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4831                                         amdgpu_inc_vram_lost(tmp_adev);
4832                                 }
4833
4834                                 r = amdgpu_device_fw_loading(tmp_adev);
4835                                 if (r)
4836                                         return r;
4837
4838                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4839                                 if (r)
4840                                         goto out;
4841
4842                                 if (vram_lost)
4843                                         amdgpu_device_fill_reset_magic(tmp_adev);
4844
4845                                 /*
4846                                  * Add this ASIC back as tracked since the reset
4847                                  * has already completed successfully.
4848                                  */
4849                                 amdgpu_register_gpu_instance(tmp_adev);
4850
4851                                 if (!reset_context->hive &&
4852                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4853                                         amdgpu_xgmi_add_device(tmp_adev);
4854
4855                                 r = amdgpu_device_ip_late_init(tmp_adev);
4856                                 if (r)
4857                                         goto out;
4858
4859                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4860
4861                                 /*
4862                                  * The GPU enters a bad state once the number of
4863                                  * faulty pages caused by ECC errors reaches the
4864                                  * threshold, and RAS recovery is scheduled next.
4865                                  * So add a check here to break recovery if the bad
4866                                  * page threshold has indeed been exceeded, and
4867                                  * remind the user to retire this GPU or to set a
4868                                  * bigger bad_page_threshold value before probing
4869                                  * the driver again.
4870                                  */
4871                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4872                                         /* must succeed. */
4873                                         amdgpu_ras_resume(tmp_adev);
4874                                 } else {
4875                                         r = -EINVAL;
4876                                         goto out;
4877                                 }
4878
4879                                 /* Update PSP FW topology after reset */
4880                                 if (reset_context->hive &&
4881                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4882                                         r = amdgpu_xgmi_update_topology(
4883                                                 reset_context->hive, tmp_adev);
4884                         }
4885                 }
4886
4887 out:
4888                 if (!r) {
4889                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4890                         r = amdgpu_ib_ring_tests(tmp_adev);
4891                         if (r) {
4892                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4893                                 need_full_reset = true;
4894                                 r = -EAGAIN;
4895                                 goto end;
4896                         }
4897                 }
4898
4899                 if (!r)
4900                         r = amdgpu_device_recover_vram(tmp_adev);
4901                 else
4902                         tmp_adev->asic_reset_res = r;
4903         }
4904
4905 end:
4906         if (need_full_reset)
4907                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4908         else
4909                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4910         return r;
4911 }
4912
4913 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4914 {
4915
4916         switch (amdgpu_asic_reset_method(adev)) {
4917         case AMD_RESET_METHOD_MODE1:
4918                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4919                 break;
4920         case AMD_RESET_METHOD_MODE2:
4921                 adev->mp1_state = PP_MP1_STATE_RESET;
4922                 break;
4923         default:
4924                 adev->mp1_state = PP_MP1_STATE_NONE;
4925                 break;
4926         }
4927 }
4928
4929 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4930 {
4931         amdgpu_vf_error_trans_all(adev);
4932         adev->mp1_state = PP_MP1_STATE_NONE;
4933 }
4934
4935 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4936 {
4937         struct pci_dev *p = NULL;
4938
4939         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4940                         adev->pdev->bus->number, 1);
4941         if (p) {
4942                 pm_runtime_enable(&(p->dev));
4943                 pm_runtime_resume(&(p->dev));
4944         }
4945 }
4946
4947 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4948 {
4949         enum amd_reset_method reset_method;
4950         struct pci_dev *p = NULL;
4951         u64 expires;
4952
4953         /*
4954          * For now, only BACO and mode1 reset are confirmed
4955          * to suffer from the audio issue if not properly suspended.
4956          */
4957         reset_method = amdgpu_asic_reset_method(adev);
4958         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4959              (reset_method != AMD_RESET_METHOD_MODE1))
4960                 return -EINVAL;
4961
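        /* The display audio (HDA) controller is function 1 of the GPU's
         * PCI device, hence the devfn of 1 below. */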
4962         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4963                         adev->pdev->bus->number, 1);
4964         if (!p)
4965                 return -ENODEV;
4966
4967         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4968         if (!expires)
4969                 /*
4970                  * If we cannot get the audio device autosuspend delay,
4971                  * a fixed 4S interval will be used. Since 3S is the
4972                  * audio controller's default autosuspend delay, the 4S
4973                  * used here is guaranteed to cover it.
4974                  */
4975                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4976
4977         while (!pm_runtime_status_suspended(&(p->dev))) {
4978                 if (!pm_runtime_suspend(&(p->dev)))
4979                         break;
4980
4981                 if (expires < ktime_get_mono_fast_ns()) {
4982                         dev_warn(adev->dev, "failed to suspend display audio\n");
4983                         /* TODO: abort the succeeding gpu reset? */
4984                         return -ETIMEDOUT;
4985                 }
4986         }
4987
4988         pm_runtime_disable(&(p->dev));
4989
4990         return 0;
4991 }
4992
4993 static void amdgpu_device_recheck_guilty_jobs(
4994         struct amdgpu_device *adev, struct list_head *device_list_handle,
4995         struct amdgpu_reset_context *reset_context)
4996 {
4997         int i, r = 0;
4998
4999         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5000                 struct amdgpu_ring *ring = adev->rings[i];
5001                 int ret = 0;
5002                 struct drm_sched_job *s_job;
5003
5004                 if (!ring || !ring->sched.thread)
5005                         continue;
5006
5007                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5008                                 struct drm_sched_job, list);
5009                 if (s_job == NULL)
5010                         continue;
5011
5012                 /* Clear the job's guilty flag and rely on the following steps to decide the real one */
5013                 drm_sched_reset_karma(s_job);
5014                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5015
5016                 if (!s_job->s_fence->parent) {
5017                         DRM_WARN("Failed to get a HW fence for job!");
5018                         continue;
5019                 }
5020
5021                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5022                 if (ret == 0) { /* timeout */
5023                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5024                                                 ring->sched.name, s_job->id);
5025
5026
5027                         amdgpu_fence_driver_isr_toggle(adev, true);
5028
5029                         /* Clear this failed job from fence array */
5030                         amdgpu_fence_driver_clear_job_fences(ring);
5031
5032                         amdgpu_fence_driver_isr_toggle(adev, false);
5033
5034                         /* Since the job won't signal and we go for
5035                          * another resubmit, drop this parent pointer.
5036                          */
5037                         dma_fence_put(s_job->s_fence->parent);
5038                         s_job->s_fence->parent = NULL;
5039
5040                         /* set guilty */
5041                         drm_sched_increase_karma(s_job);
5042 retry:
5043                         /* do hw reset */
5044                         if (amdgpu_sriov_vf(adev)) {
5045                                 amdgpu_virt_fini_data_exchange(adev);
5046                                 r = amdgpu_device_reset_sriov(adev, false);
5047                                 if (r)
5048                                         adev->asic_reset_res = r;
5049                         } else {
5050                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5051                                           &reset_context->flags);
5052                                 r = amdgpu_do_asic_reset(device_list_handle,
5053                                                          reset_context);
5054                                 if (r && r == -EAGAIN)
5055                                         goto retry;
5056                         }
5057
5058                         /*
5059                          * add reset counter so that the following
5060                          * resubmitted job could flush vmid
5061                          */
5062                         atomic_inc(&adev->gpu_reset_counter);
5063                         continue;
5064                 }
5065
5066                 /* got the hw fence, signal finished fence */
5067                 atomic_dec(ring->sched.score);
5068                 dma_fence_get(&s_job->s_fence->finished);
5069                 dma_fence_signal(&s_job->s_fence->finished);
5070                 dma_fence_put(&s_job->s_fence->finished);
5071
5072                 /* remove node from list and free the job */
5073                 spin_lock(&ring->sched.job_list_lock);
5074                 list_del_init(&s_job->list);
5075                 spin_unlock(&ring->sched.job_list_lock);
5076                 ring->sched.ops->free_job(s_job);
5077         }
5078 }
5079
5080 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5081 {
5082         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5083
5084 #if defined(CONFIG_DEBUG_FS)
5085         if (!amdgpu_sriov_vf(adev))
5086                 cancel_work(&adev->reset_work);
5087 #endif
5088
5089         if (adev->kfd.dev)
5090                 cancel_work(&adev->kfd.reset_work);
5091
5092         if (amdgpu_sriov_vf(adev))
5093                 cancel_work(&adev->virt.flr_work);
5094
5095         if (con && adev->ras_enabled)
5096                 cancel_work(&con->recovery_work);
5097
5098 }
5099
5100
5101 /**
5102  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5103  *
5104  * @adev: amdgpu_device pointer
5105  * @job: the job that triggered the hang
5106  *
5107  * Attempt to reset the GPU if it has hung (all asics).
5108  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5109  * Returns 0 for success or an error on failure.
5110  */
5111
5112 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5113                               struct amdgpu_job *job,
5114                               struct amdgpu_reset_context *reset_context)
5115 {
5116         struct list_head device_list, *device_list_handle =  NULL;
5117         bool job_signaled = false;
5118         struct amdgpu_hive_info *hive = NULL;
5119         struct amdgpu_device *tmp_adev = NULL;
5120         int i, r = 0;
5121         bool need_emergency_restart = false;
5122         bool audio_suspended = false;
5123         int tmp_vram_lost_counter;
5124
5125         /*
5126          * Special case: RAS triggered and full reset isn't supported
5127          */
5128         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5129
5130         /*
5131          * Flush RAM to disk so that after reboot
5132          * the user can read log and see why the system rebooted.
5133          */
5134         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5135                 DRM_WARN("Emergency reboot.");
5136
5137                 ksys_sync_helper();
5138                 emergency_restart();
5139         }
5140
5141         dev_info(adev->dev, "GPU %s begin!\n",
5142                 need_emergency_restart ? "jobs stop":"reset");
5143
5144         if (!amdgpu_sriov_vf(adev))
5145                 hive = amdgpu_get_xgmi_hive(adev);
5146         if (hive)
5147                 mutex_lock(&hive->hive_lock);
5148
5149         reset_context->job = job;
5150         reset_context->hive = hive;
5151         /*
5152          * Build list of devices to reset.
5153          * In case we are in XGMI hive mode, resort the device list
5154          * In case we are in XGMI hive mode, re-sort the device list
5155          * to put adev in the first position.
5156         INIT_LIST_HEAD(&device_list);
5157         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5158                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5159                         list_add_tail(&tmp_adev->reset_list, &device_list);
5160                 if (!list_is_first(&adev->reset_list, &device_list))
5161                         list_rotate_to_front(&adev->reset_list, &device_list);
5162                 device_list_handle = &device_list;
5163         } else {
5164                 list_add_tail(&adev->reset_list, &device_list);
5165                 device_list_handle = &device_list;
5166         }
5167
5168         /* We need to lock reset domain only once both for XGMI and single device */
5169         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5170                                     reset_list);
5171         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5172
5173         /* block all schedulers and reset given job's ring */
5174         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5175
5176                 amdgpu_device_set_mp1_state(tmp_adev);
5177
5178                 /*
5179                  * Try to put the audio codec into suspend state
5180                  * before the gpu reset starts.
5181                  *
5182                  * Because the power domain of the graphics device
5183                  * is shared with the AZ power domain, without this
5184                  * we may change the audio hardware from behind
5185                  * the audio driver's back. That will trigger
5186                  * some audio codec errors.
5187                  */
5188                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5189                         audio_suspended = true;
5190
5191                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5192
5193                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5194
5195                 if (!amdgpu_sriov_vf(tmp_adev))
5196                         amdgpu_amdkfd_pre_reset(tmp_adev);
5197
5198                 /*
5199                  * Mark these ASICs to be reset as untracked first,
5200                  * and add them back after the reset completes.
5201                  */
5202                 amdgpu_unregister_gpu_instance(tmp_adev);
5203
5204                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5205
5206                 /* disable ras on ALL IPs */
5207                 if (!need_emergency_restart &&
5208                       amdgpu_device_ip_need_full_reset(tmp_adev))
5209                         amdgpu_ras_suspend(tmp_adev);
5210
5211                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5212                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5213
5214                         if (!ring || !ring->sched.thread)
5215                                 continue;
5216
5217                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5218
5219                         if (need_emergency_restart)
5220                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5221                 }
5222                 atomic_inc(&tmp_adev->gpu_reset_counter);
5223         }
5224
5225         if (need_emergency_restart)
5226                 goto skip_sched_resume;
5227
5228         /*
5229          * Must check guilty signal here since after this point all old
5230          * HW fences are force signaled.
5231          *
5232          * job->base holds a reference to parent fence
5233          */
5234         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5235                 job_signaled = true;
5236                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5237                 goto skip_hw_reset;
5238         }
5239
5240 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5241         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5242                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5243                 /* TODO: should we stop? */
5244                 if (r) {
5245                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5246                                   r, adev_to_drm(tmp_adev)->unique);
5247                         tmp_adev->asic_reset_res = r;
5248                 }
5249
5250                 /*
5251                  * Drop all pending non scheduler resets. Scheduler resets
5252                  * were already dropped during drm_sched_stop
5253                  */
5254                 amdgpu_device_stop_pending_resets(tmp_adev);
5255         }
5256
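        /* Snapshot the VRAM-lost counter so we can tell below whether this
         * reset actually lost VRAM (the guilty-job recheck is skipped if it did). */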
5257         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5258         /* Actual ASIC resets if needed.*/
5259         /* Host driver will handle XGMI hive reset for SRIOV */
5260         if (amdgpu_sriov_vf(adev)) {
5261                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5262                 if (r)
5263                         adev->asic_reset_res = r;
5264
5265                 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5266                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5267                         amdgpu_ras_resume(adev);
5268         } else {
5269                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5270                 if (r && r == -EAGAIN)
5271                         goto retry;
5272         }
5273
5274 skip_hw_reset:
5275
5276         /* Post ASIC reset for all devs .*/
5277         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5278
5279                 /*
5280                  * Sometimes a later bad compute job can block a good gfx job, as the gfx
5281                  * and compute rings share internal GC hardware. We add an additional
5282                  * guilty-job recheck step to find the real guilty job: it synchronously
5283                  * resubmits and waits for the first job to signal. If that job times out,
5284                  * we identify it as the real guilty job.
5285                  */
5286                 if (amdgpu_gpu_recovery == 2 &&
5287                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5288                         amdgpu_device_recheck_guilty_jobs(
5289                                 tmp_adev, device_list_handle, reset_context);
5290
5291                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5292                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5293
5294                         if (!ring || !ring->sched.thread)
5295                                 continue;
5296
5297                         /* No point in resubmitting jobs if we didn't HW reset */
5298                         if (!tmp_adev->asic_reset_res && !job_signaled)
5299                                 drm_sched_resubmit_jobs(&ring->sched);
5300
5301                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5302                 }
5303
5304                 if (adev->enable_mes)
5305                         amdgpu_mes_self_test(tmp_adev);
5306
5307                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5308                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5309                 }
5310
5311                 if (tmp_adev->asic_reset_res)
5312                         r = tmp_adev->asic_reset_res;
5313
5314                 tmp_adev->asic_reset_res = 0;
5315
5316                 if (r) {
5317                         /* bad news, how to tell it to userspace ? */
5318                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5319                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5320                 } else {
5321                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5322                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5323                                 DRM_WARN("smart shift update failed\n");
5324                 }
5325         }
5326
5327 skip_sched_resume:
5328         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5329                 /* unlock kfd: SRIOV would do it separately */
5330                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5331                         amdgpu_amdkfd_post_reset(tmp_adev);
5332
5333                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5334                  * so bring up kfd here if it was not initialized before.
5335                  */
5336                 if (!adev->kfd.init_complete)
5337                         amdgpu_amdkfd_device_init(adev);
5338
5339                 if (audio_suspended)
5340                         amdgpu_device_resume_display_audio(tmp_adev);
5341
5342                 amdgpu_device_unset_mp1_state(tmp_adev);
5343         }
5344
5345         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5346                                             reset_list);
5347         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5348
5349         if (hive) {
5350                 mutex_unlock(&hive->hive_lock);
5351                 amdgpu_put_xgmi_hive(hive);
5352         }
5353
5354         if (r)
5355                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5356
5357         atomic_set(&adev->reset_domain->reset_res, r);
5358         return r;
5359 }
5360
5361 /**
5362  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5363  *
5364  * @adev: amdgpu_device pointer
5365  *
5366  * Fetches and stores in the driver the PCIE capabilities (gen speed
5367  * and lanes) of the slot the device is in. Handles APUs and
5368  * virtualized environments where PCIE config space may not be available.
5369  */
5370 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5371 {
5372         struct pci_dev *pdev;
5373         enum pci_bus_speed speed_cap, platform_speed_cap;
5374         enum pcie_link_width platform_link_width;
5375
5376         if (amdgpu_pcie_gen_cap)
5377                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5378
5379         if (amdgpu_pcie_lane_cap)
5380                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5381
5382         /* covers APUs as well */
5383         if (pci_is_root_bus(adev->pdev->bus)) {
5384                 if (adev->pm.pcie_gen_mask == 0)
5385                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5386                 if (adev->pm.pcie_mlw_mask == 0)
5387                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5388                 return;
5389         }
5390
5391         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5392                 return;
5393
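        /* Ask the PCI core what the upstream link chain can actually deliver. */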
5394         pcie_bandwidth_available(adev->pdev, NULL,
5395                                  &platform_speed_cap, &platform_link_width);
5396
5397         if (adev->pm.pcie_gen_mask == 0) {
5398                 /* asic caps */
5399                 pdev = adev->pdev;
5400                 speed_cap = pcie_get_speed_cap(pdev);
5401                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5402                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5403                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5404                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5405                 } else {
5406                         if (speed_cap == PCIE_SPEED_32_0GT)
5407                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5408                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5409                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5410                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5411                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5412                         else if (speed_cap == PCIE_SPEED_16_0GT)
5413                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5414                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5415                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5416                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5417                         else if (speed_cap == PCIE_SPEED_8_0GT)
5418                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5419                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5420                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5421                         else if (speed_cap == PCIE_SPEED_5_0GT)
5422                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5423                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5424                         else
5425                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5426                 }
5427                 /* platform caps */
5428                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5429                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5430                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5431                 } else {
5432                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5433                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5434                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5435                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5436                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5437                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5438                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5439                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5440                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5441                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5442                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5443                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5444                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5445                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5446                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5447                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5448                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5449                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5450                         else
5451                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5452
5453                 }
5454         }
5455         if (adev->pm.pcie_mlw_mask == 0) {
5456                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5457                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5458                 } else {
5459                         switch (platform_link_width) {
5460                         case PCIE_LNK_X32:
5461                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5462                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5463                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5464                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5465                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5466                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5467                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5468                                 break;
5469                         case PCIE_LNK_X16:
5470                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5471                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5472                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5473                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5474                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5475                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5476                                 break;
5477                         case PCIE_LNK_X12:
5478                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5479                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5480                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5481                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5482                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5483                                 break;
5484                         case PCIE_LNK_X8:
5485                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5486                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5487                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5488                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5489                                 break;
5490                         case PCIE_LNK_X4:
5491                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5492                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5493                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5494                                 break;
5495                         case PCIE_LNK_X2:
5496                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5497                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5498                                 break;
5499                         case PCIE_LNK_X1:
5500                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5501                                 break;
5502                         default:
5503                                 break;
5504                         }
5505                 }
5506         }
5507 }
5508
5509 /**
5510  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5511  *
5512  * @adev: amdgpu_device pointer
5513  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5514  *
5515  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5516  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5517  * @peer_adev.
5518  */
5519 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5520                                       struct amdgpu_device *peer_adev)
5521 {
5522 #ifdef CONFIG_HSA_AMD_P2P
5523         uint64_t address_mask = peer_adev->dev->dma_mask ?
5524                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5525         resource_size_t aper_limit =
5526                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5527         bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
5528                                         &peer_adev->dev, 1, true) < 0);
5529
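        /* Peer access needs working PCIe P2P plus a large BAR whose whole
         * aperture falls within the peer's DMA addressable range. */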
5530         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5531                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5532                 !(adev->gmc.aper_base & address_mask ||
5533                   aper_limit & address_mask));
5534 #else
5535         return false;
5536 #endif
5537 }
5538
5539 int amdgpu_device_baco_enter(struct drm_device *dev)
5540 {
5541         struct amdgpu_device *adev = drm_to_adev(dev);
5542         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5543
5544         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5545                 return -ENOTSUPP;
5546
5547         if (ras && adev->ras_enabled &&
5548             adev->nbio.funcs->enable_doorbell_interrupt)
5549                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5550
5551         return amdgpu_dpm_baco_enter(adev);
5552 }
5553
5554 int amdgpu_device_baco_exit(struct drm_device *dev)
5555 {
5556         struct amdgpu_device *adev = drm_to_adev(dev);
5557         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5558         int ret = 0;
5559
5560         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5561                 return -ENOTSUPP;
5562
5563         ret = amdgpu_dpm_baco_exit(adev);
5564         if (ret)
5565                 return ret;
5566
5567         if (ras && adev->ras_enabled &&
5568             adev->nbio.funcs->enable_doorbell_interrupt)
5569                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5570
5571         if (amdgpu_passthrough(adev) &&
5572             adev->nbio.funcs->clear_doorbell_interrupt)
5573                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5574
5575         return 0;
5576 }
5577
5578 /**
5579  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5580  * @pdev: PCI device struct
5581  * @state: PCI channel state
5582  *
5583  * Description: Called when a PCI error is detected.
5584  *
5585  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5586  */
5587 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5588 {
5589         struct drm_device *dev = pci_get_drvdata(pdev);
5590         struct amdgpu_device *adev = drm_to_adev(dev);
5591         int i;
5592
5593         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5594
5595         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5596                 DRM_WARN("No support for XGMI hive yet...");
5597                 return PCI_ERS_RESULT_DISCONNECT;
5598         }
5599
5600         adev->pci_channel_state = state;
5601
5602         switch (state) {
5603         case pci_channel_io_normal:
5604                 return PCI_ERS_RESULT_CAN_RECOVER;
5605         /* Fatal error, prepare for slot reset */
5606         case pci_channel_io_frozen:
5607                 /*
5608                  * Locking adev->reset_domain->sem will prevent any external access
5609                  * to GPU during PCI error recovery
5610                  */
5611                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5612                 amdgpu_device_set_mp1_state(adev);
5613
5614                 /*
5615                  * Block any work scheduling as we do for regular GPU reset
5616                  * for the duration of the recovery
5617                  */
5618                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5619                         struct amdgpu_ring *ring = adev->rings[i];
5620
5621                         if (!ring || !ring->sched.thread)
5622                                 continue;
5623
5624                         drm_sched_stop(&ring->sched, NULL);
5625                 }
5626                 atomic_inc(&adev->gpu_reset_counter);
5627                 return PCI_ERS_RESULT_NEED_RESET;
5628         case pci_channel_io_perm_failure:
5629                 /* Permanent error, prepare for device removal */
5630                 return PCI_ERS_RESULT_DISCONNECT;
5631         }
5632
5633         return PCI_ERS_RESULT_NEED_RESET;
5634 }
5635
5636 /**
5637  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5638  * @pdev: pointer to PCI device
5639  */
5640 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5641 {
5643         DRM_INFO("PCI error: mmio enabled callback!!\n");
5644
5645         /* TODO - dump whatever for debugging purposes */
5646
5647         /* This is called only if amdgpu_pci_error_detected() returns
5648          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5649          * works, so there is no need to reset the slot.
5650          */
5651
5652         return PCI_ERS_RESULT_RECOVERED;
5653 }
5654
5655 /**
5656  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5657  * @pdev: PCI device struct
5658  *
5659  * Description: This routine is called by the pci error recovery
5660  * code after the PCI slot has been reset, just before we
5661  * should resume normal operations.
5662  */
5663 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5664 {
5665         struct drm_device *dev = pci_get_drvdata(pdev);
5666         struct amdgpu_device *adev = drm_to_adev(dev);
5667         int r, i;
5668         struct amdgpu_reset_context reset_context;
5669         u32 memsize;
5670         struct list_head device_list;
5671
5672         DRM_INFO("PCI error: slot reset callback!!\n");
5673
5674         memset(&reset_context, 0, sizeof(reset_context));
5675
5676         INIT_LIST_HEAD(&device_list);
5677         list_add_tail(&adev->reset_list, &device_list);
5678
5679         /* wait for asic to come out of reset */
5680         msleep(500);
5681
5682         /* Restore the PCI config space */
5683         amdgpu_device_load_pci_state(pdev);
5684
5685         /* confirm the ASIC came out of reset */
5686         for (i = 0; i < adev->usec_timeout; i++) {
5687                 memsize = amdgpu_asic_get_config_memsize(adev);
5688
5689                 if (memsize != 0xffffffff)
5690                         break;
5691                 udelay(1);
5692         }
5693         if (memsize == 0xffffffff) {
5694                 r = -ETIME;
5695                 goto out;
5696         }
5697
5698         reset_context.method = AMD_RESET_METHOD_NONE;
5699         reset_context.reset_req_dev = adev;
5700         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5701         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5702
5703         adev->no_hw_access = true;
5704         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5705         adev->no_hw_access = false;
5706         if (r)
5707                 goto out;
5708
5709         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5710
5711 out:
5712         if (!r) {
5713                 if (amdgpu_device_cache_pci_state(adev->pdev))
5714                         pci_restore_state(adev->pdev);
5715
5716                 DRM_INFO("PCIe error recovery succeeded\n");
5717         } else {
5718                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5719                 amdgpu_device_unset_mp1_state(adev);
5720                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5721         }
5722
5723         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5724 }
5725
5726 /**
5727  * amdgpu_pci_resume() - resume normal ops after PCI reset
5728  * @pdev: pointer to PCI device
5729  *
5730  * Called when the error recovery driver tells us that its
5731  * Called when the error recovery driver tells us that it's
5732  */
5733 void amdgpu_pci_resume(struct pci_dev *pdev)
5734 {
5735         struct drm_device *dev = pci_get_drvdata(pdev);
5736         struct amdgpu_device *adev = drm_to_adev(dev);
5737         int i;
5738
5740         DRM_INFO("PCI error: resume callback!!\n");
5741
5742         /* Only continue execution for the case of pci_channel_io_frozen */
5743         if (adev->pci_channel_state != pci_channel_io_frozen)
5744                 return;
5745
5746         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5747                 struct amdgpu_ring *ring = adev->rings[i];
5748
5749                 if (!ring || !ring->sched.thread)
5750                         continue;
5751
5753                 drm_sched_resubmit_jobs(&ring->sched);
5754                 drm_sched_start(&ring->sched, true);
5755         }
5756
5757         amdgpu_device_unset_mp1_state(adev);
5758         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5759 }
5760
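/**
 * amdgpu_device_cache_pci_state - cache the PCI configuration space
 *
 * @pdev: PCI device struct
 *
 * Save the current PCI configuration space of the device and keep a copy
 * in adev->pci_state so it can be restored after a reset.
 *
 * Returns true on success, false on failure.
 */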
5761 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5762 {
5763         struct drm_device *dev = pci_get_drvdata(pdev);
5764         struct amdgpu_device *adev = drm_to_adev(dev);
5765         int r;
5766
5767         r = pci_save_state(pdev);
5768         if (!r) {
5769                 kfree(adev->pci_state);
5770
5771                 adev->pci_state = pci_store_saved_state(pdev);
5772
5773                 if (!adev->pci_state) {
5774                         DRM_ERROR("Failed to store PCI saved state");
5775                         return false;
5776                 }
5777         } else {
5778                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5779                 return false;
5780         }
5781
5782         return true;
5783 }
5784
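/**
 * amdgpu_device_load_pci_state - restore the cached PCI configuration space
 *
 * @pdev: PCI device struct
 *
 * Load the configuration space previously cached by
 * amdgpu_device_cache_pci_state() and write it back to the device.
 *
 * Returns true on success, false if no state was cached or the load failed.
 */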
5785 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5786 {
5787         struct drm_device *dev = pci_get_drvdata(pdev);
5788         struct amdgpu_device *adev = drm_to_adev(dev);
5789         int r;
5790
5791         if (!adev->pci_state)
5792                 return false;
5793
5794         r = pci_load_saved_state(pdev, adev->pci_state);
5795
5796         if (!r) {
5797                 pci_restore_state(pdev);
5798         } else {
5799                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5800                 return false;
5801         }
5802
5803         return true;
5804 }
5805
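/**
 * amdgpu_device_flush_hdp - flush the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on (may be NULL)
 *
 * Flush the HDP cache, either by emitting a flush packet on @ring when the
 * ring supports it, or through the ASIC MMIO path otherwise. The flush is
 * skipped on bare-metal APUs and on ASICs that are connected to the CPU
 * via XGMI, where it is not needed.
 */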
5806 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5807                 struct amdgpu_ring *ring)
5808 {
5809 #ifdef CONFIG_X86_64
5810         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5811                 return;
5812 #endif
5813         if (adev->gmc.xgmi.connected_to_cpu)
5814                 return;
5815
5816         if (ring && ring->funcs->emit_hdp_flush)
5817                 amdgpu_ring_emit_hdp_flush(ring);
5818         else
5819                 amdgpu_asic_flush_hdp(adev, ring);
5820 }
5821
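/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with (may be NULL)
 *
 * Invalidate the HDP cache through the ASIC MMIO path. Skipped on
 * bare-metal APUs and on ASICs that are connected to the CPU via XGMI.
 */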
5822 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5823                 struct amdgpu_ring *ring)
5824 {
5825 #ifdef CONFIG_X86_64
5826         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5827                 return;
5828 #endif
5829         if (adev->gmc.xgmi.connected_to_cpu)
5830                 return;
5831
5832         amdgpu_asic_invalidate_hdp(adev, ring);
5833 }
5834
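/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 *
 * @adev: amdgpu_device pointer
 *
 * Returns non-zero if the reset domain the device belongs to is currently
 * performing a GPU reset, zero otherwise.
 */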
5835 int amdgpu_in_reset(struct amdgpu_device *adev)
5836 {
5837         return atomic_read(&adev->reset_domain->in_gpu_reset);
5838 }
5839
5840 /**
5841  * amdgpu_device_halt() - bring hardware to some kind of halt state
5842  *
5843  * @adev: amdgpu_device pointer
5844  *
5845  * Bring hardware to some kind of halt state so that no one can touch it
5846  * any more. It helps to maintain the error context when an error occurs.
5847  * Compared to a simple hang, the system will remain stable at least for
5848  * SSH access. It should then be trivial to inspect the hardware state and
5849  * see what's going on. Implemented as follows:
5850  *
5851  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
5852  *    clears all CPU mappings to the device and disallows remappings through page faults
5853  * 2. amdgpu_irq_disable_all() disables all interrupts
5854  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5855  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5856  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5857  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5858  *    flush any in-flight DMA operations
5859  */
5860 void amdgpu_device_halt(struct amdgpu_device *adev)
5861 {
5862         struct pci_dev *pdev = adev->pdev;
5863         struct drm_device *ddev = adev_to_drm(adev);
5864
5865         drm_dev_unplug(ddev);
5866
5867         amdgpu_irq_disable_all(adev);
5868
5869         amdgpu_fence_driver_hw_fini(adev);
5870
5871         adev->no_hw_access = true;
5872
5873         amdgpu_device_unmap_mmio(adev);
5874
5875         pci_disable_device(pdev);
5876         pci_wait_for_pending_transaction(pdev);
5877 }
5878
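/**
 * amdgpu_device_pcie_port_rreg - read a PCIE port indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to read
 *
 * Read a register through the PCIE port index/data pair provided by the
 * NBIO block and return its value.
 */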
5879 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5880                                 u32 reg)
5881 {
5882         unsigned long flags, address, data;
5883         u32 r;
5884
5885         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5886         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5887
5888         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5889         WREG32(address, reg * 4);
5890         (void)RREG32(address);
5891         r = RREG32(data);
5892         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5893         return r;
5894 }
5895
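/**
 * amdgpu_device_pcie_port_wreg - write a PCIE port indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to write
 * @v: value to write
 *
 * Write @v to a register through the PCIE port index/data pair provided
 * by the NBIO block.
 */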
5896 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5897                                 u32 reg, u32 v)
5898 {
5899         unsigned long flags, address, data;
5900
5901         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5902         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5903
5904         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5905         WREG32(address, reg * 4);
5906         (void)RREG32(address);
5907         WREG32(data, v);
5908         (void)RREG32(data);
5909         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5910 }