/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000
#define AMDGPU_MAX_RETRY_LIMIT          2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "ALDEBARAN",
        "NAVI10",
        "CYAN_SKILLFISH",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "BEIGE_GOBY",
        "YELLOW_CARP",
        "IP DISCOVERY",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

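/*
 * Illustrative sketch (not part of the upstream file): exposing the
 * attribute above is done with the standard sysfs helper; DEVICE_ATTR()
 * generates the dev_attr_pcie_replay_count object used here:
 *
 *   int r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
 *   if (r)
 *           dev_err(adev->dev, "Could not create pcie_replay_count");
 */
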
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}

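/*
 * Sketch (illustrative, not upstream code): runtime power management
 * typically selects among these mechanisms in priority order, roughly:
 *
 *   if (amdgpu_device_supports_px(dev))
 *           ... use ATPX dGPU power control ...
 *   else if (amdgpu_device_supports_boco(dev))
 *           ... use ACPI BOCO/PR3 ...
 *   else if (amdgpu_device_supports_baco(dev))
 *           ... use BACO ...
 */
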
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }

        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}

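/*
 * Usage sketch (illustrative): reading one dword at VRAM offset 0
 * through the MM_INDEX/MM_DATA window:
 *
 *   uint32_t val;
 *   amdgpu_device_mm_access(adev, 0, &val, sizeof(val), false);
 */
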
/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try using the vram aperture to access vram first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* use MM access for the rest of vram */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}

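/*
 * Usage sketch (illustrative): copying a small buffer out of VRAM and
 * letting the helper pick the aperture path or the MM fallback:
 *
 *   u8 data[16];
 *   amdgpu_device_vram_access(adev, pos, data, sizeof(data), false);
 */
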
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
         * the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_domain->sem))
                        up_read(&adev->reset_domain->sem);
                else
                        lockdep_assert_held(&adev->reset_domain->sem);
        }
#endif
        return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

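/*
 * Sketch (illustrative): the RREG32()/WREG32() macros used throughout the
 * driver are, conceptually, thin wrappers over these helpers:
 *
 *   #define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
 */
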
/*
 * MMIO register read with byte offset helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with byte offset helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to write to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

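/*
 * Sketch (illustrative): a read-modify-write of a register field using the
 * helpers above; REG and FIELD_MASK are placeholder names, not real
 * registers:
 *
 *   uint32_t v = amdgpu_device_rreg(adev, REG, 0);
 *   v = (v & ~FIELD_MASK) | new_field_value;
 *   amdgpu_device_wreg(adev, REG, v, 0);
 */
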
/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

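/*
 * Sketch (illustrative): a ring typically publishes its write pointer
 * through these helpers, e.g.
 *
 *   amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */
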
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
                                  u32 reg_addr)
{
        unsigned long flags;
        u64 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

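/*
 * Sketch (illustrative): an ASIC's pcie_rreg/pcie_wreg callbacks are
 * typically thin wrappers around these helpers with the index/data
 * register offsets baked in; the EXAMPLE_* names below are placeholders,
 * not real registers:
 *
 *   static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           return amdgpu_device_indirect_rreg(adev, EXAMPLE_PCIE_INDEX,
 *                                              EXAMPLE_PCIE_DATA, reg);
 *   }
 */
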
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        amdgpu_asic_pre_asic_init(adev);

        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
                return amdgpu_atomfirmware_asic_init(adev, true);
        else
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}

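/*
 * Example (illustrative): a golden-register list is a flat array of
 * {register, and_mask, or_mask} triplets; an and_mask of 0xffffffff
 * writes or_mask directly. mmEXAMPLE_REG is a placeholder name:
 *
 *   static const u32 example_golden_settings[] = {
 *           mmEXAMPLE_REG, 0x0000000f, 0x00000001,
 *   };
 *   amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                           ARRAY_SIZE(example_golden_settings));
 */
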
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        if (adev->enable_mes) {
                adev->doorbell.num_doorbells =
                        adev->doorbell.size / sizeof(u32);
        } else {
                adev->doorbell.num_doorbells =
                        min_t(u32, adev->doorbell.size / sizeof(u32),
                              adev->doorbell_index.max_assignment+1);
                if (adev->doorbell.num_doorbells == 0)
                        return -EINVAL;

                /* For Vega, reserve and map two pages on the doorbell BAR since the
                 * SDMA paging queue doorbell uses the second page. The
                 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
                 * doorbells are in the first page, so with the paging queue enabled
                 * num_doorbells is extended by one page (0x400 dwords).
                 */
                if (adev->asic_type >= CHIP_VEGA10)
                        adev->doorbell.num_doorbells += 0x400;
        }

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}



/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}

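/*
 * Usage sketch (illustrative): allocating a writeback slot, using it for
 * a fence/rptr shadow, then freeing it:
 *
 *   u32 wb;
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           ... CPU address: &adev->wb.wb[wb] ...
 *           ... GPU address: adev->wb.gpu_addr + wb * 4 ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */
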
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize, we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case, after
                 * VM reboot some old SMC firmware still needs the driver to do vPost,
                 * otherwise the GPU hangs. SMC firmware versions 22.15 and above
                 * don't have this flaw, so we force vPost for versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
        switch (amdgpu_aspm) {
        case -1:
                break;
        case 0:
                return false;
        case 1:
                return true;
        default:
                return false;
        }
        return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
                (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

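/*
 * Note (illustrative): amdgpu_smu_memory_pool_size is specified in units
 * of 256MB, hence the shift above: a module parameter value of 2 yields
 * 2 << 28 = 512MB for adev->pm.smu_prv_buffer_size.
 */
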
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
                return 0;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                break;
        case CHIP_RENOIR:
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                break;
        case CHIP_VANGOGH:
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
                break;
        case CHIP_YELLOW_CARP:
                break;
        case CHIP_CYAN_SKILLFISH:
                if ((adev->pdev->device == 0x13FE) ||
                    (adev->pdev->device == 0x143F))
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
                break;
        default:
                break;
        }

        return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                                 amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
                dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
                amdgpu_reset_method = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        return 0;
}

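/*
 * Example (illustrative): booting with amdgpu.sched_jobs=6 on the kernel
 * command line triggers the power-of-two check above, and the value is
 * rounded up so the driver proceeds with amdgpu_sched_jobs == 8.
 */
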
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes
1572  * the asics before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575                                         enum vga_switcheroo_state state)
1576 {
1577         struct drm_device *dev = pci_get_drvdata(pdev);
1578         int r;
1579
1580         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581                 return;
1582
1583         if (state == VGA_SWITCHEROO_ON) {
1584                 pr_info("switched on\n");
1585                 /* don't suspend or resume card normally */
1586                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588                 pci_set_power_state(pdev, PCI_D0);
1589                 amdgpu_device_load_pci_state(pdev);
1590                 r = pci_enable_device(pdev);
1591                 if (r)
1592                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1593                 amdgpu_device_resume(dev, true);
1594
1595                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596         } else {
1597                 pr_info("switched off\n");
1598                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599                 amdgpu_device_suspend(dev, true);
1600                 amdgpu_device_cache_pci_state(pdev);
1601                 /* Shut down the device */
1602                 pci_disable_device(pdev);
1603                 pci_set_power_state(pdev, PCI_D3cold);
1604                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605         }
1606 }
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
 * Callback for the switcheroo driver.  Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
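
/*
 * Illustrative sketch, not part of this file: the ops table above is
 * handed to the vga_switcheroo framework during device init, roughly:
 *
 *        bool px = amdgpu_device_supports_px(adev_to_drm(adev));
 *
 *        if (px)
 *                vga_switcheroo_register_client(adev->pdev,
 *                                               &amdgpu_switcheroo_ops, px);
 *
 * vga_switcheroo then calls set_gpu_state/can_switch as the active GPU
 * changes.
 */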
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
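
/*
 * Usage sketch (illustrative): gate clockgating for all GFX IP instances.
 *
 *        int r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                                       AMD_IP_BLOCK_TYPE_GFX,
 *                                                       AMD_CG_STATE_GATE);
 *        if (r)
 *                dev_warn(adev->dev, "GFX clockgating failed (%d)\n", r);
 */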
1668
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
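
/*
 * Usage sketch (illustrative): collect the CG feature flags and test one,
 * e.g. GFX medium-grain clockgating.
 *
 *        u64 flags = 0;
 *
 *        amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *        if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *                dev_info(adev->dev, "GFX MGCG is enabled\n");
 */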
1726
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
 * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
        return 0;
}
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
 * Checks if the hardware IP is idle.
 * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
        return true;
}
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
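
/*
 * Usage sketch (illustrative): look up the GFX IP block and report its
 * version, guarding against ASICs that lack the block.
 *
 *        struct amdgpu_ip_block *ip =
 *                amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *        if (ip)
 *                dev_info(adev->dev, "GFX v%d.%d\n",
 *                         ip->version->major, ip->version->minor);
 */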
1800
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
 * Returns 0 if the IP block's version is equal to or greater than the
 * requested @major.@minor version, 1 if it is smaller or the ip_block
 * doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
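
/*
 * Usage sketch (illustrative); note the inverted sense of the return
 * value, 0 meaning "at least this version":
 *
 *        if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                                7, 0))
 *                dev_dbg(adev->dev, "SMC is at least v7.0\n");
 */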
1825
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
1861
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
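
/*
 * Illustrative examples of the configuration string parsed above
 * (semicolon-separated PCI addresses, each with an optional crtc count;
 * the addresses here are hypothetical):
 *
 *        amdgpu.virtual_display=0000:04:00.0,2
 *        amdgpu.virtual_display=all,1
 *
 * Counts outside 1..6 are clamped, and a missing or unparsable count
 * falls back to 1.
 */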
1917
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
                 * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
                 * The SOC bounding box info is not integrated into the
                 * discovery table, so we always need to parse it from the
                 * gpu info firmware when needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
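
/*
 * Illustrative mapping from ASIC to the firmware file requested above;
 * fw_name is built as "amdgpu/%s_gpu_info.bin", so for example:
 *
 *        CHIP_VEGA10          -> amdgpu/vega10_gpu_info.bin
 *        CHIP_RAVEN (Picasso) -> amdgpu/picasso_gpu_info.bin
 *        CHIP_NAVI12          -> amdgpu/navi12_gpu_info.bin
 */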
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
                        /* get pf2vf msg info at its earliest time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
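
/*
 * Illustrative sketch: amdgpu_ip_block_mask is a debug module parameter;
 * clearing bit i makes the early_init loop above mark IP block i invalid.
 * A hypothetical value (the default mask enables every block):
 *
 *        amdgpu.ip_block_mask=0xfffffffd
 *
 * would disable IP block 1 and keep the rest.
 */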
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
                        /* no need to do the fw loading again if already done */
                        if (adev->ip_blocks[i].status.hw)
                                break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to setup the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
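
/*
 * Illustrative note: the per-ring timeouts chosen above come from the
 * amdgpu.lockup_timeout module parameter (values here are hypothetical):
 *
 *        amdgpu.lockup_timeout=10000
 *        amdgpu.lockup_timeout=10000,20000,10000,10000
 *
 * The single-value form sets the non-compute job timeout on bare metal;
 * the four-value form sets gfx, compute, sdma and video timeouts in ms.
 */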
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369                         /* need to do common hw init early so everything is set up for gmc */
2370                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371                         if (r) {
2372                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373                                 goto init_failed;
2374                         }
2375                         adev->ip_blocks[i].status.hw = true;
2376                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377                         /* need to do gmc hw init early so we can allocate gpu mem */
2378                         /* Try to reserve bad pages early */
2379                         if (amdgpu_sriov_vf(adev))
2380                                 amdgpu_virt_exchange_data(adev);
2381
2382                         r = amdgpu_device_vram_scratch_init(adev);
2383                         if (r) {
2384                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385                                 goto init_failed;
2386                         }
2387                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388                         if (r) {
2389                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390                                 goto init_failed;
2391                         }
2392                         r = amdgpu_device_wb_init(adev);
2393                         if (r) {
2394                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395                                 goto init_failed;
2396                         }
2397                         adev->ip_blocks[i].status.hw = true;
2398
2399                         /* right after GMC hw init, we create CSA */
2400                         if (amdgpu_mcbp) {
2401                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2403                                                                 AMDGPU_CSA_SIZE);
2404                                 if (r) {
2405                                         DRM_ERROR("allocate CSA failed %d\n", r);
2406                                         goto init_failed;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412         if (amdgpu_sriov_vf(adev))
2413                 amdgpu_virt_init_data_exchange(adev);
2414
2415         r = amdgpu_ib_pool_init(adev);
2416         if (r) {
2417                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419                 goto init_failed;
2420         }
2421
2422         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase1(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         r = amdgpu_device_fw_loading(adev);
2431         if (r)
2432                 goto init_failed;
2433
2434         r = amdgpu_device_ip_hw_init_phase2(adev);
2435         if (r)
2436                 goto init_failed;
2437
        /*
         * Retired pages will be loaded from eeprom and reserved here;
         * this should be called after amdgpu_device_ip_hw_init_phase2, since
         * for some ASICs the RAS EEPROM code relies on the SMU being fully
         * functional for I2C communication, which is only true at this point.
         *
         * amdgpu_ras_recovery_init may fail, but the upper layers only care
         * about failures caused by a bad gpu state and stop the amdgpu init
         * process accordingly.  For other failures it still releases all the
         * resources and prints an error message, rather than returning a
         * negative value to the upper level.
         *
         * Note: theoretically, this should be called before all vram
         * allocations to protect retired pages from abuse.
         */
2453         r = amdgpu_ras_recovery_init(adev);
2454         if (r)
2455                 goto init_failed;
2456
        /* In the case of XGMI, grab an extra reference on the reset domain for this device */
2460         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461                 if (amdgpu_xgmi_add_device(adev) == 0) {
2462                         if (!amdgpu_sriov_vf(adev)) {
2463                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464
2465                                 if (!hive->reset_domain ||
2466                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467                                         r = -ENOENT;
2468                                         amdgpu_put_xgmi_hive(hive);
2469                                         goto init_failed;
2470                                 }
2471
2472                                 /* Drop the early temporary reset domain we created for device */
2473                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474                                 adev->reset_domain = hive->reset_domain;
2475                                 amdgpu_put_xgmi_hive(hive);
2476                         }
2477                 }
2478         }
2479
2480         r = amdgpu_device_init_schedulers(adev);
2481         if (r)
2482                 goto init_failed;
2483
2484         /* Don't init kfd if whole hive need to be reset during init */
2485         if (!adev->gmc.xgmi.pending_reset)
2486                 amdgpu_amdkfd_device_init(adev);
2487
2488         amdgpu_fru_get_product_info(adev);
2489
2490 init_failed:
2491         if (amdgpu_sriov_vf(adev))
2492                 amdgpu_virt_release_full_gpu(adev, true);
2493
2494         return r;
2495 }
2496
2497 /**
2498  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499  *
2500  * @adev: amdgpu_device pointer
2501  *
2502  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2503  * this function before a GPU reset.  If the value is retained after a
 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2505  */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510
2511 /**
2512  * amdgpu_device_check_vram_lost - check if vram is valid
2513  *
2514  * @adev: amdgpu_device pointer
2515  *
2516  * Checks the reset magic value written to the gart pointer in VRAM.
2517  * The driver calls this after a GPU reset to see if the contents of
 * VRAM have been lost.
 * Returns true if vram is lost, false if not.
2520  */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523         if (memcmp(adev->gart.ptr, adev->reset_magic,
2524                         AMDGPU_RESET_MAGIC_NUM))
2525                 return true;
2526
2527         if (!amdgpu_in_reset(adev))
2528                 return false;
2529
2530         /*
2531          * For all ASICs with baco/mode1 reset, the VRAM is
2532          * always assumed to be lost.
2533          */
2534         switch (amdgpu_asic_reset_method(adev)) {
2535         case AMD_RESET_METHOD_BACO:
2536         case AMD_RESET_METHOD_MODE1:
2537                 return true;
2538         default:
2539                 return false;
2540         }
2541 }
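
/*
 * Usage sketch (illustrative): the reset path pairs the two helpers
 * above around an ASIC reset, roughly:
 *
 *        amdgpu_device_fill_reset_magic(adev);
 *        ...reset the ASIC...
 *        if (amdgpu_device_check_vram_lost(adev))
 *                dev_info(adev->dev, "VRAM is lost due to GPU reset!\n");
 */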
2542
2543 /**
2544  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545  *
2546  * @adev: amdgpu_device pointer
2547  * @state: clockgating state (gate or ungate)
2548  *
2549  * The list of all the hardware IPs that make up the asic is walked and the
2550  * set_clockgating_state callbacks are run.
 * During late init this is used to enable clockgating for the hardware IPs;
 * during fini or suspend it is used to disable clockgating.
2553  * Returns 0 on success, negative error code on failure.
 */
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557                                enum amd_clockgating_state state)
2558 {
2559         int i, j, r;
2560
2561         if (amdgpu_emu_mode == 1)
2562                 return 0;
2563
2564         for (j = 0; j < adev->num_ip_blocks; j++) {
2565                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566                 if (!adev->ip_blocks[i].status.late_initialized)
2567                         continue;
2568                 /* skip CG for GFX on S0ix */
2569                 if (adev->in_s0ix &&
2570                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571                         continue;
2572                 /* skip CG for VCE/UVD, it's handled specially */
2573                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578                         /* enable clockgating to save power */
2579                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580                                                                                      state);
2581                         if (r) {
2582                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583                                           adev->ip_blocks[i].version->funcs->name, r);
2584                                 return r;
2585                         }
2586                 }
2587         }
2588
2589         return 0;
2590 }
2591
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593                                enum amd_powergating_state state)
2594 {
2595         int i, j, r;
2596
2597         if (amdgpu_emu_mode == 1)
2598                 return 0;
2599
2600         for (j = 0; j < adev->num_ip_blocks; j++) {
2601                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602                 if (!adev->ip_blocks[i].status.late_initialized)
2603                         continue;
2604                 /* skip PG for GFX on S0ix */
2605                 if (adev->in_s0ix &&
2606                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607                         continue;
                /* skip PG for VCE/UVD, it's handled specially */
2609                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614                         /* enable powergating to save power */
2615                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616                                                                                         state);
2617                         if (r) {
2618                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619                                           adev->ip_blocks[i].version->funcs->name, r);
2620                                 return r;
2621                         }
2622                 }
2623         }
2624         return 0;
2625 }
2626
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629         struct amdgpu_gpu_instance *gpu_ins;
2630         struct amdgpu_device *adev;
2631         int i, ret = 0;
2632
2633         mutex_lock(&mgpu_info.mutex);
2634
2635         /*
2636          * MGPU fan boost feature should be enabled
2637          * only when there are two or more dGPUs in
2638          * the system
2639          */
2640         if (mgpu_info.num_dgpu < 2)
2641                 goto out;
2642
2643         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645                 adev = gpu_ins->adev;
2646                 if (!(adev->flags & AMD_IS_APU) &&
2647                     !gpu_ins->mgpu_fan_enabled) {
2648                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649                         if (ret)
2650                                 break;
2651
2652                         gpu_ins->mgpu_fan_enabled = 1;
2653                 }
2654         }
2655
2656 out:
2657         mutex_unlock(&mgpu_info.mutex);
2658
2659         return ret;
2660 }
2661
2662 /**
2663  * amdgpu_device_ip_late_init - run late init for hardware IPs
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Late initialization pass for hardware IPs.  The list of all the hardware
2668  * IPs that make up the asic is walked and the late_init callbacks are run.
2669  * late_init covers any special initialization that an IP requires
 * after all of them have been initialized or something that needs to happen
2671  * late in the init process.
2672  * Returns 0 on success, negative error code on failure.
2673  */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676         struct amdgpu_gpu_instance *gpu_instance;
2677         int i = 0, r;
2678
2679         for (i = 0; i < adev->num_ip_blocks; i++) {
2680                 if (!adev->ip_blocks[i].status.hw)
2681                         continue;
2682                 if (adev->ip_blocks[i].version->funcs->late_init) {
2683                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684                         if (r) {
2685                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686                                           adev->ip_blocks[i].version->funcs->name, r);
2687                                 return r;
2688                         }
2689                 }
2690                 adev->ip_blocks[i].status.late_initialized = true;
2691         }
2692
2693         r = amdgpu_ras_late_init(adev);
2694         if (r) {
2695                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696                 return r;
2697         }
2698
2699         amdgpu_ras_set_error_query_ready(adev, true);
2700
2701         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703
2704         amdgpu_device_fill_reset_magic(adev);
2705
2706         r = amdgpu_device_enable_mgpu_fan_boost();
2707         if (r)
2708                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709
        /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
        if (amdgpu_passthrough(adev) &&
            ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
             adev->asic_type == CHIP_ALDEBARAN))
2713                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714
2715         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716                 mutex_lock(&mgpu_info.mutex);
2717
2718                 /*
2719                  * Reset device p-state to low as this was booted with high.
2720                  *
2721                  * This should be performed only after all devices from the same
2722                  * hive get initialized.
2723                  *
                 * However, the number of devices in a hive is not known in
                 * advance, as it is counted one by one during device
                 * initialization.
2726                  *
2727                  * So, we wait for all XGMI interlinked devices initialized.
2728                  * This may bring some delays as those devices may come from
2729                  * different hives. But that should be OK.
2730                  */
2731                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2733                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2735                                         continue;
2736
2737                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738                                                 AMDGPU_XGMI_PSTATE_MIN);
2739                                 if (r) {
2740                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2741                                         break;
2742                                 }
2743                         }
2744                 }
2745
2746                 mutex_unlock(&mgpu_info.mutex);
2747         }
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
 * For ASICs that need to disable the SMC first
2758  */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761         int i, r;
2762
2763         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764                 return;
2765
2766         for (i = 0; i < adev->num_ip_blocks; i++) {
2767                 if (!adev->ip_blocks[i].status.hw)
2768                         continue;
2769                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771                         /* XXX handle errors */
2772                         if (r) {
2773                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774                                           adev->ip_blocks[i].version->funcs->name, r);
2775                         }
2776                         adev->ip_blocks[i].status.hw = false;
2777                         break;
2778                 }
2779         }
2780 }
2781
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784         int i, r;
2785
2786         for (i = 0; i < adev->num_ip_blocks; i++) {
2787                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2788                         continue;
2789
2790                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791                 if (r) {
2792                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793                                   adev->ip_blocks[i].version->funcs->name, r);
2794                 }
2795         }
2796
2797         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799
2800         amdgpu_amdkfd_suspend(adev, false);
2801
        /* Workaround for ASICs that need to disable the SMC first */
2803         amdgpu_device_smu_fini_early(adev);
2804
2805         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806                 if (!adev->ip_blocks[i].status.hw)
2807                         continue;
2808
2809                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810                 /* XXX handle errors */
2811                 if (r) {
2812                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813                                   adev->ip_blocks[i].version->funcs->name, r);
2814                 }
2815
2816                 adev->ip_blocks[i].status.hw = false;
2817         }
2818
2819         if (amdgpu_sriov_vf(adev)) {
2820                 if (amdgpu_virt_release_full_gpu(adev, false))
2821                         DRM_ERROR("failed to release exclusive mode on fini\n");
2822         }
2823
2824         return 0;
2825 }
2826
2827 /**
2828  * amdgpu_device_ip_fini - run fini for hardware IPs
2829  *
2830  * @adev: amdgpu_device pointer
2831  *
2832  * Main teardown pass for hardware IPs.  The list of all the hardware
2833  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834  * are run.  hw_fini tears down the hardware associated with each IP
2835  * and sw_fini tears down any software state associated with each IP.
2836  * Returns 0 on success, negative error code on failure.
2837  */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840         int i, r;
2841
2842         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843                 amdgpu_virt_release_ras_err_handler_data(adev);
2844
2845         if (adev->gmc.xgmi.num_physical_nodes > 1)
2846                 amdgpu_xgmi_remove_device(adev);
2847
2848         amdgpu_amdkfd_device_fini_sw(adev);
2849
2850         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851                 if (!adev->ip_blocks[i].status.sw)
2852                         continue;
2853
2854                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855                         amdgpu_ucode_free_bo(adev);
2856                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2857                         amdgpu_device_wb_fini(adev);
2858                         amdgpu_device_vram_scratch_fini(adev);
2859                         amdgpu_ib_pool_fini(adev);
2860                 }
2861
2862                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863                 /* XXX handle errors */
2864                 if (r) {
2865                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866                                   adev->ip_blocks[i].version->funcs->name, r);
2867                 }
2868                 adev->ip_blocks[i].status.sw = false;
2869                 adev->ip_blocks[i].status.valid = false;
2870         }
2871
2872         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873                 if (!adev->ip_blocks[i].status.late_initialized)
2874                         continue;
2875                 if (adev->ip_blocks[i].version->funcs->late_fini)
2876                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877                 adev->ip_blocks[i].status.late_initialized = false;
2878         }
2879
2880         amdgpu_ras_fini(adev);
2881
2882         return 0;
2883 }
2884
2885 /**
2886  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887  *
2888  * @work: work_struct.
2889  */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892         struct amdgpu_device *adev =
2893                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2894         int r;
2895
2896         r = amdgpu_ib_ring_tests(adev);
2897         if (r)
2898                 DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903         struct amdgpu_device *adev =
2904                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905
2906         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908
2909         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910                 adev->gfx.gfx_off_state = true;
2911 }
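
/*
 * Illustrative sketch: this delayed work is normally driven through
 * amdgpu_gfx_off_ctrl(), which ref-counts disable requests and only
 * re-arms the work once the last one is dropped, roughly:
 *
 *        amdgpu_gfx_off_ctrl(adev, false);
 *        ...safely touch GFX registers...
 *        amdgpu_gfx_off_ctrl(adev, true);
 */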
2912
2913 /**
2914  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915  *
2916  * @adev: amdgpu_device pointer
2917  *
2918  * First suspend function for hardware IPs.  Clockgating and powergating
2919  * are disabled and the suspend callbacks are run for the display (DCE)
2920  * blocks only; the remaining IPs are suspended in phase 2.  suspend puts
2921  * the hardware and software state of each IP into a state suitable for suspend.
2922  * Returns 0 on success, negative error code on failure.
2923  */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926         int i, r;
2927
2928         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930
2931         /*
2932          * Per the PMFW team's suggestion, the driver needs to disable the
2933          * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
2934          * scenarios. Add the missing df cstate disablement here.
2935          */
2936         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2937                 dev_warn(adev->dev, "Failed to disallow df cstate");
2938
2939         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2940                 if (!adev->ip_blocks[i].status.valid)
2941                         continue;
2942
2943                 /* displays are handled separately */
2944                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2945                         continue;
2946
2948                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2949                 /* XXX handle errors */
2950                 if (r) {
2951                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2952                                   adev->ip_blocks[i].version->funcs->name, r);
2953                         return r;
2954                 }
2955
2956                 adev->ip_blocks[i].status.hw = false;
2957         }
2958
2959         return 0;
2960 }
2961
2962 /**
2963  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2964  *
2965  * @adev: amdgpu_device pointer
2966  *
2967  * Second suspend function for hardware IPs.  The list of all the hardware
2968  * IPs that make up the asic is walked and the suspend callbacks are run for
2969  * every block except the displays, which were handled in phase 1.  suspend
2970  * puts the hardware and software state of each IP into a state suitable for suspend.
2971  * Returns 0 on success, negative error code on failure.
2972  */
2973 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2974 {
2975         int i, r;
2976
2977         if (adev->in_s0ix)
2978                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2979
2980         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2981                 if (!adev->ip_blocks[i].status.valid)
2982                         continue;
2983                 /* displays are handled in phase1 */
2984                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2985                         continue;
2986                 /* PSP lost connection when err_event_athub occurs */
2987                 if (amdgpu_ras_intr_triggered() &&
2988                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2989                         adev->ip_blocks[i].status.hw = false;
2990                         continue;
2991                 }
2992
2993                 /* skip unnecessary suspend of blocks that have not been initialized yet */
2994                 if (adev->gmc.xgmi.pending_reset &&
2995                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2996                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2997                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2998                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2999                         adev->ip_blocks[i].status.hw = false;
3000                         continue;
3001                 }
3002
3003                 /* skip suspend of gfx and psp for S0ix
3004                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3005                  * like at runtime. PSP is also part of the always on hardware
3006                  * so no need to suspend it.
3007                  */
3008                 if (adev->in_s0ix &&
3009                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3010                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3011                         continue;
3012
3014                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3015                 /* XXX handle errors */
3016                 if (r) {
3017                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3018                                   adev->ip_blocks[i].version->funcs->name, r);
3019                 }
3020                 adev->ip_blocks[i].status.hw = false;
3021                 /* handle putting the SMC in the appropriate state */
3022                 if (!amdgpu_sriov_vf(adev)) {
3023                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3024                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3025                                 if (r) {
3026                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3027                                                         adev->mp1_state, r);
3028                                         return r;
3029                                 }
3030                         }
3031                 }
3032         }
3033
3034         return 0;
3035 }
3036
3037 /**
3038  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3039  *
3040  * @adev: amdgpu_device pointer
3041  *
3042  * Main suspend function for hardware IPs.  The list of all the hardware
3043  * IPs that make up the asic is walked, clockgating is disabled and the
3044  * suspend callbacks are run.  suspend puts the hardware and software state
3045  * in each IP into a state suitable for suspend.
3046  * Returns 0 on success, negative error code on failure.
3047  */
3048 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3049 {
3050         int r;
3051
3052         if (amdgpu_sriov_vf(adev)) {
3053                 amdgpu_virt_fini_data_exchange(adev);
3054                 amdgpu_virt_request_full_gpu(adev, false);
3055         }
3056
3057         r = amdgpu_device_ip_suspend_phase1(adev);
3058         if (r)
3059                 return r;
3060         r = amdgpu_device_ip_suspend_phase2(adev);
3061
3062         if (amdgpu_sriov_vf(adev))
3063                 amdgpu_virt_release_full_gpu(adev, false);
3064
3065         return r;
3066 }
3067
3068 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3069 {
3070         int i, r;
3071
3072         static enum amd_ip_block_type ip_order[] = {
3073                 AMD_IP_BLOCK_TYPE_COMMON,
3074                 AMD_IP_BLOCK_TYPE_GMC,
3075                 AMD_IP_BLOCK_TYPE_PSP,
3076                 AMD_IP_BLOCK_TYPE_IH,
3077         };
3078
3079         for (i = 0; i < adev->num_ip_blocks; i++) {
3080                 int j;
3081                 struct amdgpu_ip_block *block;
3082
3083                 block = &adev->ip_blocks[i];
3084                 block->status.hw = false;
3085
3086                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3087
3088                         if (block->version->type != ip_order[j] ||
3089                                 !block->status.valid)
3090                                 continue;
3091
3092                         r = block->version->funcs->hw_init(adev);
3093                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3094                         if (r)
3095                                 return r;
3096                         block->status.hw = true;
3097                 }
3098         }
3099
3100         return 0;
3101 }
3102
3103 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3104 {
3105         int i, r;
3106
3107         static enum amd_ip_block_type ip_order[] = {
3108                 AMD_IP_BLOCK_TYPE_SMC,
3109                 AMD_IP_BLOCK_TYPE_DCE,
3110                 AMD_IP_BLOCK_TYPE_GFX,
3111                 AMD_IP_BLOCK_TYPE_SDMA,
3112                 AMD_IP_BLOCK_TYPE_UVD,
3113                 AMD_IP_BLOCK_TYPE_VCE,
3114                 AMD_IP_BLOCK_TYPE_VCN
3115         };
3116
3117         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3118                 int j;
3119                 struct amdgpu_ip_block *block;
3120
3121                 for (j = 0; j < adev->num_ip_blocks; j++) {
3122                         block = &adev->ip_blocks[j];
3123
3124                         if (block->version->type != ip_order[i] ||
3125                                 !block->status.valid ||
3126                                 block->status.hw)
3127                                 continue;
3128
3129                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3130                                 r = block->version->funcs->resume(adev);
3131                         else
3132                                 r = block->version->funcs->hw_init(adev);
3133
3134                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3135                         if (r)
3136                                 return r;
3137                         block->status.hw = true;
3138                 }
3139         }
3140
3141         return 0;
3142 }
3143
3144 /**
3145  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3146  *
3147  * @adev: amdgpu_device pointer
3148  *
3149  * First resume function for hardware IPs.  The list of all the hardware
3150  * IPs that make up the asic is walked and the resume callbacks are run for
3151  * COMMON, GMC, and IH (plus PSP when running under SR-IOV).  resume puts
3152  * the hardware into a functional state after a suspend and updates the
3153  * software state as necessary.  It is also used for restoring the GPU after a GPU reset.
3154  * Returns 0 on success, negative error code on failure.
3155  */
3156 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3157 {
3158         int i, r;
3159
3160         for (i = 0; i < adev->num_ip_blocks; i++) {
3161                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3162                         continue;
3163                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3164                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3165                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3166                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3167
3168                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3169                         if (r) {
3170                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3171                                           adev->ip_blocks[i].version->funcs->name, r);
3172                                 return r;
3173                         }
3174                         adev->ip_blocks[i].status.hw = true;
3175                 }
3176         }
3177
3178         return 0;
3179 }
3180
3181 /**
3182  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3183  *
3184  * @adev: amdgpu_device pointer
3185  *
3186  * Second resume function for hardware IPs.  The list of all the hardware
3187  * IPs that make up the asic is walked and the resume callbacks are run for
3188  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3189  * functional state after a suspend and updates the software state as
3190  * necessary.  This function is also used for restoring the GPU after a GPU
3191  * reset.
3192  * Returns 0 on success, negative error code on failure.
3193  */
3194 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3195 {
3196         int i, r;
3197
3198         for (i = 0; i < adev->num_ip_blocks; i++) {
3199                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3200                         continue;
3201                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3202                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3203                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3204                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3205                         continue;
3206                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3207                 if (r) {
3208                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3209                                   adev->ip_blocks[i].version->funcs->name, r);
3210                         return r;
3211                 }
3212                 adev->ip_blocks[i].status.hw = true;
3213
3214                 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3215                         /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3216                          * amdgpu_device_resume() after IP resume.
3217                          */
3218                         amdgpu_gfx_off_ctrl(adev, false);
3219                         DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3220                 }
3221
3222         }
3223
3224         return 0;
3225 }
3226
3227 /**
3228  * amdgpu_device_ip_resume - run resume for hardware IPs
3229  *
3230  * @adev: amdgpu_device pointer
3231  *
3232  * Main resume function for hardware IPs.  The hardware IPs
3233  * are split into two resume functions because they are also used in
3234  * recovering from a GPU reset, and some additional steps need to be
3235  * taken between them.  In this case (S3/S4) they are run sequentially.
3237  * Returns 0 on success, negative error code on failure.
3238  */
3239 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3240 {
3241         int r;
3242
3243         r = amdgpu_amdkfd_resume_iommu(adev);
3244         if (r)
3245                 return r;
3246
3247         r = amdgpu_device_ip_resume_phase1(adev);
3248         if (r)
3249                 return r;
3250
3251         r = amdgpu_device_fw_loading(adev);
3252         if (r)
3253                 return r;
3254
3255         r = amdgpu_device_ip_resume_phase2(adev);
3256
3257         return r;
3258 }
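
/*
 * Illustrative sketch (example_* names are hypothetical): the shape of the
 * per-IP resume callback that the two phases above invoke.  A typical
 * implementation simply re-runs the block's hw_init path.
 */
#if 0
static int example_ip_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* reprogram the block's registers after power was lost */
	return example_ip_hw_init(adev);
}
#endif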
3259
3260 /**
3261  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3262  *
3263  * @adev: amdgpu_device pointer
3264  *
3265  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3266  */
3267 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3268 {
3269         if (amdgpu_sriov_vf(adev)) {
3270                 if (adev->is_atom_fw) {
3271                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3272                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3273                 } else {
3274                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3275                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3276                 }
3277
3278                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3279                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3280         }
3281 }
3282
3283 /**
3284  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3285  *
3286  * @asic_type: AMD asic type
3287  *
3288  * Check if there is DC (new modesetting infrastructure) support for an asic.
3289  * Returns true if DC has support, false if not.
3290  */
3291 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3292 {
3293         switch (asic_type) {
3294 #ifdef CONFIG_DRM_AMDGPU_SI
3295         case CHIP_HAINAN:
3296 #endif
3297         case CHIP_TOPAZ:
3298                 /* chips with no display hardware */
3299                 return false;
3300 #if defined(CONFIG_DRM_AMD_DC)
3301         case CHIP_TAHITI:
3302         case CHIP_PITCAIRN:
3303         case CHIP_VERDE:
3304         case CHIP_OLAND:
3305                 /*
3306                  * We have systems in the wild with these ASICs that require
3307                  * LVDS and VGA support which is not supported with DC.
3308                  *
3309                  * Fall back to the non-DC driver here by default so as not to
3310                  * cause regressions.
3311                  */
3312 #if defined(CONFIG_DRM_AMD_DC_SI)
3313                 return amdgpu_dc > 0;
3314 #else
3315                 return false;
3316 #endif
3317         case CHIP_BONAIRE:
3318         case CHIP_KAVERI:
3319         case CHIP_KABINI:
3320         case CHIP_MULLINS:
3321                 /*
3322                  * We have systems in the wild with these ASICs that require
3323                  * VGA support which is not supported with DC.
3324                  *
3325                  * Fall back to the non-DC driver here by default so as not to
3326                  * cause regressions.
3327                  */
3328                 return amdgpu_dc > 0;
3329         default:
3330                 return amdgpu_dc != 0;
3331 #else
3332         default:
3333                 if (amdgpu_dc > 0)
3334                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3335                                          "but isn't supported by ASIC, ignoring\n");
3336                 return false;
3337 #endif
3338         }
3339 }
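
/*
 * Usage note (values illustrative): amdgpu_dc is a tri-state module
 * parameter consumed above; -1 selects the per-ASIC default, 0 forces the
 * legacy display path and 1 requests DC where the checks permit it, e.g.:
 *
 *   modprobe amdgpu dc=1
 */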
3340
3341 /**
3342  * amdgpu_device_has_dc_support - check if dc is supported
3343  *
3344  * @adev: amdgpu_device pointer
3345  *
3346  * Returns true for supported, false for not supported
3347  */
3348 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3349 {
3350         if (amdgpu_sriov_vf(adev) ||
3351             adev->enable_virtual_display ||
3352             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3353                 return false;
3354
3355         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3356 }
3357
3358 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3359 {
3360         struct amdgpu_device *adev =
3361                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3362         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3363
3364         /* It's a bug to not have a hive within this function */
3365         if (WARN_ON(!hive))
3366                 return;
3367
3368         /*
3369          * Use task barrier to synchronize all xgmi reset works across the
3370          * hive. task_barrier_enter and task_barrier_exit will block
3371          * until all the threads running the xgmi reset works reach
3372          * those points. task_barrier_full will do both blocks.
3373          */
3374         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3375
3376                 task_barrier_enter(&hive->tb);
3377                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3378
3379                 if (adev->asic_reset_res)
3380                         goto fail;
3381
3382                 task_barrier_exit(&hive->tb);
3383                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3384
3385                 if (adev->asic_reset_res)
3386                         goto fail;
3387
3388                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3389                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3390                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3391         } else {
3392
3393                 task_barrier_full(&hive->tb);
3394                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3395         }
3396
3397 fail:
3398         if (adev->asic_reset_res)
3399                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3400                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3401         amdgpu_put_xgmi_hive(hive);
3402 }
3403
3404 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3405 {
3406         char *input = amdgpu_lockup_timeout;
3407         char *timeout_setting = NULL;
3408         int index = 0;
3409         long timeout;
3410         int ret = 0;
3411
3412         /*
3413          * By default the timeout for non-compute jobs is 10000 ms
3414          * and 60000 ms for compute jobs.
3415          * Under SR-IOV, compute jobs default to 60000 ms only in
3416          * one-VF (pp_one_vf) mode; otherwise they use 10000 ms.
3417          */
3418         adev->gfx_timeout = msecs_to_jiffies(10000);
3419         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3420         if (amdgpu_sriov_vf(adev))
3421                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3422                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3423         else
3424                 adev->compute_timeout =  msecs_to_jiffies(60000);
3425
3426         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3427                 while ((timeout_setting = strsep(&input, ",")) &&
3428                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3429                         ret = kstrtol(timeout_setting, 0, &timeout);
3430                         if (ret)
3431                                 return ret;
3432
3433                         if (timeout == 0) {
3434                                 index++;
3435                                 continue;
3436                         } else if (timeout < 0) {
3437                                 timeout = MAX_SCHEDULE_TIMEOUT;
3438                                 dev_warn(adev->dev, "lockup timeout disabled");
3439                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3440                         } else {
3441                                 timeout = msecs_to_jiffies(timeout);
3442                         }
3443
3444                         switch (index++) {
3445                         case 0:
3446                                 adev->gfx_timeout = timeout;
3447                                 break;
3448                         case 1:
3449                                 adev->compute_timeout = timeout;
3450                                 break;
3451                         case 2:
3452                                 adev->sdma_timeout = timeout;
3453                                 break;
3454                         case 3:
3455                                 adev->video_timeout = timeout;
3456                                 break;
3457                         default:
3458                                 break;
3459                         }
3460                 }
3461                 /*
3462                  * There is only one value specified and
3463                  * it should apply to all non-compute jobs.
3464                  */
3465                 if (index == 1) {
3466                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3467                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3468                                 adev->compute_timeout = adev->gfx_timeout;
3469                 }
3470         }
3471
3472         return ret;
3473 }
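
/*
 * Usage sketch for the lockup_timeout parameter parsed above (values are
 * illustrative).  Up to four comma-separated entries map, in order, to the
 * gfx, compute, sdma and video queues; 0 keeps the default and a negative
 * value disables the timeout:
 *
 *   modprobe amdgpu lockup_timeout=10000              # all non-compute jobs
 *   modprobe amdgpu lockup_timeout=10000,60000,0,-1   # per-queue values
 */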
3474
3475 /**
3476  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3477  *
3478  * @adev: amdgpu_device pointer
3479  *
3480  * RAM is direct mapped to the GPU if the IOMMU is disabled or set to passthrough mode
3481  */
3482 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3483 {
3484         struct iommu_domain *domain;
3485
3486         domain = iommu_get_domain_for_dev(adev->dev);
3487         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3488                 adev->ram_is_direct_mapped = true;
3489 }
3490
3491 static const struct attribute *amdgpu_dev_attributes[] = {
3492         &dev_attr_product_name.attr,
3493         &dev_attr_product_number.attr,
3494         &dev_attr_serial_number.attr,
3495         &dev_attr_pcie_replay_count.attr,
3496         NULL
3497 };
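
/*
 * These attributes appear under the PCI device's sysfs directory, e.g.
 * (path illustrative):
 *
 *   cat /sys/bus/pci/devices/0000:03:00.0/product_name
 */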
3498
3499 /**
3500  * amdgpu_device_init - initialize the driver
3501  *
3502  * @adev: amdgpu_device pointer
3503  * @flags: driver flags
3504  *
3505  * Initializes the driver info and hw (all asics).
3506  * Returns 0 for success or an error on failure.
3507  * Called at driver startup.
3508  */
3509 int amdgpu_device_init(struct amdgpu_device *adev,
3510                        uint32_t flags)
3511 {
3512         struct drm_device *ddev = adev_to_drm(adev);
3513         struct pci_dev *pdev = adev->pdev;
3514         int r, i;
3515         bool px = false;
3516         u32 max_MBps;
3517
3518         adev->shutdown = false;
3519         adev->flags = flags;
3520
3521         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3522                 adev->asic_type = amdgpu_force_asic_type;
3523         else
3524                 adev->asic_type = flags & AMD_ASIC_MASK;
3525
3526         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3527         if (amdgpu_emu_mode == 1)
3528                 adev->usec_timeout *= 10;
3529         adev->gmc.gart_size = 512 * 1024 * 1024;
3530         adev->accel_working = false;
3531         adev->num_rings = 0;
3532         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3533         adev->mman.buffer_funcs = NULL;
3534         adev->mman.buffer_funcs_ring = NULL;
3535         adev->vm_manager.vm_pte_funcs = NULL;
3536         adev->vm_manager.vm_pte_num_scheds = 0;
3537         adev->gmc.gmc_funcs = NULL;
3538         adev->harvest_ip_mask = 0x0;
3539         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3540         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3541
3542         adev->smc_rreg = &amdgpu_invalid_rreg;
3543         adev->smc_wreg = &amdgpu_invalid_wreg;
3544         adev->pcie_rreg = &amdgpu_invalid_rreg;
3545         adev->pcie_wreg = &amdgpu_invalid_wreg;
3546         adev->pciep_rreg = &amdgpu_invalid_rreg;
3547         adev->pciep_wreg = &amdgpu_invalid_wreg;
3548         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3549         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3550         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3551         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3552         adev->didt_rreg = &amdgpu_invalid_rreg;
3553         adev->didt_wreg = &amdgpu_invalid_wreg;
3554         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3555         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3556         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3557         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3558
3559         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3560                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3561                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3562
3563         /* mutex initializations are all done here so we
3564          * can call functions without locking issues */
3565         mutex_init(&adev->firmware.mutex);
3566         mutex_init(&adev->pm.mutex);
3567         mutex_init(&adev->gfx.gpu_clock_mutex);
3568         mutex_init(&adev->srbm_mutex);
3569         mutex_init(&adev->gfx.pipe_reserve_mutex);
3570         mutex_init(&adev->gfx.gfx_off_mutex);
3571         mutex_init(&adev->grbm_idx_mutex);
3572         mutex_init(&adev->mn_lock);
3573         mutex_init(&adev->virt.vf_errors.lock);
3574         hash_init(adev->mn_hash);
3575         mutex_init(&adev->psp.mutex);
3576         mutex_init(&adev->notifier_lock);
3577         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3578         mutex_init(&adev->benchmark_mutex);
3579
3580         amdgpu_device_init_apu_flags(adev);
3581
3582         r = amdgpu_device_check_arguments(adev);
3583         if (r)
3584                 return r;
3585
3586         spin_lock_init(&adev->mmio_idx_lock);
3587         spin_lock_init(&adev->smc_idx_lock);
3588         spin_lock_init(&adev->pcie_idx_lock);
3589         spin_lock_init(&adev->uvd_ctx_idx_lock);
3590         spin_lock_init(&adev->didt_idx_lock);
3591         spin_lock_init(&adev->gc_cac_idx_lock);
3592         spin_lock_init(&adev->se_cac_idx_lock);
3593         spin_lock_init(&adev->audio_endpt_idx_lock);
3594         spin_lock_init(&adev->mm_stats.lock);
3595
3596         INIT_LIST_HEAD(&adev->shadow_list);
3597         mutex_init(&adev->shadow_list_lock);
3598
3599         INIT_LIST_HEAD(&adev->reset_list);
3600
3601         INIT_LIST_HEAD(&adev->ras_list);
3602
3603         INIT_DELAYED_WORK(&adev->delayed_init_work,
3604                           amdgpu_device_delayed_init_work_handler);
3605         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3606                           amdgpu_device_delay_enable_gfx_off);
3607
3608         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3609
3610         adev->gfx.gfx_off_req_count = 1;
3611         adev->gfx.gfx_off_residency = 0;
3612         adev->gfx.gfx_off_entrycount = 0;
3613         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3614
3615         atomic_set(&adev->throttling_logging_enabled, 1);
3616         /*
3617          * If throttling continues, logging will be performed every minute
3618          * to avoid log flooding. "-1" is subtracted since the thermal
3619          * throttling interrupt comes every second. Thus, the total logging
3620          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3621          * for throttling interrupt) = 60 seconds.
3622          */
3623         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3624         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3625
3626         /* Registers mapping */
3627         /* TODO: block userspace mapping of io register */
3628         if (adev->asic_type >= CHIP_BONAIRE) {
3629                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3630                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3631         } else {
3632                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3633                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3634         }
3635
3636         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3637                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3638
3639         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3640         if (!adev->rmmio)
3641                 return -ENOMEM;
3643         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3644         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3645
3646         amdgpu_device_get_pcie_info(adev);
3647
3648         if (amdgpu_mcbp)
3649                 DRM_INFO("MCBP is enabled\n");
3650
3651         /*
3652          * The reset domain needs to be present early, before the XGMI hive is
3653          * discovered (if any) and initialized, so the reset sem and in_gpu_reset
3654          * flag can be used early on during init and before calling RREG32.
3655          */
3656         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3657         if (!adev->reset_domain)
3658                 return -ENOMEM;
3659
3660         /* detect hw virtualization here */
3661         amdgpu_detect_virtualization(adev);
3662
3663         r = amdgpu_device_get_job_timeout_settings(adev);
3664         if (r) {
3665                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3666                 return r;
3667         }
3668
3669         /* early init functions */
3670         r = amdgpu_device_ip_early_init(adev);
3671         if (r)
3672                 return r;
3673
3674         /* Enable TMZ based on IP_VERSION */
3675         amdgpu_gmc_tmz_set(adev);
3676
3677         amdgpu_gmc_noretry_set(adev);
3678         /* Need to get xgmi info early to decide the reset behavior */
3679         if (adev->gmc.xgmi.supported) {
3680                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3681                 if (r)
3682                         return r;
3683         }
3684
3685         /* enable PCIE atomic ops */
3686         if (amdgpu_sriov_vf(adev))
3687                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3688                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3689                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3690         else
3691                 adev->have_atomics_support =
3692                         !pci_enable_atomic_ops_to_root(adev->pdev,
3693                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3694                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3695         if (!adev->have_atomics_support)
3696                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3697
3698         /* doorbell bar mapping and doorbell index init */
3699         amdgpu_device_doorbell_init(adev);
3700
3701         if (amdgpu_emu_mode == 1) {
3702                 /* post the asic on emulation mode */
3703                 emu_soc_asic_init(adev);
3704                 goto fence_driver_init;
3705         }
3706
3707         amdgpu_reset_init(adev);
3708
3709         /* detect if we are running with an SR-IOV vBIOS */
3710         amdgpu_device_detect_sriov_bios(adev);
3711
3712         /* check if we need to reset the asic
3713          *  E.g., driver was not cleanly unloaded previously, etc.
3714          */
3715         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3716                 if (adev->gmc.xgmi.num_physical_nodes) {
3717                         dev_info(adev->dev, "Pending hive reset.\n");
3718                         adev->gmc.xgmi.pending_reset = true;
3719                         /* Only need to init the blocks the SMU needs to handle the reset */
3720                         for (i = 0; i < adev->num_ip_blocks; i++) {
3721                                 if (!adev->ip_blocks[i].status.valid)
3722                                         continue;
3723                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3724                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3725                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3726                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3727                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3728                                                 adev->ip_blocks[i].version->funcs->name);
3729                                         adev->ip_blocks[i].status.hw = true;
3730                                 }
3731                         }
3732                 } else {
3733                         r = amdgpu_asic_reset(adev);
3734                         if (r) {
3735                                 dev_err(adev->dev, "asic reset on init failed\n");
3736                                 goto failed;
3737                         }
3738                 }
3739         }
3740
3741         pci_enable_pcie_error_reporting(adev->pdev);
3742
3743         /* Post card if necessary */
3744         if (amdgpu_device_need_post(adev)) {
3745                 if (!adev->bios) {
3746                         dev_err(adev->dev, "no vBIOS found\n");
3747                         r = -EINVAL;
3748                         goto failed;
3749                 }
3750                 DRM_INFO("GPU posting now...\n");
3751                 r = amdgpu_device_asic_init(adev);
3752                 if (r) {
3753                         dev_err(adev->dev, "gpu post error!\n");
3754                         goto failed;
3755                 }
3756         }
3757
3758         if (adev->is_atom_fw) {
3759                 /* Initialize clocks */
3760                 r = amdgpu_atomfirmware_get_clock_info(adev);
3761                 if (r) {
3762                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3763                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3764                         goto failed;
3765                 }
3766         } else {
3767                 /* Initialize clocks */
3768                 r = amdgpu_atombios_get_clock_info(adev);
3769                 if (r) {
3770                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3771                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3772                         goto failed;
3773                 }
3774                 /* init i2c buses */
3775                 if (!amdgpu_device_has_dc_support(adev))
3776                         amdgpu_atombios_i2c_init(adev);
3777         }
3778
3779 fence_driver_init:
3780         /* Fence driver */
3781         r = amdgpu_fence_driver_sw_init(adev);
3782         if (r) {
3783                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3784                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3785                 goto failed;
3786         }
3787
3788         /* init the mode config */
3789         drm_mode_config_init(adev_to_drm(adev));
3790
3791         r = amdgpu_device_ip_init(adev);
3792         if (r) {
3793                 /* failed in exclusive mode due to timeout */
3794                 if (amdgpu_sriov_vf(adev) &&
3795                     !amdgpu_sriov_runtime(adev) &&
3796                     amdgpu_virt_mmio_blocked(adev) &&
3797                     !amdgpu_virt_wait_reset(adev)) {
3798                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3799                         /* Don't send request since VF is inactive. */
3800                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3801                         adev->virt.ops = NULL;
3802                         r = -EAGAIN;
3803                         goto release_ras_con;
3804                 }
3805                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3806                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3807                 goto release_ras_con;
3808         }
3809
3810         amdgpu_fence_driver_hw_init(adev);
3811
3812         dev_info(adev->dev,
3813                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3814                         adev->gfx.config.max_shader_engines,
3815                         adev->gfx.config.max_sh_per_se,
3816                         adev->gfx.config.max_cu_per_sh,
3817                         adev->gfx.cu_info.number);
3818
3819         adev->accel_working = true;
3820
3821         amdgpu_vm_check_compute_bug(adev);
3822
3823         /* Initialize the buffer migration limit. */
3824         if (amdgpu_moverate >= 0)
3825                 max_MBps = amdgpu_moverate;
3826         else
3827                 max_MBps = 8; /* Allow 8 MB/s. */
3828         /* Get a log2 for easy divisions. */
3829         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3830
3831         r = amdgpu_pm_sysfs_init(adev);
3832         if (r) {
3833                 adev->pm_sysfs_en = false;
3834                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3835         } else
3836                 adev->pm_sysfs_en = true;
3837
3838         r = amdgpu_ucode_sysfs_init(adev);
3839         if (r) {
3840                 adev->ucode_sysfs_en = false;
3841                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3842         } else
3843                 adev->ucode_sysfs_en = true;
3844
3845         r = amdgpu_psp_sysfs_init(adev);
3846         if (r) {
3847                 adev->psp_sysfs_en = false;
3848                 if (!amdgpu_sriov_vf(adev))
3849                         DRM_ERROR("Creating psp sysfs failed\n");
3850         } else
3851                 adev->psp_sysfs_en = true;
3852
3853         /*
3854          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3855          * Otherwise the mgpu fan boost feature will be skipped because the
3856          * gpu instance count would be too low.
3857          */
3858         amdgpu_register_gpu_instance(adev);
3859
3860         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3861          * explicit gating rather than handling it automatically.
3862          */
3863         if (!adev->gmc.xgmi.pending_reset) {
3864                 r = amdgpu_device_ip_late_init(adev);
3865                 if (r) {
3866                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3867                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3868                         goto release_ras_con;
3869                 }
3870                 /* must succeed. */
3871                 amdgpu_ras_resume(adev);
3872                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3873                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3874         }
3875
3876         if (amdgpu_sriov_vf(adev))
3877                 flush_delayed_work(&adev->delayed_init_work);
3878
3879         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3880         if (r)
3881                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3882
3883         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3884                 r = amdgpu_pmu_init(adev);
3885                 if (r)
3886                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3887         }

3888         /* Keep the cached PCI config space at hand for restore after a sudden PCI error */
3889         if (amdgpu_device_cache_pci_state(adev->pdev))
3890                 pci_restore_state(pdev);
3891
3892         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3893         /* this will fail for cards that aren't VGA class devices, just
3894          * ignore it */
3895         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3896                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3897
3898         if (amdgpu_device_supports_px(ddev)) {
3899                 px = true;
3900                 vga_switcheroo_register_client(adev->pdev,
3901                                                &amdgpu_switcheroo_ops, px);
3902                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3903         }
3904
3905         if (adev->gmc.xgmi.pending_reset)
3906                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3907                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3908
3909         amdgpu_device_check_iommu_direct_map(adev);
3910
3911         return 0;
3912
3913 release_ras_con:
3914         amdgpu_release_ras_context(adev);
3915
3916 failed:
3917         amdgpu_vf_error_trans_all(adev);
3918
3919         return r;
3920 }
3921
3922 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3923 {
3924
3925         /* Clear all CPU mappings pointing to this device */
3926         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3927
3928         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3929         amdgpu_device_doorbell_fini(adev);
3930
3931         iounmap(adev->rmmio);
3932         adev->rmmio = NULL;
3933         if (adev->mman.aper_base_kaddr)
3934                 iounmap(adev->mman.aper_base_kaddr);
3935         adev->mman.aper_base_kaddr = NULL;
3936
3937         /* Memory manager related */
3938         if (!adev->gmc.xgmi.connected_to_cpu) {
3939                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3940                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3941         }
3942 }
3943
3944 /**
3945  * amdgpu_device_fini_hw - tear down the driver
3946  *
3947  * @adev: amdgpu_device pointer
3948  *
3949  * Tear down the driver info (all asics).
3950  * Called at driver shutdown.
3951  */
3952 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3953 {
3954         dev_info(adev->dev, "amdgpu: finishing device.\n");
3955         flush_delayed_work(&adev->delayed_init_work);
3956         adev->shutdown = true;
3957
3958         /* make sure IB tests have finished before entering exclusive mode
3959          * to avoid preemption on IB tests
3960          */
3961         if (amdgpu_sriov_vf(adev)) {
3962                 amdgpu_virt_request_full_gpu(adev, false);
3963                 amdgpu_virt_fini_data_exchange(adev);
3964         }
3965
3966         /* disable all interrupts */
3967         amdgpu_irq_disable_all(adev);
3968         if (adev->mode_info.mode_config_initialized) {
3969                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3970                         drm_helper_force_disable_all(adev_to_drm(adev));
3971                 else
3972                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3973         }
3974         amdgpu_fence_driver_hw_fini(adev);
3975
3976         if (adev->mman.initialized) {
3977                 flush_delayed_work(&adev->mman.bdev.wq);
3978                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3979         }
3980
3981         if (adev->pm_sysfs_en)
3982                 amdgpu_pm_sysfs_fini(adev);
3983         if (adev->ucode_sysfs_en)
3984                 amdgpu_ucode_sysfs_fini(adev);
3985         if (adev->psp_sysfs_en)
3986                 amdgpu_psp_sysfs_fini(adev);
3987         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3988
3989         /* ras features must be disabled before hw fini */
3990         amdgpu_ras_pre_fini(adev);
3991
3992         amdgpu_device_ip_fini_early(adev);
3993
3994         amdgpu_irq_fini_hw(adev);
3995
3996         if (adev->mman.initialized)
3997                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3998
3999         amdgpu_gart_dummy_page_fini(adev);
4000
4001         amdgpu_device_unmap_mmio(adev);
4002
4003 }
4004
4005 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4006 {
4007         int idx;
4008
4009         amdgpu_fence_driver_sw_fini(adev);
4010         amdgpu_device_ip_fini(adev);
4011         release_firmware(adev->firmware.gpu_info_fw);
4012         adev->firmware.gpu_info_fw = NULL;
4013         adev->accel_working = false;
4014         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4015
4016         amdgpu_reset_fini(adev);
4017
4018         /* free i2c buses */
4019         if (!amdgpu_device_has_dc_support(adev))
4020                 amdgpu_i2c_fini(adev);
4021
4022         if (amdgpu_emu_mode != 1)
4023                 amdgpu_atombios_fini(adev);
4024
4025         kfree(adev->bios);
4026         adev->bios = NULL;
4027         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4028                 vga_switcheroo_unregister_client(adev->pdev);
4029                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4030         }
4031         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4032                 vga_client_unregister(adev->pdev);
4033
4034         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4035
4036                 iounmap(adev->rmmio);
4037                 adev->rmmio = NULL;
4038                 amdgpu_device_doorbell_fini(adev);
4039                 drm_dev_exit(idx);
4040         }
4041
4042         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4043                 amdgpu_pmu_fini(adev);
4044         if (adev->mman.discovery_bin)
4045                 amdgpu_discovery_fini(adev);
4046
4047         amdgpu_reset_put_reset_domain(adev->reset_domain);
4048         adev->reset_domain = NULL;
4049
4050         kfree(adev->pci_state);
4051
4052 }
4053
4054 /**
4055  * amdgpu_device_evict_resources - evict device resources
4056  * @adev: amdgpu device object
4057  *
4058  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4059  * of the vram memory type. Mainly used for evicting device resources
4060  * at suspend time.
4061  *
4062  */
4063 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4064 {
4065         int ret;
4066
4067         /* No need to evict vram on APUs for suspend to ram or s2idle */
4068         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4069                 return 0;
4070
4071         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4072         if (ret)
4073                 DRM_WARN("evicting device resources failed\n");
4074         return ret;
4075 }
4076
4077 /*
4078  * Suspend & resume.
4079  */
4080 /**
4081  * amdgpu_device_suspend - initiate device suspend
4082  *
4083  * @dev: drm dev pointer
4084  * @fbcon: notify the fbdev of suspend
4085  *
4086  * Puts the hw in the suspend state (all asics).
4087  * Returns 0 for success or an error on failure.
4088  * Called at driver suspend.
4089  */
4090 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4091 {
4092         struct amdgpu_device *adev = drm_to_adev(dev);
4093         int r = 0;
4094
4095         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4096                 return 0;
4097
4098         adev->in_suspend = true;
4099
4100         if (amdgpu_sriov_vf(adev)) {
4101                 amdgpu_virt_fini_data_exchange(adev);
4102                 r = amdgpu_virt_request_full_gpu(adev, false);
4103                 if (r)
4104                         return r;
4105         }
4106
4107         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4108                 DRM_WARN("smart shift update failed\n");
4109
4110         drm_kms_helper_poll_disable(dev);
4111
4112         if (fbcon)
4113                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4114
4115         cancel_delayed_work_sync(&adev->delayed_init_work);
4116
4117         amdgpu_ras_suspend(adev);
4118
4119         amdgpu_device_ip_suspend_phase1(adev);
4120
4121         if (!adev->in_s0ix)
4122                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4123
4124         r = amdgpu_device_evict_resources(adev);
4125         if (r)
4126                 return r;
4127
4128         amdgpu_fence_driver_hw_fini(adev);
4129
4130         amdgpu_device_ip_suspend_phase2(adev);
4131
4132         if (amdgpu_sriov_vf(adev))
4133                 amdgpu_virt_release_full_gpu(adev, false);
4134
4135         return 0;
4136 }
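
/*
 * Illustrative sketch (simplified, hypothetical wrapper): how a PM core
 * entry point in amdgpu_drv.c would drive the function above.  The real
 * callbacks also set adev->in_s3/adev->in_s0ix first.
 */
#if 0
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}
#endif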
4137
4138 /**
4139  * amdgpu_device_resume - initiate device resume
4140  *
4141  * @dev: drm dev pointer
4142  * @fbcon: notify the fbdev of resume
4143  *
4144  * Bring the hw back to operating state (all asics).
4145  * Returns 0 for success or an error on failure.
4146  * Called at driver resume.
4147  */
4148 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4149 {
4150         struct amdgpu_device *adev = drm_to_adev(dev);
4151         int r = 0;
4152
4153         if (amdgpu_sriov_vf(adev)) {
4154                 r = amdgpu_virt_request_full_gpu(adev, true);
4155                 if (r)
4156                         return r;
4157         }
4158
4159         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4160                 return 0;
4161
4162         if (adev->in_s0ix)
4163                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4164
4165         /* post card */
4166         if (amdgpu_device_need_post(adev)) {
4167                 r = amdgpu_device_asic_init(adev);
4168                 if (r)
4169                         dev_err(adev->dev, "amdgpu asic init failed\n");
4170         }
4171
4172         r = amdgpu_device_ip_resume(adev);
4173
4174         /* no matter what r is, we always need to properly release the full GPU */
4175         if (amdgpu_sriov_vf(adev)) {
4176                 amdgpu_virt_init_data_exchange(adev);
4177                 amdgpu_virt_release_full_gpu(adev, true);
4178         }
4179
4180         if (r) {
4181                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4182                 return r;
4183         }
4184         amdgpu_fence_driver_hw_init(adev);
4185
4186         r = amdgpu_device_ip_late_init(adev);
4187         if (r)
4188                 return r;
4189
4190         queue_delayed_work(system_wq, &adev->delayed_init_work,
4191                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4192
4193         if (!adev->in_s0ix) {
4194                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4195                 if (r)
4196                         return r;
4197         }
4198
4199         /* Make sure IB tests are flushed */
4200         if (amdgpu_sriov_vf(adev))
4201                 amdgpu_irq_gpu_reset_resume_helper(adev);
4202         flush_delayed_work(&adev->delayed_init_work);
4203
4204         if (adev->in_s0ix) {
4205                 /* re-enable gfxoff after IP resume; it was disabled for
4206                  * IP resume in amdgpu_device_ip_resume_phase2().
4207                  */
4208                 amdgpu_gfx_off_ctrl(adev, true);
4209                 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4210         }
4211         if (fbcon)
4212                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4213
4214         drm_kms_helper_poll_enable(dev);
4215
4216         amdgpu_ras_resume(adev);
4217
4218         /*
4219          * Most of the connector probing functions try to acquire runtime pm
4220          * refs to ensure that the GPU is powered on when connector polling is
4221          * performed. Since we're calling this from a runtime PM callback,
4222          * trying to acquire rpm refs will cause us to deadlock.
4223          *
4224          * Since we're guaranteed to be holding the rpm lock, it's safe to
4225          * temporarily disable the rpm helpers so this doesn't deadlock us.
4226          */
4227 #ifdef CONFIG_PM
4228         dev->dev->power.disable_depth++;
4229 #endif
4230         if (!amdgpu_device_has_dc_support(adev))
4231                 drm_helper_hpd_irq_event(dev);
4232         else
4233                 drm_kms_helper_hotplug_event(dev);
4234 #ifdef CONFIG_PM
4235         dev->dev->power.disable_depth--;
4236 #endif
4237         adev->in_suspend = false;
4238
4239         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4240                 DRM_WARN("smart shift update failed\n");
4241
4242         return 0;
4243 }
4244
4245 /**
4246  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4247  *
4248  * @adev: amdgpu_device pointer
4249  *
4250  * The list of all the hardware IPs that make up the asic is walked and
4251  * the check_soft_reset callbacks are run.  check_soft_reset determines
4252  * if the asic is still hung or not.
4253  * Returns true if any of the IPs are still in a hung state, false if not.
4254  */
4255 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4256 {
4257         int i;
4258         bool asic_hang = false;
4259
4260         if (amdgpu_sriov_vf(adev))
4261                 return true;
4262
4263         if (amdgpu_asic_need_full_reset(adev))
4264                 return true;
4265
4266         for (i = 0; i < adev->num_ip_blocks; i++) {
4267                 if (!adev->ip_blocks[i].status.valid)
4268                         continue;
4269                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4270                         adev->ip_blocks[i].status.hang =
4271                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4272                 if (adev->ip_blocks[i].status.hang) {
4273                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4274                         asic_hang = true;
4275                 }
4276         }
4277         return asic_hang;
4278 }
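
/*
 * Illustrative sketch (mmEXAMPLE_* names are hypothetical): the shape of a
 * per-IP check_soft_reset callback polled by the loop above.
 */
#if 0
static bool example_ip_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* read a status register and report whether the block is hung */
	return !!(RREG32(mmEXAMPLE_STATUS) & EXAMPLE_STATUS__HANG_MASK);
}
#endif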
4279
4280 /**
4281  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4282  *
4283  * @adev: amdgpu_device pointer
4284  *
4285  * The list of all the hardware IPs that make up the asic is walked and the
4286  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4287  * handles any IP specific hardware or software state changes that are
4288  * necessary for a soft reset to succeed.
4289  * Returns 0 on success, negative error code on failure.
4290  */
4291 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4292 {
4293         int i, r = 0;
4294
4295         for (i = 0; i < adev->num_ip_blocks; i++) {
4296                 if (!adev->ip_blocks[i].status.valid)
4297                         continue;
4298                 if (adev->ip_blocks[i].status.hang &&
4299                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4300                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4301                         if (r)
4302                                 return r;
4303                 }
4304         }
4305
4306         return 0;
4307 }
4308
4309 /**
4310  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4311  *
4312  * @adev: amdgpu_device pointer
4313  *
4314  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4315  * reset is necessary to recover.
4316  * Returns true if a full asic reset is required, false if not.
4317  */
4318 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4319 {
4320         int i;
4321
4322         if (amdgpu_asic_need_full_reset(adev))
4323                 return true;
4324
4325         for (i = 0; i < adev->num_ip_blocks; i++) {
4326                 if (!adev->ip_blocks[i].status.valid)
4327                         continue;
4328                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4329                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4330                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4331                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4332                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) {
4333                         if (adev->ip_blocks[i].status.hang) {
4334                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4335                                 return true;
4336                         }
4337                 }
4338         }
4339         return false;
4340 }
4341
4342 /**
4343  * amdgpu_device_ip_soft_reset - do a soft reset
4344  *
4345  * @adev: amdgpu_device pointer
4346  *
4347  * The list of all the hardware IPs that make up the asic is walked and the
4348  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4349  * IP specific hardware or software state changes that are necessary to soft
4350  * reset the IP.
4351  * Returns 0 on success, negative error code on failure.
4352  */
4353 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4354 {
4355         int i, r = 0;
4356
4357         for (i = 0; i < adev->num_ip_blocks; i++) {
4358                 if (!adev->ip_blocks[i].status.valid)
4359                         continue;
4360                 if (adev->ip_blocks[i].status.hang &&
4361                     adev->ip_blocks[i].version->funcs->soft_reset) {
4362                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4363                         if (r)
4364                                 return r;
4365                 }
4366         }
4367
4368         return 0;
4369 }
4370
4371 /**
4372  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4373  *
4374  * @adev: amdgpu_device pointer
4375  *
4376  * The list of all the hardware IPs that make up the asic is walked and the
4377  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4378  * handles any IP specific hardware or software state changes that are
4379  * necessary after the IP has been soft reset.
4380  * Returns 0 on success, negative error code on failure.
4381  */
4382 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4383 {
4384         int i, r = 0;
4385
4386         for (i = 0; i < adev->num_ip_blocks; i++) {
4387                 if (!adev->ip_blocks[i].status.valid)
4388                         continue;
4389                 if (adev->ip_blocks[i].status.hang &&
4390                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4391                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4392                 if (r)
4393                         return r;
4394         }
4395
4396         return 0;
4397 }
4398
4399 /**
4400  * amdgpu_device_recover_vram - Recover some VRAM contents
4401  *
4402  * @adev: amdgpu_device pointer
4403  *
4404  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4405  * restore things like GPUVM page tables after a GPU reset where
4406  * the contents of VRAM might be lost.
4407  *
4408  * Returns:
4409  * 0 on success, negative error code on failure.
4410  */
4411 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4412 {
4413         struct dma_fence *fence = NULL, *next = NULL;
4414         struct amdgpu_bo *shadow;
4415         struct amdgpu_bo_vm *vmbo;
4416         long r = 1, tmo;
4417
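        /* Allow a longer per-fence wait under SR-IOV runtime, where world
         * switches can delay fence completion.
         */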
4418         if (amdgpu_sriov_runtime(adev))
4419                 tmo = msecs_to_jiffies(8000);
4420         else
4421                 tmo = msecs_to_jiffies(100);
4422
4423         dev_info(adev->dev, "recover vram bo from shadow start\n");
4424         mutex_lock(&adev->shadow_list_lock);
4425         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4426                 shadow = &vmbo->bo;
4427                 /* No need to recover an evicted BO */
4428                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4429                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4430                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4431                         continue;
4432
4433                 r = amdgpu_bo_restore_shadow(shadow, &next);
4434                 if (r)
4435                         break;
4436
4437                 if (fence) {
4438                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4439                         dma_fence_put(fence);
4440                         fence = next;
4441                         if (tmo == 0) {
4442                                 r = -ETIMEDOUT;
4443                                 break;
4444                         } else if (tmo < 0) {
4445                                 r = tmo;
4446                                 break;
4447                         }
4448                 } else {
4449                         fence = next;
4450                 }
4451         }
4452         mutex_unlock(&adev->shadow_list_lock);
4453
4454         if (fence)
4455                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4456         dma_fence_put(fence);
4457
4458         if (r < 0 || tmo <= 0) {
4459                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4460                 return -EIO;
4461         }
4462
4463         dev_info(adev->dev, "recover vram bo from shadow done\n");
4464         return 0;
4465 }
4466
4467
4468 /**
4469  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4470  *
4471  * @adev: amdgpu_device pointer
4472  * @from_hypervisor: request from hypervisor
4473  *
4474  * Do a VF FLR and reinitialize the ASIC.
4475  * Returns 0 on success, negative error code on failure.
4476  */
4477 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4478                                      bool from_hypervisor)
4479 {
4480         int r;
4481         struct amdgpu_hive_info *hive = NULL;
4482         int retry_limit = 0;
4483
4484 retry:
4485         amdgpu_amdkfd_pre_reset(adev);
4486
4487         if (from_hypervisor)
4488                 r = amdgpu_virt_request_full_gpu(adev, true);
4489         else
4490                 r = amdgpu_virt_reset_gpu(adev);
4491         if (r)
4492                 return r;
4493
4494         /* Resume IP prior to SMC */
4495         r = amdgpu_device_ip_reinit_early_sriov(adev);
4496         if (r)
4497                 goto error;
4498
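        /* Re-establish the VF<->host data exchange once the early IP blocks
         * are back up.
         */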
4499         amdgpu_virt_init_data_exchange(adev);
4500
4501         r = amdgpu_device_fw_loading(adev);
4502         if (r)
4503                 goto error;
4504
4505         /* now we are okay to resume SMC/CP/SDMA */
4506         r = amdgpu_device_ip_reinit_late_sriov(adev);
4507         if (r)
4508                 goto error;
4509
4510         hive = amdgpu_get_xgmi_hive(adev);
4511         /* Update PSP FW topology after reset */
4512         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4513                 r = amdgpu_xgmi_update_topology(hive, adev);
4514
4515         if (hive)
4516                 amdgpu_put_xgmi_hive(hive);
4517
4518         if (!r) {
4519                 amdgpu_irq_gpu_reset_resume_helper(adev);
4520                 r = amdgpu_ib_ring_tests(adev);
4521
4522                 amdgpu_amdkfd_post_reset(adev);
4523         }
4524
4525 error:
4526         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4527                 amdgpu_inc_vram_lost(adev);
4528                 r = amdgpu_device_recover_vram(adev);
4529         }
4530         amdgpu_virt_release_full_gpu(adev, true);
4531
4532         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4533                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4534                         retry_limit++;
4535                         goto retry;
4536                 } else {
4537                         DRM_ERROR("GPU reset retry count exceeded the retry limit\n");
4538                 }
4538         }
4539
4540         return r;
4541 }
4542
4543 /**
4544  * amdgpu_device_has_job_running - check if there is any job in the pending list
4545  *
4546  * @adev: amdgpu_device pointer
4547  *
4548  * Check if there is any job currently in the scheduler pending list.
4549  */
4550 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4551 {
4552         int i;
4553         struct drm_sched_job *job;
4554
4555         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4556                 struct amdgpu_ring *ring = adev->rings[i];
4557
4558                 if (!ring || !ring->sched.thread)
4559                         continue;
4560
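                /* Peek at the head of the pending list under the lock; a
                 * non-NULL entry means this ring still has a job in flight.
                 */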
4561                 spin_lock(&ring->sched.job_list_lock);
4562                 job = list_first_entry_or_null(&ring->sched.pending_list,
4563                                                struct drm_sched_job, list);
4564                 spin_unlock(&ring->sched.job_list_lock);
4565                 if (job)
4566                         return true;
4567         }
4568         return false;
4569 }
4570
4571 /**
4572  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4573  *
4574  * @adev: amdgpu_device pointer
4575  *
4576  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4577  * a hung GPU.
4578  */
4579 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4580 {
4582         if (amdgpu_gpu_recovery == 0)
4583                 goto disabled;
4584
4585         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4586                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4587                 return false;
4588         }
4589
4590         if (amdgpu_sriov_vf(adev))
4591                 return true;
4592
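        /* amdgpu_gpu_recovery == -1 means auto: recovery stays disabled by
         * default on the ASICs listed below.
         */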
4593         if (amdgpu_gpu_recovery == -1) {
4594                 switch (adev->asic_type) {
4595 #ifdef CONFIG_DRM_AMDGPU_SI
4596                 case CHIP_VERDE:
4597                 case CHIP_TAHITI:
4598                 case CHIP_PITCAIRN:
4599                 case CHIP_OLAND:
4600                 case CHIP_HAINAN:
4601 #endif
4602 #ifdef CONFIG_DRM_AMDGPU_CIK
4603                 case CHIP_KAVERI:
4604                 case CHIP_KABINI:
4605                 case CHIP_MULLINS:
4606 #endif
4607                 case CHIP_CARRIZO:
4608                 case CHIP_STONEY:
4609                 case CHIP_CYAN_SKILLFISH:
4610                         goto disabled;
4611                 default:
4612                         break;
4613                 }
4614         }
4615
4616         return true;
4617
4618 disabled:
4619         dev_info(adev->dev, "GPU recovery disabled.\n");
4620         return false;
4621 }
4622
4623 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4624 {
4625         u32 i;
4626         int ret = 0;
4627
4628         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4629
4630         dev_info(adev->dev, "GPU mode1 reset\n");
4631
4632         /* disable BM */
4633         pci_clear_master(adev->pdev);
4634
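        /* Cache the PCI config space so it can be restored after the reset
         * below.
         */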
4635         amdgpu_device_cache_pci_state(adev->pdev);
4636
4637         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4638                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4639                 ret = amdgpu_dpm_mode1_reset(adev);
4640         } else {
4641                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4642                 ret = psp_gpu_reset(adev);
4643         }
4644
4645         if (ret)
4646                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4647
4648         amdgpu_device_load_pci_state(adev->pdev);
4649
4650         /* wait for asic to come out of reset */
4651         for (i = 0; i < adev->usec_timeout; i++) {
4652                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4653
4654                 if (memsize != 0xffffffff)
4655                         break;
4656                 udelay(1);
4657         }
4658
4659         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4660         return ret;
4661 }
4662
4663 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4664                                  struct amdgpu_reset_context *reset_context)
4665 {
4666         int i, r = 0;
4667         struct amdgpu_job *job = NULL;
4668         bool need_full_reset =
4669                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4670
4671         if (reset_context->reset_req_dev == adev)
4672                 job = reset_context->job;
4673
4674         if (amdgpu_sriov_vf(adev)) {
4675                 /* stop the data exchange thread */
4676                 amdgpu_virt_fini_data_exchange(adev);
4677         }
4678
4679         amdgpu_fence_driver_isr_toggle(adev, true);
4680
4681         /* block all schedulers and reset given job's ring */
4682         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4683                 struct amdgpu_ring *ring = adev->rings[i];
4684
4685                 if (!ring || !ring->sched.thread)
4686                         continue;
4687
4688                 /* Clear job fences from the fence driver to avoid
4689                  * force_completion; NULL and vm flush fences are left in
4690                  * the fence driver.
4691                  */
4690                 amdgpu_fence_driver_clear_job_fences(ring);
4691
4692                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4693                 amdgpu_fence_driver_force_completion(ring);
4694         }
4695
4696         amdgpu_fence_driver_isr_toggle(adev, false);
4697
4698         if (job && job->vm)
4699                 drm_sched_increase_karma(&job->base);
4700
4701         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4702         /* If reset handler not implemented, continue; otherwise return */
4703         if (r == -ENOSYS)
4704                 r = 0;
4705         else
4706                 return r;
4707
4708         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4709         if (!amdgpu_sriov_vf(adev)) {
4710
4711                 if (!need_full_reset)
4712                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4713
4714                 if (!need_full_reset && amdgpu_gpu_recovery) {
4715                         amdgpu_device_ip_pre_soft_reset(adev);
4716                         r = amdgpu_device_ip_soft_reset(adev);
4717                         amdgpu_device_ip_post_soft_reset(adev);
4718                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4719                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4720                                 need_full_reset = true;
4721                         }
4722                 }
4723
4724                 if (need_full_reset) {
4725                         r = amdgpu_device_ip_suspend(adev);
4726                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4727                 } else {
4728                         clear_bit(AMDGPU_NEED_FULL_RESET,
4729                                   &reset_context->flags);
4730                 }
4731         }
4732
4733         return r;
4734 }
4735
4736 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4737 {
4738         int i;
4739
4740         lockdep_assert_held(&adev->reset_domain->sem);
4741
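        /* Snapshot the user-selected registers so their pre-reset values can
         * be reported via the trace event here and the devcoredump later.
         */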
4742         for (i = 0; i < adev->num_regs; i++) {
4743                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4744                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4745                                              adev->reset_dump_reg_value[i]);
4746         }
4747
4748         return 0;
4749 }
4750
4751 #ifdef CONFIG_DEV_COREDUMP
4752 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4753                 size_t count, void *data, size_t datalen)
4754 {
4755         struct drm_printer p;
4756         struct amdgpu_device *adev = data;
4757         struct drm_print_iterator iter;
4758         int i;
4759
4760         iter.data = buffer;
4761         iter.offset = 0;
4762         iter.start = offset;
4763         iter.remain = count;
4764
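        /* The coredump printer clips the output to the offset/count window
         * requested by the devcoredump core.
         */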
4765         p = drm_coredump_printer(&iter);
4766
4767         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4768         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4769         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4770         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4771         if (adev->reset_task_info.pid)
4772                 drm_printf(&p, "process_name: %s PID: %d\n",
4773                            adev->reset_task_info.process_name,
4774                            adev->reset_task_info.pid);
4775
4776         if (adev->reset_vram_lost)
4777                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4778         if (adev->num_regs) {
4779                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4780
4781                 for (i = 0; i < adev->num_regs; i++)
4782                         drm_printf(&p, "0x%08x: 0x%08x\n",
4783                                    adev->reset_dump_reg_list[i],
4784                                    adev->reset_dump_reg_value[i]);
4785         }
4786
4787         return count - iter.remain;
4788 }
4789
4790 static void amdgpu_devcoredump_free(void *data)
4791 {
4792 }
4793
4794 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4795 {
4796         struct drm_device *dev = adev_to_drm(adev);
4797
4798         ktime_get_ts64(&adev->reset_time);
4799         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4800                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4801 }
4802 #endif
4803
4804 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4805                          struct amdgpu_reset_context *reset_context)
4806 {
4807         struct amdgpu_device *tmp_adev = NULL;
4808         bool need_full_reset, skip_hw_reset, vram_lost = false;
4809         int r = 0;
4810         bool gpu_reset_for_dev_remove = false;
4811
4812         /* Try reset handler method first */
4813         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4814                                     reset_list);
4815         amdgpu_reset_reg_dumps(tmp_adev);
4816
4817         reset_context->reset_device_list = device_list_handle;
4818         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4819         /* If reset handler not implemented, continue; otherwise return */
4820         if (r == -ENOSYS)
4821                 r = 0;
4822         else
4823                 return r;
4824
4825         /* Reset handler not implemented, use the default method */
4826         need_full_reset =
4827                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4828         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4829
4830         gpu_reset_for_dev_remove =
4831                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4832                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4833
4834         /*
4835          * ASIC reset has to be done on all XGMI hive nodes ASAP
4836          * to allow proper link negotiation in FW (within 1 sec)
4837          */
4838         if (!skip_hw_reset && need_full_reset) {
4839                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4840                         /* For XGMI run all resets in parallel to speed up the process */
4841                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4842                                 tmp_adev->gmc.xgmi.pending_reset = false;
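                                /* queue_work() returning false means the reset
                                 * work was already queued by somebody else.
                                 */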
4843                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4844                                         r = -EALREADY;
4845                         } else
4846                                 r = amdgpu_asic_reset(tmp_adev);
4847
4848                         if (r) {
4849                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4850                                          r, adev_to_drm(tmp_adev)->unique);
4851                                 break;
4852                         }
4853                 }
4854
4855                 /* For XGMI, wait for all resets to complete before proceeding */
4856                 if (!r) {
4857                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4858                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4859                                         flush_work(&tmp_adev->xgmi_reset_work);
4860                                         r = tmp_adev->asic_reset_res;
4861                                         if (r)
4862                                                 break;
4863                                 }
4864                         }
4865                 }
4866         }
4867
4868         if (!r && amdgpu_ras_intr_triggered()) {
4869                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4870                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4871                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4872                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4873                 }
4874
4875                 amdgpu_ras_intr_cleared();
4876         }
4877
4878         /* Since the mode1 reset affects base ip blocks, the
4879          * phase1 ip blocks need to be resumed. Otherwise there
4880          * will be a BIOS signature error and the psp bootloader
4881          * can't load kdb on the next amdgpu install.
4882          */
4883         if (gpu_reset_for_dev_remove) {
4884                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4885                         amdgpu_device_ip_resume_phase1(tmp_adev);
4886
4887                 goto end;
4888         }
4889
4890         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4891                 if (need_full_reset) {
4892                         /* post card */
4893                         r = amdgpu_device_asic_init(tmp_adev);
4894                         if (r) {
4895                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4896                         } else {
4897                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4898                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4899                                 if (r)
4900                                         goto out;
4901
4902                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4903                                 if (r)
4904                                         goto out;
4905
4906                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4907 #ifdef CONFIG_DEV_COREDUMP
4908                                 tmp_adev->reset_vram_lost = vram_lost;
4909                                 memset(&tmp_adev->reset_task_info, 0,
4910                                                 sizeof(tmp_adev->reset_task_info));
4911                                 if (reset_context->job && reset_context->job->vm)
4912                                         tmp_adev->reset_task_info =
4913                                                 reset_context->job->vm->task_info;
4914                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4915 #endif
4916                                 if (vram_lost) {
4917                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4918                                         amdgpu_inc_vram_lost(tmp_adev);
4919                                 }
4920
4921                                 r = amdgpu_device_fw_loading(tmp_adev);
4922                                 if (r)
4923                                         return r;
4924
4925                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4926                                 if (r)
4927                                         goto out;
4928
4929                                 if (vram_lost)
4930                                         amdgpu_device_fill_reset_magic(tmp_adev);
4931
4932                                 /*
4933                                  * Add this ASIC back as tracked now that the
4934                                  * reset has completed successfully.
4935                                  */
4936                                 amdgpu_register_gpu_instance(tmp_adev);
4937
4938                                 if (!reset_context->hive &&
4939                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4940                                         amdgpu_xgmi_add_device(tmp_adev);
4941
4942                                 r = amdgpu_device_ip_late_init(tmp_adev);
4943                                 if (r)
4944                                         goto out;
4945
4946                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4947
4948                                 /*
4949                                  * The GPU enters a bad state once the number
4950                                  * of faulty pages caught by ECC reaches the
4951                                  * threshold, and RAS recovery is scheduled
4952                                  * next. So check here and break out of
4953                                  * recovery if the bad page threshold really
4954                                  * is exceeded, reminding the user to either
4955                                  * retire this GPU or set a bigger
4956                                  * bad_page_threshold value the next time the
4957                                  * driver is probed.
4958                                  */
4958                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4959                                         /* must succeed. */
4960                                         amdgpu_ras_resume(tmp_adev);
4961                                 } else {
4962                                         r = -EINVAL;
4963                                         goto out;
4964                                 }
4965
4966                                 /* Update PSP FW topology after reset */
4967                                 if (reset_context->hive &&
4968                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4969                                         r = amdgpu_xgmi_update_topology(
4970                                                 reset_context->hive, tmp_adev);
4971                         }
4972                 }
4973
4974 out:
4975                 if (!r) {
4976                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4977                         r = amdgpu_ib_ring_tests(tmp_adev);
4978                         if (r) {
4979                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4980                                 need_full_reset = true;
4981                                 r = -EAGAIN;
4982                                 goto end;
4983                         }
4984                 }
4985
4986                 if (!r)
4987                         r = amdgpu_device_recover_vram(tmp_adev);
4988                 else
4989                         tmp_adev->asic_reset_res = r;
4990         }
4991
4992 end:
4993         if (need_full_reset)
4994                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4995         else
4996                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4997         return r;
4998 }
4999
5000 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5001 {
5003         switch (amdgpu_asic_reset_method(adev)) {
5004         case AMD_RESET_METHOD_MODE1:
5005                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5006                 break;
5007         case AMD_RESET_METHOD_MODE2:
5008                 adev->mp1_state = PP_MP1_STATE_RESET;
5009                 break;
5010         default:
5011                 adev->mp1_state = PP_MP1_STATE_NONE;
5012                 break;
5013         }
5014 }
5015
5016 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5017 {
5018         amdgpu_vf_error_trans_all(adev);
5019         adev->mp1_state = PP_MP1_STATE_NONE;
5020 }
5021
5022 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5023 {
5024         struct pci_dev *p = NULL;
5025
5026         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5027                         adev->pdev->bus->number, 1);
5028         if (p) {
5029                 pm_runtime_enable(&(p->dev));
5030                 pm_runtime_resume(&(p->dev));
5031         }
5032 }
5033
5034 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5035 {
5036         enum amd_reset_method reset_method;
5037         struct pci_dev *p = NULL;
5038         u64 expires;
5039
5040         /*
5041          * For now, only BACO and mode1 reset are confirmed
5042          * to suffer from the audio issue if the audio device
5043          * is not properly suspended.
5043          */
5044         reset_method = amdgpu_asic_reset_method(adev);
5045         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5046              (reset_method != AMD_RESET_METHOD_MODE1))
5047                 return -EINVAL;
5048
5049         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5050                         adev->pdev->bus->number, 1);
5051         if (!p)
5052                 return -ENODEV;
5053
5054         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5055         if (!expires)
5056                 /*
5057                  * If we cannot get the audio device's autosuspend delay,
5058                  * a fixed 4s interval is used. Since 3s is the audio
5059                  * controller's default autosuspend delay setting, the 4s
5060                  * used here is guaranteed to cover it.
5061                  */
5062                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5063
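        /* Keep nudging runtime PM until the audio device reports suspended or
         * the deadline above expires.
         */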
5064         while (!pm_runtime_status_suspended(&(p->dev))) {
5065                 if (!pm_runtime_suspend(&(p->dev)))
5066                         break;
5067
5068                 if (expires < ktime_get_mono_fast_ns()) {
5069                         dev_warn(adev->dev, "failed to suspend display audio\n");
5070                         /* TODO: abort the succeeding gpu reset? */
5071                         return -ETIMEDOUT;
5072                 }
5073         }
5074
5075         pm_runtime_disable(&(p->dev));
5076
5077         return 0;
5078 }
5079
5080 static void amdgpu_device_recheck_guilty_jobs(
5081         struct amdgpu_device *adev, struct list_head *device_list_handle,
5082         struct amdgpu_reset_context *reset_context)
5083 {
5084         int i, r = 0;
5085
5086         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5087                 struct amdgpu_ring *ring = adev->rings[i];
5088                 int ret = 0;
5089                 struct drm_sched_job *s_job;
5090
5091                 if (!ring || !ring->sched.thread)
5092                         continue;
5093
5094                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5095                                 struct drm_sched_job, list);
5096                 if (s_job == NULL)
5097                         continue;
5098
5099                 /* clear the job's guilty flag and rely on the following step to decide the real one */
5100                 drm_sched_reset_karma(s_job);
5101                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5102
5103                 if (!s_job->s_fence->parent) {
5104                         DRM_WARN("Failed to get a HW fence for job!");
5105                         continue;
5106                 }
5107
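                /* Wait for the resubmitted job's HW fence; only a timeout here
                 * marks this job as the real offender.
                 */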
5108                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5109                 if (ret == 0) { /* timeout */
5110                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5111                                                 ring->sched.name, s_job->id);
5112
5114                         amdgpu_fence_driver_isr_toggle(adev, true);
5115
5116                         /* Clear this failed job from fence array */
5117                         amdgpu_fence_driver_clear_job_fences(ring);
5118
5119                         amdgpu_fence_driver_isr_toggle(adev, false);
5120
5121                         /* Since the job won't signal and we go for
5122                          * another resubmit, drop this parent pointer
5123                          */
5124                         dma_fence_put(s_job->s_fence->parent);
5125                         s_job->s_fence->parent = NULL;
5126
5127                         /* set guilty */
5128                         drm_sched_increase_karma(s_job);
5129                         amdgpu_reset_prepare_hwcontext(adev, reset_context);
5130 retry:
5131                         /* do hw reset */
5132                         if (amdgpu_sriov_vf(adev)) {
5133                                 amdgpu_virt_fini_data_exchange(adev);
5134                                 r = amdgpu_device_reset_sriov(adev, false);
5135                                 if (r)
5136                                         adev->asic_reset_res = r;
5137                         } else {
5138                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5139                                           &reset_context->flags);
5140                                 r = amdgpu_do_asic_reset(device_list_handle,
5141                                                          reset_context);
5142                                 if (r && r == -EAGAIN)
5143                                         goto retry;
5144                         }
5145
5146                         /*
5147                          * add reset counter so that the following
5148                          * resubmitted job could flush vmid
5149                          */
5150                         atomic_inc(&adev->gpu_reset_counter);
5151                         continue;
5152                 }
5153
5154                 /* got the hw fence, signal finished fence */
5155                 atomic_dec(ring->sched.score);
5156                 dma_fence_get(&s_job->s_fence->finished);
5157                 dma_fence_signal(&s_job->s_fence->finished);
5158                 dma_fence_put(&s_job->s_fence->finished);
5159
5160                 /* remove node from list and free the job */
5161                 spin_lock(&ring->sched.job_list_lock);
5162                 list_del_init(&s_job->list);
5163                 spin_unlock(&ring->sched.job_list_lock);
5164                 ring->sched.ops->free_job(s_job);
5165         }
5166 }
5167
5168 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5169 {
5170         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5171
5172 #if defined(CONFIG_DEBUG_FS)
5173         if (!amdgpu_sriov_vf(adev))
5174                 cancel_work(&adev->reset_work);
5175 #endif
5176
5177         if (adev->kfd.dev)
5178                 cancel_work(&adev->kfd.reset_work);
5179
5180         if (amdgpu_sriov_vf(adev))
5181                 cancel_work(&adev->virt.flr_work);
5182
5183         if (con && adev->ras_enabled)
5184                 cancel_work(&con->recovery_work);
5185
5186 }
5187
5188
5189 /**
5190  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5191  *
5192  * @adev: amdgpu_device pointer
5193  * @job: the job which triggered the hang, may be NULL
5194  * @reset_context: amdgpu reset context pointer
5194  *
5195  * Attempt to reset the GPU if it has hung (all asics).
5196  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5197  * Returns 0 for success or an error on failure.
5198  */
5200 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5201                               struct amdgpu_job *job,
5202                               struct amdgpu_reset_context *reset_context)
5203 {
5204         struct list_head device_list, *device_list_handle =  NULL;
5205         bool job_signaled = false;
5206         struct amdgpu_hive_info *hive = NULL;
5207         struct amdgpu_device *tmp_adev = NULL;
5208         int i, r = 0;
5209         bool need_emergency_restart = false;
5210         bool audio_suspended = false;
5211         int tmp_vram_lost_counter;
5212         bool gpu_reset_for_dev_remove = false;
5213
5214         gpu_reset_for_dev_remove =
5215                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5216                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5217
5218         /*
5219          * Special case: RAS triggered and full reset isn't supported
5220          */
5221         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5222
5223         /*
5224          * Flush RAM to disk so that after reboot
5225          * the user can read the log and see why the system rebooted.
5226          */
5227         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5228                 DRM_WARN("Emergency reboot.");
5229
5230                 ksys_sync_helper();
5231                 emergency_restart();
5232         }
5233
5234         dev_info(adev->dev, "GPU %s begin!\n",
5235                 need_emergency_restart ? "jobs stop" : "reset");
5236
5237         if (!amdgpu_sriov_vf(adev))
5238                 hive = amdgpu_get_xgmi_hive(adev);
5239         if (hive)
5240                 mutex_lock(&hive->hive_lock);
5241
5242         reset_context->job = job;
5243         reset_context->hive = hive;
5244         /*
5245          * Build list of devices to reset.
5246          * In case we are in XGMI hive mode, resort the device list
5247          * to put adev in the 1st position.
5248          */
5249         INIT_LIST_HEAD(&device_list);
5250         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5251                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5252                         list_add_tail(&tmp_adev->reset_list, &device_list);
5253                         if (gpu_reset_for_dev_remove && adev->shutdown)
5254                                 tmp_adev->shutdown = true;
5255                 }
5256                 if (!list_is_first(&adev->reset_list, &device_list))
5257                         list_rotate_to_front(&adev->reset_list, &device_list);
5258                 device_list_handle = &device_list;
5259         } else {
5260                 list_add_tail(&adev->reset_list, &device_list);
5261                 device_list_handle = &device_list;
5262         }
5263
5264         /* We need to lock the reset domain only once, for both XGMI and single device */
5265         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5266                                     reset_list);
5267         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5268
5269         /* block all schedulers and reset given job's ring */
5270         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5271
5272                 amdgpu_device_set_mp1_state(tmp_adev);
5273
5274                 /*
5275                  * Try to put the audio codec into suspend state
5276                  * before gpu reset starts.
5277                  *
5278                  * Because the power domain of the graphics device
5279                  * is shared with the AZ power domain, without this
5280                  * we may change the audio hardware from behind
5281                  * the audio driver's back and trigger
5282                  * audio codec errors.
5283                  */
5284                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5285                         audio_suspended = true;
5286
5287                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5288
5289                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5290
5291                 if (!amdgpu_sriov_vf(tmp_adev))
5292                         amdgpu_amdkfd_pre_reset(tmp_adev);
5293
5294                 /*
5295                  * Mark these ASICs as untracked first, and add
5296                  * them back after the reset completes.
5297                  */
5298                 amdgpu_unregister_gpu_instance(tmp_adev);
5299
5300                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5301
5302                 /* disable ras on ALL IPs */
5303                 if (!need_emergency_restart &&
5304                       amdgpu_device_ip_need_full_reset(tmp_adev))
5305                         amdgpu_ras_suspend(tmp_adev);
5306
5307                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5308                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5309
5310                         if (!ring || !ring->sched.thread)
5311                                 continue;
5312
5313                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5314
5315                         if (need_emergency_restart)
5316                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5317                 }
5318                 atomic_inc(&tmp_adev->gpu_reset_counter);
5319         }
5320
5321         if (need_emergency_restart)
5322                 goto skip_sched_resume;
5323
5324         /*
5325          * Must check guilty signal here since after this point all old
5326          * HW fences are force signaled.
5327          *
5328          * job->base holds a reference to parent fence
5329          */
5330         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5331                 job_signaled = true;
5332                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5333                 goto skip_hw_reset;
5334         }
5335
5336 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5337         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5338                 if (gpu_reset_for_dev_remove) {
5339                         /* Workaround for ASICs that need to disable the SMC first */
5340                         amdgpu_device_smu_fini_early(tmp_adev);
5341                 }
5342                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5343                 /* TODO: Should we stop? */
5344                 if (r) {
5345                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5346                                   r, adev_to_drm(tmp_adev)->unique);
5347                         tmp_adev->asic_reset_res = r;
5348                 }
5349
5350                 /*
5351                  * Drop all pending non-scheduler resets. Scheduler resets
5352                  * were already dropped during drm_sched_stop.
5353                  */
5354                 amdgpu_device_stop_pending_resets(tmp_adev);
5355         }
5356
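        /* Snapshot the VRAM-lost counter so we can tell after the reset below
         * whether this recovery pass actually lost VRAM contents.
         */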
5357         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5358         /* Actual ASIC resets if needed. */
5359         /* Host driver will handle XGMI hive reset for SRIOV */
5360         if (amdgpu_sriov_vf(adev)) {
5361                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5362                 if (r)
5363                         adev->asic_reset_res = r;
5364
5365                 /* Aldebaran supports ras in SRIOV, so we need to resume ras during reset */
5366                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5367                         amdgpu_ras_resume(adev);
5368         } else {
5369                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5370                 if (r && r == -EAGAIN)
5371                         goto retry;
5372
5373                 if (!r && gpu_reset_for_dev_remove)
5374                         goto recover_end;
5375         }
5376
5377 skip_hw_reset:
5378
5379         /* Post ASIC reset for all devs. */
5380         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5381
5382                 /*
5383                  * Sometimes a later bad compute job can block a good gfx job, as the
5384                  * gfx and compute rings share internal GC HW mutually. We add an
5385                  * additional guilty-job recheck step to find the real guilty job: it
5386                  * synchronously resubmits and waits for the first job to be signaled.
5387                  * If that wait times out, we identify it as the real guilty job.
5388                  */
5389                 if (amdgpu_gpu_recovery == 2 &&
5390                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5391                         amdgpu_device_recheck_guilty_jobs(
5392                                 tmp_adev, device_list_handle, reset_context);
5393
5394                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5395                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5396
5397                         if (!ring || !ring->sched.thread)
5398                                 continue;
5399
5400                         /* No point in resubmitting jobs if we didn't HW reset */
5401                         if (!tmp_adev->asic_reset_res && !job_signaled)
5402                                 drm_sched_resubmit_jobs(&ring->sched);
5403
5404                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5405                 }
5406
5407                 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5408                         amdgpu_mes_self_test(tmp_adev);
5409
5410                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5411                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5412                 }
5413
5414                 if (tmp_adev->asic_reset_res)
5415                         r = tmp_adev->asic_reset_res;
5416
5417                 tmp_adev->asic_reset_res = 0;
5418
5419                 if (r) {
5420                         /* bad news, how to tell it to userspace? */
5421                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5422                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5423                 } else {
5424                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5425                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5426                                 DRM_WARN("smart shift update failed\n");
5427                 }
5428         }
5429
5430 skip_sched_resume:
5431         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5432                 /* unlock kfd: SRIOV would do it separately */
5433                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5434                         amdgpu_amdkfd_post_reset(tmp_adev);
5435
5436                 /* kfd_post_reset will do nothing if the kfd device is not
5437                  * initialized, so bring up kfd here if it wasn't
5438                  * initialized before
5439                  */
5440                 if (!tmp_adev->kfd.init_complete)
5441                         amdgpu_amdkfd_device_init(tmp_adev);
5441
5442                 if (audio_suspended)
5443                         amdgpu_device_resume_display_audio(tmp_adev);
5444
5445                 amdgpu_device_unset_mp1_state(tmp_adev);
5446         }
5447
5448 recover_end:
5449         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5450                                             reset_list);
5451         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5452
5453         if (hive) {
5454                 mutex_unlock(&hive->hive_lock);
5455                 amdgpu_put_xgmi_hive(hive);
5456         }
5457
5458         if (r)
5459                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5460
5461         atomic_set(&adev->reset_domain->reset_res, r);
5462         return r;
5463 }
5464
5465 /**
5466  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5467  *
5468  * @adev: amdgpu_device pointer
5469  *
5470  * Fetches and stores in the driver the PCIE capabilities (gen speed
5471  * and lanes) of the slot the device is in. Handles APUs and
5472  * virtualized environments where PCIE config space may not be available.
5473  */
5474 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5475 {
5476         struct pci_dev *pdev;
5477         enum pci_bus_speed speed_cap, platform_speed_cap;
5478         enum pcie_link_width platform_link_width;
5479
5480         if (amdgpu_pcie_gen_cap)
5481                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5482
5483         if (amdgpu_pcie_lane_cap)
5484                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5485
5486         /* covers APUs as well */
5487         if (pci_is_root_bus(adev->pdev->bus)) {
5488                 if (adev->pm.pcie_gen_mask == 0)
5489                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5490                 if (adev->pm.pcie_mlw_mask == 0)
5491                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5492                 return;
5493         }
5494
5495         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5496                 return;
5497
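        /* Ask the PCI core for the speed and width actually available on the
         * path to the root port; the slowest link in the chain is the limit.
         */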
5498         pcie_bandwidth_available(adev->pdev, NULL,
5499                                  &platform_speed_cap, &platform_link_width);
5500
5501         if (adev->pm.pcie_gen_mask == 0) {
5502                 /* asic caps */
5503                 pdev = adev->pdev;
5504                 speed_cap = pcie_get_speed_cap(pdev);
5505                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5506                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5507                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5508                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5509                 } else {
5510                         if (speed_cap == PCIE_SPEED_32_0GT)
5511                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5512                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5513                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5514                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5515                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5516                         else if (speed_cap == PCIE_SPEED_16_0GT)
5517                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5518                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5519                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5520                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5521                         else if (speed_cap == PCIE_SPEED_8_0GT)
5522                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5523                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5524                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5525                         else if (speed_cap == PCIE_SPEED_5_0GT)
5526                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5527                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5528                         else
5529                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5530                 }
5531                 /* platform caps */
5532                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5533                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5534                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5535                 } else {
5536                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5537                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5538                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5539                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5540                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5541                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5542                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5543                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5544                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5545                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5546                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5547                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5548                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5549                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5550                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5551                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5552                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5553                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5554                         else
5555                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5556
5557                 }
5558         }
5559         if (adev->pm.pcie_mlw_mask == 0) {
5560                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5561                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5562                 } else {
5563                         switch (platform_link_width) {
5564                         case PCIE_LNK_X32:
5565                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5566                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5567                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5568                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5569                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5570                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5571                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5572                                 break;
5573                         case PCIE_LNK_X16:
5574                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5575                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5576                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5577                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5578                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5579                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5580                                 break;
5581                         case PCIE_LNK_X12:
5582                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5583                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5584                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5585                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5586                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5587                                 break;
5588                         case PCIE_LNK_X8:
5589                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5590                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5591                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5592                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5593                                 break;
5594                         case PCIE_LNK_X4:
5595                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5596                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5597                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5598                                 break;
5599                         case PCIE_LNK_X2:
5600                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5601                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5602                                 break;
5603                         case PCIE_LNK_X1:
5604                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5605                                 break;
5606                         default:
5607                                 break;
5608                         }
5609                 }
5610         }
5611 }
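
/*
 * Illustrative check only (not part of this file's logic): consumers of
 * adev->pm.pcie_gen_mask can test individual generations, e.g.
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		... Gen3 link speeds may be enabled ...
 */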
5612
5613 /**
5614  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5615  *
5616  * @adev: amdgpu_device pointer
5617  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5618  *
5619  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5620  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5621  * @peer_adev.
5622  */
5623 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5624                                       struct amdgpu_device *peer_adev)
5625 {
5626 #ifdef CONFIG_HSA_AMD_P2P
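        /*
         * Set bits mark addresses the peer cannot reach; if the peer has
         * no DMA mask, assume the conventional 32-bit default.
         */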
5627         uint64_t address_mask = peer_adev->dev->dma_mask ?
5628                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5629         resource_size_t aper_limit =
5630                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5631         bool p2p_access =
5632                 !adev->gmc.xgmi.connected_to_cpu &&
5633                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5634
5635         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5636                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5637                 !(adev->gmc.aper_base & address_mask ||
5638                   aper_limit & address_mask));
5639 #else
5640         return false;
5641 #endif
5642 }
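
/*
 * Illustrative caller pattern (not taken from this file): a P2P setup
 * path would typically check both directions before mapping VRAM over
 * PCIe, e.g.
 *
 *	if (amdgpu_device_is_peer_accessible(adev, peer_adev) &&
 *	    amdgpu_device_is_peer_accessible(peer_adev, adev))
 *		... set up DMA between the two devices ...
 */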
5643
5644 int amdgpu_device_baco_enter(struct drm_device *dev)
5645 {
5646         struct amdgpu_device *adev = drm_to_adev(dev);
5647         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5648
5649         if (!amdgpu_device_supports_baco(dev))
5650                 return -ENOTSUPP;
5651
5652         if (ras && adev->ras_enabled &&
5653             adev->nbio.funcs->enable_doorbell_interrupt)
5654                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5655
5656         return amdgpu_dpm_baco_enter(adev);
5657 }
5658
5659 int amdgpu_device_baco_exit(struct drm_device *dev)
5660 {
5661         struct amdgpu_device *adev = drm_to_adev(dev);
5662         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5663         int ret = 0;
5664
5665         if (!amdgpu_device_supports_baco(dev))
5666                 return -ENOTSUPP;
5667
5668         ret = amdgpu_dpm_baco_exit(adev);
5669         if (ret)
5670                 return ret;
5671
5672         if (ras && adev->ras_enabled &&
5673             adev->nbio.funcs->enable_doorbell_interrupt)
5674                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5675
5676         if (amdgpu_passthrough(adev) &&
5677             adev->nbio.funcs->clear_doorbell_interrupt)
5678                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5679
5680         return 0;
5681 }
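
/*
 * Illustrative pairing (simplified; a real runtime-PM path also handles
 * errors and intermediate device state):
 *
 *	r = amdgpu_device_baco_enter(dev);	// power down into BACO
 *	...
 *	r = amdgpu_device_baco_exit(dev);	// power back up
 */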
5682
5683 /**
5684  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5685  * @pdev: PCI device struct
5686  * @state: PCI channel state
5687  *
5688  * Description: Called when a PCI error is detected.
5689  *
5690  * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5691  */
5692 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5693 {
5694         struct drm_device *dev = pci_get_drvdata(pdev);
5695         struct amdgpu_device *adev = drm_to_adev(dev);
5696         int i;
5697
5698         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5699
5700         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5701                 DRM_WARN("No support for XGMI hive yet...");
5702                 return PCI_ERS_RESULT_DISCONNECT;
5703         }
5704
5705         adev->pci_channel_state = state;
5706
5707         switch (state) {
5708         case pci_channel_io_normal:
5709                 return PCI_ERS_RESULT_CAN_RECOVER;
5710         /* Fatal error, prepare for slot reset */
5711         case pci_channel_io_frozen:
5712                 /*
5713                  * Locking adev->reset_domain->sem will prevent any external access
5714                  * to GPU during PCI error recovery
5715                  */
5716                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5717                 amdgpu_device_set_mp1_state(adev);
5718
5719                 /*
5720                  * Block any work scheduling as we do for regular GPU reset
5721                  * for the duration of the recovery
5722                  */
5723                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5724                         struct amdgpu_ring *ring = adev->rings[i];
5725
5726                         if (!ring || !ring->sched.thread)
5727                                 continue;
5728
5729                         drm_sched_stop(&ring->sched, NULL);
5730                 }
5731                 atomic_inc(&adev->gpu_reset_counter);
5732                 return PCI_ERS_RESULT_NEED_RESET;
5733         case pci_channel_io_perm_failure:
5734                 /* Permanent error, prepare for device removal */
5735                 return PCI_ERS_RESULT_DISCONNECT;
5736         }
5737
5738         return PCI_ERS_RESULT_NEED_RESET;
5739 }
5740
5741 /**
5742  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5743  * @pdev: pointer to PCI device
5744  */
5745 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5746 {
5748         DRM_INFO("PCI error: mmio enabled callback!!\n");
5749
5750         /* TODO - dump whatever for debugging purposes */
5751
5752         /* This is called only if amdgpu_pci_error_detected returns
5753          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5754          * work, so there is no need to reset the slot.
5755          */
5756
5757         return PCI_ERS_RESULT_RECOVERED;
5758 }
5759
5760 /**
5761  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5762  * @pdev: PCI device struct
5763  *
5764  * Description: This routine is called by the PCI error recovery
5765  * code after the PCI slot has been reset, just before normal
5766  * operation resumes.
5767  */
5768 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5769 {
5770         struct drm_device *dev = pci_get_drvdata(pdev);
5771         struct amdgpu_device *adev = drm_to_adev(dev);
5772         int r, i;
5773         struct amdgpu_reset_context reset_context;
5774         u32 memsize;
5775         struct list_head device_list;
5776
5777         DRM_INFO("PCI error: slot reset callback!!\n");
5778
5779         memset(&reset_context, 0, sizeof(reset_context));
5780
5781         INIT_LIST_HEAD(&device_list);
5782         list_add_tail(&adev->reset_list, &device_list);
5783
5784         /* wait for asic to come out of reset */
5785         msleep(500);
5786
5787         /* Restore PCI config space */
5788         amdgpu_device_load_pci_state(pdev);
5789
5790         /* confirm ASIC came out of reset */
5791         for (i = 0; i < adev->usec_timeout; i++) {
5792                 memsize = amdgpu_asic_get_config_memsize(adev);
5793
5794                 if (memsize != 0xffffffff)
5795                         break;
5796                 udelay(1);
5797         }
5798         if (memsize == 0xffffffff) {
5799                 r = -ETIME;
5800                 goto out;
5801         }
5802
5803         reset_context.method = AMD_RESET_METHOD_NONE;
5804         reset_context.reset_req_dev = adev;
5805         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5806         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5807
5808         adev->no_hw_access = true;
5809         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5810         adev->no_hw_access = false;
5811         if (r)
5812                 goto out;
5813
5814         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5815
5816 out:
5817         if (!r) {
5818                 if (amdgpu_device_cache_pci_state(adev->pdev))
5819                         pci_restore_state(adev->pdev);
5820
5821                 DRM_INFO("PCIe error recovery succeeded\n");
5822         } else {
5823                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5824                 amdgpu_device_unset_mp1_state(adev);
5825                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5826         }
5827
5828         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5829 }
5830
5831 /**
5832  * amdgpu_pci_resume() - resume normal ops after PCI reset
5833  * @pdev: pointer to PCI device
5834  *
5835  * Called when the error recovery driver tells us that it's
5836  * OK to resume normal operation.
5837  */
5838 void amdgpu_pci_resume(struct pci_dev *pdev)
5839 {
5840         struct drm_device *dev = pci_get_drvdata(pdev);
5841         struct amdgpu_device *adev = drm_to_adev(dev);
5842         int i;
5843
5845         DRM_INFO("PCI error: resume callback!!\n");
5846
5847         /* Only continue execution for the case of pci_channel_io_frozen */
5848         if (adev->pci_channel_state != pci_channel_io_frozen)
5849                 return;
5850
5851         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5852                 struct amdgpu_ring *ring = adev->rings[i];
5853
5854                 if (!ring || !ring->sched.thread)
5855                         continue;
5856
5858                 drm_sched_resubmit_jobs(&ring->sched);
5859                 drm_sched_start(&ring->sched, true);
5860         }
5861
5862         amdgpu_device_unset_mp1_state(adev);
5863         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5864 }
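
/*
 * The four callbacks above are handed to the PCI core through a
 * struct pci_error_handlers table in the driver's registration code,
 * roughly as follows (the table name here is illustrative):
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */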
5865
5866 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5867 {
5868         struct drm_device *dev = pci_get_drvdata(pdev);
5869         struct amdgpu_device *adev = drm_to_adev(dev);
5870         int r;
5871
5872         r = pci_save_state(pdev);
5873         if (!r) {
5874                 kfree(adev->pci_state);
5875
5876                 adev->pci_state = pci_store_saved_state(pdev);
5877
5878                 if (!adev->pci_state) {
5879                         DRM_ERROR("Failed to store PCI saved state\n");
5880                         return false;
5881                 }
5882         } else {
5883                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5884                 return false;
5885         }
5886
5887         return true;
5888 }
5889
5890 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5891 {
5892         struct drm_device *dev = pci_get_drvdata(pdev);
5893         struct amdgpu_device *adev = drm_to_adev(dev);
5894         int r;
5895
5896         if (!adev->pci_state)
5897                 return false;
5898
5899         r = pci_load_saved_state(pdev, adev->pci_state);
5900
5901         if (!r) {
5902                 pci_restore_state(pdev);
5903         } else {
5904                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5905                 return false;
5906         }
5907
5908         return true;
5909 }
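
/*
 * Cache/load are used as a pair: cache the config space while the device
 * is healthy, then restore it after a reset, e.g. (simplified):
 *
 *	amdgpu_device_cache_pci_state(pdev);
 *	... GPU reset ...
 *	amdgpu_device_load_pci_state(pdev);
 */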
5910
5911 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5912                 struct amdgpu_ring *ring)
5913 {
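        /*
         * An HDP flush is only needed when CPU access to VRAM goes through
         * the PCIe HDP path; APUs (unless passed through to a VM) and
         * parts with XGMI connected to the CPU can skip it.
         */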
5914 #ifdef CONFIG_X86_64
5915         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5916                 return;
5917 #endif
5918         if (adev->gmc.xgmi.connected_to_cpu)
5919                 return;
5920
5921         if (ring && ring->funcs->emit_hdp_flush)
5922                 amdgpu_ring_emit_hdp_flush(ring);
5923         else
5924                 amdgpu_asic_flush_hdp(adev, ring);
5925 }
5926
5927 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5928                 struct amdgpu_ring *ring)
5929 {
5930 #ifdef CONFIG_X86_64
5931         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5932                 return;
5933 #endif
5934         if (adev->gmc.xgmi.connected_to_cpu)
5935                 return;
5936
5937         amdgpu_asic_invalidate_hdp(adev, ring);
5938 }
5939
5940 int amdgpu_in_reset(struct amdgpu_device *adev)
5941 {
5942         return atomic_read(&adev->reset_domain->in_gpu_reset);
5943 }
5944
5945 /**
5946  * amdgpu_device_halt() - bring hardware to some kind of halt state
5947  *
5948  * @adev: amdgpu_device pointer
5949  *
5950  * Bring the hardware to some kind of halt state so that no one can touch
5951  * it any more. This helps to maintain the error context when an error
5952  * occurs. Compared to a simple hang, the system stays stable at least for
5953  * SSH access, so it should be trivial to inspect the hardware state and
5954  * see what's going on. Implemented as follows:
5955  *
5956  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5957  *    clears all CPU mappings to the device, and disallows remappings through page faults
5958  * 2. amdgpu_irq_disable_all() disables all interrupts
5959  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5960  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5961  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5962  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5963  *    flush any in-flight DMA operations
5964  */
5965 void amdgpu_device_halt(struct amdgpu_device *adev)
5966 {
5967         struct pci_dev *pdev = adev->pdev;
5968         struct drm_device *ddev = adev_to_drm(adev);
5969
5970         drm_dev_unplug(ddev);
5971
5972         amdgpu_irq_disable_all(adev);
5973
5974         amdgpu_fence_driver_hw_fini(adev);
5975
5976         adev->no_hw_access = true;
5977
5978         amdgpu_device_unmap_mmio(adev);
5979
5980         pci_disable_device(pdev);
5981         pci_wait_for_pending_transaction(pdev);
5982 }
5983
5984 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5985                                 u32 reg)
5986 {
5987         unsigned long flags, address, data;
5988         u32 r;
5989
5990         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5991         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5992
5993         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5994         WREG32(address, reg * 4);
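        /* read back to post the index write before touching the data port */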
5995         (void)RREG32(address);
5996         r = RREG32(data);
5997         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5998         return r;
5999 }
6000
6001 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6002                                 u32 reg, u32 v)
6003 {
6004         unsigned long flags, address, data;
6005
6006         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6007         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6008
6009         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6010         WREG32(address, reg * 4);
6011         (void)RREG32(address);
6012         WREG32(data, v);
6013         (void)RREG32(data);
6014         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6015 }
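
/*
 * Illustrative read-modify-write through the index/data pair above (the
 * field mask and value below are placeholders):
 *
 *	u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);
 *	tmp &= ~SOME_FIELD_MASK;
 *	tmp |= SOME_FIELD_VALUE;
 *	amdgpu_device_pcie_port_wreg(adev, reg, tmp);
 */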
6016
6017 /**
6018  * amdgpu_device_switch_gang - switch to a new gang
6019  * @adev: amdgpu_device pointer
6020  * @gang: the gang to switch to
6021  *
6022  * Try to switch to a new gang.
6023  * Returns: NULL if we switched to the new gang, or a reference to the still
6024  * running current gang leader otherwise.
6025  */
6026 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6027                                             struct dma_fence *gang)
6028 {
6029         struct dma_fence *old = NULL;
6030
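        /*
         * Lock-free update: take a reference to the current leader under
         * RCU, bail out if it is still running, otherwise try to install
         * the new gang with cmpxchg() and retry on a race.
         */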
6031         do {
6032                 dma_fence_put(old);
6033                 rcu_read_lock();
6034                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6035                 rcu_read_unlock();
6036
6037                 if (old == gang)
6038                         break;
6039
6040                 if (!dma_fence_is_signaled(old))
6041                         return old;
6042
6043         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6044                          old, gang) != old);
6045
6046         dma_fence_put(old);
6047         return NULL;
6048 }
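
/*
 * Illustrative caller pattern (simplified; real submission code may turn
 * the returned fence into a scheduler dependency instead of blocking):
 *
 *	old = amdgpu_device_switch_gang(adev, new_gang);
 *	if (old) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */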