2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
50 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
74 #include <drm/drm_drv.h>
76 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
89 #define AMDGPU_RESUME_MS 2000
91 const char *amdgpu_asic_name[] = {
132 * DOC: pcie_replay_count
134 * The amdgpu driver provides a sysfs API for reporting the total number
135 * of PCIe replays (NAKs)
136 * The file pcie_replay_count is used for this and returns the total
137 * number of replays as a sum of the NAKs generated and NAKs received
140 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
141 struct device_attribute *attr, char *buf)
143 struct drm_device *ddev = dev_get_drvdata(dev);
144 struct amdgpu_device *adev = drm_to_adev(ddev);
145 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
147 return sysfs_emit(buf, "%llu\n", cnt);
150 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
151 amdgpu_device_get_pcie_replay_count, NULL);
153 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
158 * The amdgpu driver provides a sysfs API for reporting the product name
160 * The file product_name is used for this and returns the product name
161 * as returned from the FRU.
162 * NOTE: This is only available for certain server cards
165 static ssize_t amdgpu_device_get_product_name(struct device *dev,
166 struct device_attribute *attr, char *buf)
168 struct drm_device *ddev = dev_get_drvdata(dev);
169 struct amdgpu_device *adev = drm_to_adev(ddev);
171 return sysfs_emit(buf, "%s\n", adev->product_name);
174 static DEVICE_ATTR(product_name, S_IRUGO,
175 amdgpu_device_get_product_name, NULL);
178 * DOC: product_number
180 * The amdgpu driver provides a sysfs API for reporting the part number
182 * The file product_number is used for this and returns the part number
183 * as returned from the FRU.
184 * NOTE: This is only available for certain server cards
187 static ssize_t amdgpu_device_get_product_number(struct device *dev,
188 struct device_attribute *attr, char *buf)
190 struct drm_device *ddev = dev_get_drvdata(dev);
191 struct amdgpu_device *adev = drm_to_adev(ddev);
193 return sysfs_emit(buf, "%s\n", adev->product_number);
196 static DEVICE_ATTR(product_number, S_IRUGO,
197 amdgpu_device_get_product_number, NULL);
202 * The amdgpu driver provides a sysfs API for reporting the serial number
204 * The file serial_number is used for this and returns the serial number
205 * as returned from the FRU.
206 * NOTE: This is only available for certain server cards
209 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
210 struct device_attribute *attr, char *buf)
212 struct drm_device *ddev = dev_get_drvdata(dev);
213 struct amdgpu_device *adev = drm_to_adev(ddev);
215 return sysfs_emit(buf, "%s\n", adev->serial);
218 static DEVICE_ATTR(serial_number, S_IRUGO,
219 amdgpu_device_get_serial_number, NULL);
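/*
 * A minimal, hypothetical userspace sketch of consuming the FRU sysfs
 * files above.  The card index and path are assumptions that depend on
 * the system; this is not part of the driver:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char serial[64] = {0};
 *		FILE *f = fopen("/sys/class/drm/card0/device/serial_number", "r");
 *
 *		if (!f)
 *			return 1;
 *		fgets(serial, sizeof(serial), f);
 *		fclose(f);
 *		printf("serial: %s", serial);
 *		return 0;
 *	}
 */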
222 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
224 * @dev: drm_device pointer
226 * Returns true if the device is a dGPU with ATPX power control,
227 * otherwise returns false.
229 bool amdgpu_device_supports_px(struct drm_device *dev)
231 struct amdgpu_device *adev = drm_to_adev(dev);
233 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
239 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
241 * @dev: drm_device pointer
243 * Returns true if the device is a dGPU with ACPI power control,
244 * otherwise returns false.
246 bool amdgpu_device_supports_boco(struct drm_device *dev)
248 struct amdgpu_device *adev = drm_to_adev(dev);
251 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
257 * amdgpu_device_supports_baco - Does the device support BACO
259 * @dev: drm_device pointer
261 * Returns true if the device supports BACO,
262 * otherwise returns false.
264 bool amdgpu_device_supports_baco(struct drm_device *dev)
266 struct amdgpu_device *adev = drm_to_adev(dev);
268 return amdgpu_asic_supports_baco(adev);
272 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
273 * Smart Shift support
275 * @dev: drm_device pointer
277 * Returns true if the device is a dGPU with Smart Shift support,
278 * otherwise returns false.
280 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
282 return (amdgpu_device_supports_boco(dev) &&
283 amdgpu_acpi_is_power_shift_control_supported());
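/*
 * A minimal sketch (not driver code) of how the helpers above can be
 * combined to pick a runtime power-down mechanism; the enum and the
 * function name are placeholders for illustration.
 */
enum amdgpu_example_runpm_mode {
	EXAMPLE_RUNPM_NONE,
	EXAMPLE_RUNPM_PX,	/* ATPX dGPU power control */
	EXAMPLE_RUNPM_BOCO,	/* ACPI power resources */
	EXAMPLE_RUNPM_BACO,	/* bus active, chip off */
};

static enum amdgpu_example_runpm_mode __maybe_unused
amdgpu_example_pick_runpm_mode(struct drm_device *dev)
{
	if (amdgpu_device_supports_px(dev))
		return EXAMPLE_RUNPM_PX;
	if (amdgpu_device_supports_boco(dev))
		return EXAMPLE_RUNPM_BOCO;
	if (amdgpu_device_supports_baco(dev))
		return EXAMPLE_RUNPM_BACO;
	return EXAMPLE_RUNPM_NONE;
}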
287 * VRAM access helper functions
291 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
293 * @adev: amdgpu_device pointer
294 * @pos: offset of the buffer in vram
295 * @buf: virtual address of the buffer in system memory
296 * @size: read/write size; the buffer at @buf must be at least @size bytes
297 * @write: true - write to vram, otherwise - read from vram
299 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
300 void *buf, size_t size, bool write)
303 uint32_t hi = ~0, tmp = 0;
304 uint32_t *data = buf;
308 if (!drm_dev_enter(&adev->ddev, &idx))
311 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
313 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
314 for (last = pos + size; pos < last; pos += 4) {
317 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
319 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
323 WREG32_NO_KIQ(mmMM_DATA, *data++);
325 *data++ = RREG32_NO_KIQ(mmMM_DATA);
328 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
333 * amdgpu_device_aper_access - access vram via the vram aperture
335 * @adev: amdgpu_device pointer
336 * @pos: offset of the buffer in vram
337 * @buf: virtual address of the buffer in system memory
338 * @size: read/write size; the buffer at @buf must be at least @size bytes
339 * @write: true - write to vram, otherwise - read from vram
341 * Returns the number of bytes transferred.
343 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
344 void *buf, size_t size, bool write)
351 if (!adev->mman.aper_base_kaddr)
354 last = min(pos + size, adev->gmc.visible_vram_size);
356 addr = adev->mman.aper_base_kaddr + pos;
360 memcpy_toio(addr, buf, count);
362 amdgpu_device_flush_hdp(adev, NULL);
364 amdgpu_device_invalidate_hdp(adev, NULL);
366 memcpy_fromio(buf, addr, count);
378 * amdgpu_device_vram_access - read/write a buffer in vram
380 * @adev: amdgpu_device pointer
381 * @pos: offset of the buffer in vram
382 * @buf: virtual address of the buffer in system memory
383 * @size: read/write size; the buffer at @buf must be at least @size bytes
384 * @write: true - write to vram, otherwise - read from vram
386 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
387 void *buf, size_t size, bool write)
391 /* try using the vram aperture to access vram first */
392 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 /* use MM to access the rest of vram */
398 amdgpu_device_mm_access(adev, pos, buf, size, write);
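/*
 * A minimal usage sketch, assuming a fully initialized @adev: read one
 * dword from a 4-byte aligned VRAM offset.  The helper name is a
 * placeholder, not driver API.
 */
static uint32_t __maybe_unused
amdgpu_example_read_vram_dword(struct amdgpu_device *adev, loff_t pos)
{
	uint32_t val = 0;

	/* write=false selects a read; the aperture path is tried first,
	 * MM_INDEX/MM_DATA covers whatever falls outside of it */
	amdgpu_device_vram_access(adev, pos, &val, sizeof(val), false);
	return val;
}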
403 * register access helper functions.
406 /* Check if hw access should be skipped because of hotplug or device error */
407 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
409 if (adev->no_hw_access)
412 #ifdef CONFIG_LOCKDEP
414 * This is a bit complicated to understand, so worth a comment. What we assert
415 * here is that the GPU reset is not running on another thread in parallel.
417 * For this we trylock the read side of the reset semaphore, if that succeeds
418 * we know that the reset is not running in parallel.
420 * If the trylock fails we assert that we are either already holding the read
421 * side of the lock or are the reset thread itself and hold the write side of
425 if (down_read_trylock(&adev->reset_sem))
426 up_read(&adev->reset_sem);
428 lockdep_assert_held(&adev->reset_sem);
435 * amdgpu_device_rreg - read a memory mapped IO or indirect register
437 * @adev: amdgpu_device pointer
438 * @reg: dword aligned register offset
439 * @acc_flags: access flags which require special behavior
441 * Returns the 32 bit value from the offset specified.
443 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
444 uint32_t reg, uint32_t acc_flags)
448 if (amdgpu_device_skip_hw_access(adev))
451 if ((reg * 4) < adev->rmmio_size) {
452 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
453 amdgpu_sriov_runtime(adev) &&
454 down_read_trylock(&adev->reset_sem)) {
455 ret = amdgpu_kiq_rreg(adev, reg);
456 up_read(&adev->reset_sem);
458 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 ret = adev->pcie_rreg(adev, reg * 4);
464 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
470 * MMIO register read with bytes helper functions
471 * @offset: byte offset from MMIO start
476 * amdgpu_mm_rreg8 - read a memory mapped IO register
478 * @adev: amdgpu_device pointer
479 * @offset: byte aligned register offset
481 * Returns the 8 bit value from the offset specified.
483 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
485 if (amdgpu_device_skip_hw_access(adev))
488 if (offset < adev->rmmio_size)
489 return (readb(adev->rmmio + offset));
494 * MMIO register write with bytes helper functions
495 * @offset: byte offset from MMIO start
496 * @value: the value to be written to the register
500 * amdgpu_mm_wreg8 - write a memory mapped IO register
502 * @adev: amdgpu_device pointer
503 * @offset: byte aligned register offset
504 * @value: 8 bit value to write
506 * Writes the value specified to the offset specified.
508 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
510 if (amdgpu_device_skip_hw_access(adev))
513 if (offset < adev->rmmio_size)
514 writeb(value, adev->rmmio + offset);
520 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
522 * @adev: amdgpu_device pointer
523 * @reg: dword aligned register offset
524 * @v: 32 bit value to write to the register
525 * @acc_flags: access flags which require special behavior
527 * Writes the value specified to the offset specified.
529 void amdgpu_device_wreg(struct amdgpu_device *adev,
530 uint32_t reg, uint32_t v,
533 if (amdgpu_device_skip_hw_access(adev))
536 if ((reg * 4) < adev->rmmio_size) {
537 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
538 amdgpu_sriov_runtime(adev) &&
539 down_read_trylock(&adev->reset_sem)) {
540 amdgpu_kiq_wreg(adev, reg, v);
541 up_read(&adev->reset_sem);
543 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 adev->pcie_wreg(adev, reg * 4, v);
549 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
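/*
 * A minimal read-modify-write sketch using the two helpers above; in
 * driver code this is typically spelled with the RREG32()/WREG32()
 * macros, which wrap these functions.  The register offset and masks
 * here are placeholders.
 */
static void __maybe_unused
amdgpu_example_reg_rmw(struct amdgpu_device *adev, uint32_t reg,
		       uint32_t mask, uint32_t bits)
{
	uint32_t tmp = amdgpu_device_rreg(adev, reg, 0);

	tmp &= ~mask;
	tmp |= bits & mask;
	amdgpu_device_wreg(adev, reg, tmp, 0);
}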
553 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
555 * this function is invoked only for debugfs register access
557 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
558 uint32_t reg, uint32_t v)
560 if (amdgpu_device_skip_hw_access(adev))
563 if (amdgpu_sriov_fullaccess(adev) &&
564 adev->gfx.rlc.funcs &&
565 adev->gfx.rlc.funcs->is_rlcg_access_range) {
566 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
567 return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
569 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
574 * amdgpu_mm_rdoorbell - read a doorbell dword
576 * @adev: amdgpu_device pointer
577 * @index: doorbell index
579 * Returns the value in the doorbell aperture at the
580 * requested doorbell index (CIK).
582 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
584 if (amdgpu_device_skip_hw_access(adev))
587 if (index < adev->doorbell.num_doorbells) {
588 return readl(adev->doorbell.ptr + index);
590 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
596 * amdgpu_mm_wdoorbell - write a doorbell dword
598 * @adev: amdgpu_device pointer
599 * @index: doorbell index
602 * Writes @v to the doorbell aperture at the
603 * requested doorbell index (CIK).
605 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
607 if (amdgpu_device_skip_hw_access(adev))
610 if (index < adev->doorbell.num_doorbells) {
611 writel(v, adev->doorbell.ptr + index);
613 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
618 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
620 * @adev: amdgpu_device pointer
621 * @index: doorbell index
623 * Returns the value in the doorbell aperture at the
624 * requested doorbell index (VEGA10+).
626 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
628 if (amdgpu_device_skip_hw_access(adev))
631 if (index < adev->doorbell.num_doorbells) {
632 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
634 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
640 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
642 * @adev: amdgpu_device pointer
643 * @index: doorbell index
646 * Writes @v to the doorbell aperture at the
647 * requested doorbell index (VEGA10+).
649 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
651 if (amdgpu_device_skip_hw_access(adev))
654 if (index < adev->doorbell.num_doorbells) {
655 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
657 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
662 * amdgpu_device_indirect_rreg - read an indirect register
664 * @adev: amdgpu_device pointer
665 * @pcie_index: mmio register offset
666 * @pcie_data: mmio register offset
667 * @reg_addr: indirect register address to read from
669 * Returns the value of indirect register @reg_addr
671 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
672 u32 pcie_index, u32 pcie_data,
677 void __iomem *pcie_index_offset;
678 void __iomem *pcie_data_offset;
680 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
681 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
682 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
684 writel(reg_addr, pcie_index_offset);
685 readl(pcie_index_offset);
686 r = readl(pcie_data_offset);
687 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
693 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
695 * @adev: amdgpu_device pointer
696 * @pcie_index: mmio register offset
697 * @pcie_data: mmio register offset
698 * @reg_addr: indirect register address to read from
700 * Returns the value of indirect register @reg_addr
702 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
703 u32 pcie_index, u32 pcie_data,
708 void __iomem *pcie_index_offset;
709 void __iomem *pcie_data_offset;
711 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
712 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
713 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
715 /* read low 32 bits */
716 writel(reg_addr, pcie_index_offset);
717 readl(pcie_index_offset);
718 r = readl(pcie_data_offset);
719 /* read high 32 bits */
720 writel(reg_addr + 4, pcie_index_offset);
721 readl(pcie_index_offset);
722 r |= ((u64)readl(pcie_data_offset) << 32);
723 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
729 * amdgpu_device_indirect_wreg - write an indirect register
731 * @adev: amdgpu_device pointer
732 * @pcie_index: mmio register offset
733 * @pcie_data: mmio register offset
734 * @reg_addr: indirect register offset
735 * @reg_data: indirect register data
738 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
739 u32 pcie_index, u32 pcie_data,
740 u32 reg_addr, u32 reg_data)
743 void __iomem *pcie_index_offset;
744 void __iomem *pcie_data_offset;
746 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
747 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
748 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
750 writel(reg_addr, pcie_index_offset);
751 readl(pcie_index_offset);
752 writel(reg_data, pcie_data_offset);
753 readl(pcie_data_offset);
754 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
758 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
760 * @adev: amdgpu_device pointer
761 * @pcie_index: mmio register offset
762 * @pcie_data: mmio register offset
763 * @reg_addr: indirect register offset
764 * @reg_data: indirect register data
767 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
768 u32 pcie_index, u32 pcie_data,
769 u32 reg_addr, u64 reg_data)
772 void __iomem *pcie_index_offset;
773 void __iomem *pcie_data_offset;
775 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
776 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
777 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
779 /* write low 32 bits */
780 writel(reg_addr, pcie_index_offset);
781 readl(pcie_index_offset);
782 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
783 readl(pcie_data_offset);
784 /* write high 32 bits */
785 writel(reg_addr + 4, pcie_index_offset);
786 readl(pcie_index_offset);
787 writel((u32)(reg_data >> 32), pcie_data_offset);
788 readl(pcie_data_offset);
789 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
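/*
 * A minimal usage sketch of the indirect access pattern above.  The
 * index/data offsets below are placeholders; real callers pass the
 * asic-specific mmio offsets of their PCIE_INDEX/PCIE_DATA pair.
 */
static u32 __maybe_unused
amdgpu_example_indirect_read(struct amdgpu_device *adev, u32 reg_addr)
{
	const u32 pcie_index = 0x38;	/* placeholder mmio offset */
	const u32 pcie_data = 0x39;	/* placeholder mmio offset */

	return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data,
					   reg_addr);
}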
793 * amdgpu_invalid_rreg - dummy reg read function
795 * @adev: amdgpu_device pointer
796 * @reg: offset of register
798 * Dummy register read function. Used for register blocks
799 * that certain asics don't have (all asics).
800 * Returns the value in the register.
802 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
804 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
810 * amdgpu_invalid_wreg - dummy reg write function
812 * @adev: amdgpu_device pointer
813 * @reg: offset of register
814 * @v: value to write to the register
816 * Dummy register write function. Used for register blocks
817 * that certain asics don't have (all asics).
819 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
821 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
827 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
829 * @adev: amdgpu_device pointer
830 * @reg: offset of register
832 * Dummy register read function. Used for register blocks
833 * that certain asics don't have (all asics).
834 * Returns the value in the register.
836 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
838 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
844 * amdgpu_invalid_wreg64 - dummy reg write function
846 * @adev: amdgpu_device pointer
847 * @reg: offset of register
848 * @v: value to write to the register
850 * Dummy register write function. Used for register blocks
851 * that certain asics don't have (all asics).
853 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
855 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
861 * amdgpu_block_invalid_rreg - dummy reg read function
863 * @adev: amdgpu_device pointer
864 * @block: offset of instance
865 * @reg: offset of register
867 * Dummy register read function. Used for register blocks
868 * that certain asics don't have (all asics).
869 * Returns the value in the register.
871 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
872 uint32_t block, uint32_t reg)
874 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
881 * amdgpu_block_invalid_wreg - dummy reg write function
883 * @adev: amdgpu_device pointer
884 * @block: offset of instance
885 * @reg: offset of register
886 * @v: value to write to the register
888 * Dummy register write function. Used for register blocks
889 * that certain asics don't have (all asics).
891 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
893 uint32_t reg, uint32_t v)
895 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
901 * amdgpu_device_asic_init - Wrapper for atom asic_init
903 * @adev: amdgpu_device pointer
905 * Does any asic specific work and then calls atom asic init.
907 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
909 amdgpu_asic_pre_asic_init(adev);
911 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
915 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
917 * @adev: amdgpu_device pointer
919 * Allocates a scratch page of VRAM for use by various things in the
922 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
924 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
925 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
926 &adev->vram_scratch.robj,
927 &adev->vram_scratch.gpu_addr,
928 (void **)&adev->vram_scratch.ptr);
932 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
934 * @adev: amdgpu_device pointer
936 * Frees the VRAM scratch page.
938 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
940 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
944 * amdgpu_device_program_register_sequence - program an array of registers.
946 * @adev: amdgpu_device pointer
947 * @registers: pointer to the register array
948 * @array_size: size of the register array
950 * Programs an array of registers with AND and OR masks.
951 * This is a helper for setting golden registers.
953 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
954 const u32 *registers,
955 const u32 array_size)
957 u32 tmp, reg, and_mask, or_mask;
963 for (i = 0; i < array_size; i += 3) {
964 reg = registers[i + 0];
965 and_mask = registers[i + 1];
966 or_mask = registers[i + 2];
968 if (and_mask == 0xffffffff) {
973 if (adev->family >= AMDGPU_FAMILY_AI)
974 tmp |= (or_mask & and_mask);
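/*
 * A minimal sketch of a golden register list as consumed by the function
 * above: flat triplets of { register, and_mask, or_mask }.  The offsets
 * and values are placeholders, not real golden settings:
 *
 *	static const u32 example_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000ff00, 0x00002100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */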
983 * amdgpu_device_pci_config_reset - reset the GPU
985 * @adev: amdgpu_device pointer
987 * Resets the GPU using the pci config reset sequence.
988 * Only applicable to asics prior to vega10.
990 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
992 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
996 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
998 * @adev: amdgpu_device pointer
1000 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1002 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1004 return pci_reset_function(adev->pdev);
1008 * GPU doorbell aperture helpers function.
1011 * amdgpu_device_doorbell_init - Init doorbell driver information.
1013 * @adev: amdgpu_device pointer
1015 * Init doorbell driver information (CIK)
1016 * Returns 0 on success, error on failure.
1018 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1021 /* No doorbell on SI hardware generation */
1022 if (adev->asic_type < CHIP_BONAIRE) {
1023 adev->doorbell.base = 0;
1024 adev->doorbell.size = 0;
1025 adev->doorbell.num_doorbells = 0;
1026 adev->doorbell.ptr = NULL;
1030 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1033 amdgpu_asic_init_doorbell_index(adev);
1035 /* doorbell bar mapping */
1036 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1037 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1039 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1040 adev->doorbell_index.max_assignment+1);
1041 if (adev->doorbell.num_doorbells == 0)
1044 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1045 * paging queue doorbell use the second page. The
1046 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1047 * doorbells are in the first page. So with paging queue enabled,
1048 * the max num_doorbells should be extended by 1 page (0x400 in dwords)
1050 if (adev->asic_type >= CHIP_VEGA10)
1051 adev->doorbell.num_doorbells += 0x400;
1053 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1054 adev->doorbell.num_doorbells *
1056 if (adev->doorbell.ptr == NULL)
1063 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1065 * @adev: amdgpu_device pointer
1067 * Tear down doorbell driver information (CIK)
1069 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1071 iounmap(adev->doorbell.ptr);
1072 adev->doorbell.ptr = NULL;
1078 * amdgpu_device_wb_*()
1079 * Writeback is the method by which the GPU updates special pages in memory
1080 * with the status of certain GPU events (fences, ring pointers, etc.).
1084 * amdgpu_device_wb_fini - Disable Writeback and free memory
1086 * @adev: amdgpu_device pointer
1088 * Disables Writeback and frees the Writeback memory (all asics).
1089 * Used at driver shutdown.
1091 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1093 if (adev->wb.wb_obj) {
1094 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1096 (void **)&adev->wb.wb);
1097 adev->wb.wb_obj = NULL;
1102 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1104 * @adev: amdgpu_device pointer
1106 * Initializes writeback and allocates writeback memory (all asics).
1107 * Used at driver startup.
1108 * Returns 0 on success or a negative error code on failure.
1110 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1114 if (adev->wb.wb_obj == NULL) {
1115 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1116 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1117 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1118 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1119 (void **)&adev->wb.wb);
1121 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1125 adev->wb.num_wb = AMDGPU_MAX_WB;
1126 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1128 /* clear wb memory */
1129 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1136 * amdgpu_device_wb_get - Allocate a wb entry
1138 * @adev: amdgpu_device pointer
1141 * Allocate a wb slot for use by the driver (all asics).
1142 * Returns 0 on success or -EINVAL on failure.
1144 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1146 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1148 if (offset < adev->wb.num_wb) {
1149 __set_bit(offset, adev->wb.used);
1150 *wb = offset << 3; /* convert to dw offset */
1158 * amdgpu_device_wb_free - Free a wb entry
1160 * @adev: amdgpu_device pointer
1163 * Free a wb slot allocated for use by the driver (all asics)
1165 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1168 if (wb < adev->wb.num_wb)
1169 __clear_bit(wb, adev->wb.used);
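/*
 * A minimal sketch (illustration only) of the writeback usage pattern:
 * allocate a slot, derive its CPU and GPU views, then release it.  The
 * function name is a placeholder; real users hold the slot for the
 * lifetime of the consumer (e.g. a ring).
 */
static int __maybe_unused amdgpu_example_wb_usage(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* @wb is a dword offset into the writeback page: CPU view */
	adev->wb.wb[wb] = 0;
	/* the corresponding GPU address is adev->wb.gpu_addr + wb * 4 */

	amdgpu_device_wb_free(adev, wb);
	return 0;
}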
1173 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1175 * @adev: amdgpu_device pointer
1177 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1178 * to fail, but if any of the BARs is not accessible after resizing we abort
1179 * driver loading by returning -ENODEV.
1181 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1183 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1184 struct pci_bus *root;
1185 struct resource *res;
1191 if (amdgpu_sriov_vf(adev))
1194 /* skip if the bios has already enabled large BAR */
1195 if (adev->gmc.real_vram_size &&
1196 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1199 /* Check if the root BUS has 64bit memory resources */
1200 root = adev->pdev->bus;
1201 while (root->parent)
1202 root = root->parent;
1204 pci_bus_for_each_resource(root, res, i) {
1205 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1206 res->start > 0x100000000ull)
1210 /* Trying to resize is pointless without a root hub window above 4GB */
1214 /* Limit the BAR size to what is available */
1215 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1218 /* Disable memory decoding while we change the BAR addresses and size */
1219 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1220 pci_write_config_word(adev->pdev, PCI_COMMAND,
1221 cmd & ~PCI_COMMAND_MEMORY);
1223 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1224 amdgpu_device_doorbell_fini(adev);
1225 if (adev->asic_type >= CHIP_BONAIRE)
1226 pci_release_resource(adev->pdev, 2);
1228 pci_release_resource(adev->pdev, 0);
1230 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1232 DRM_INFO("Not enough PCI address space for a large BAR.");
1233 else if (r && r != -ENOTSUPP)
1234 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1236 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1238 /* When the doorbell or fb BAR isn't available we have no chance of
1241 r = amdgpu_device_doorbell_init(adev);
1242 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1245 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1251 * GPU helpers function.
1254 * amdgpu_device_need_post - check if the hw needs to be posted or not
1256 * @adev: amdgpu_device pointer
1258 * Check if the asic has been initialized (all asics) at driver startup
1259 * or if posting is needed because a hw reset was performed.
1260 * Returns true if posting is needed, false if not.
1262 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1266 if (amdgpu_sriov_vf(adev))
1269 if (amdgpu_passthrough(adev)) {
1270 * for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1271 * reboot some old SMC firmware still needs the driver to do a vPost or the
1272 * GPU hangs, while SMC firmware versions above 22.15 don't have this flaw,
1273 * so we force vPost to be executed for SMC versions below 22.15
1275 if (adev->asic_type == CHIP_FIJI) {
1278 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1279 /* force vPost if an error occurred */
1283 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1284 if (fw_ver < 0x00160e00)
1289 /* Don't post if we need to reset whole hive on init */
1290 if (adev->gmc.xgmi.pending_reset)
1293 if (adev->has_hw_reset) {
1294 adev->has_hw_reset = false;
1298 /* bios scratch used on CIK+ */
1299 if (adev->asic_type >= CHIP_BONAIRE)
1300 return amdgpu_atombios_scratch_need_asic_init(adev);
1302 /* check MEM_SIZE for older asics */
1303 reg = amdgpu_asic_get_config_memsize(adev);
1305 if ((reg != 0) && (reg != 0xffffffff))
1311 /* if we get transitioned to only one device, take VGA back */
1313 * amdgpu_device_vga_set_decode - enable/disable vga decode
1315 * @pdev: PCI device pointer
1316 * @state: enable/disable vga decode
1318 * Enable/disable vga decode (all asics).
1319 * Returns VGA resource flags.
1321 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1324 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1325 amdgpu_asic_set_vga_state(adev, state);
1327 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1328 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1330 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1334 * amdgpu_device_check_block_size - validate the vm block size
1336 * @adev: amdgpu_device pointer
1338 * Validates the vm block size specified via module parameter.
1340 * The vm block size defines the number of bits in the page table versus the page directory,
1340 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1341 * page table and the remaining bits are in the page directory.
1343 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1345 /* defines number of bits in page table versus page directory,
1346 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1347 * page table and the remaining bits are in the page directory */
1348 if (amdgpu_vm_block_size == -1)
1351 if (amdgpu_vm_block_size < 9) {
1352 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1353 amdgpu_vm_block_size);
1354 amdgpu_vm_block_size = -1;
1359 * amdgpu_device_check_vm_size - validate the vm size
1361 * @adev: amdgpu_device pointer
1363 * Validates the vm size in GB specified via module parameter.
1364 * The VM size is the size of the GPU virtual memory space in GB.
1366 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1368 /* no need to check the default value */
1369 if (amdgpu_vm_size == -1)
1372 if (amdgpu_vm_size < 1) {
1373 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1375 amdgpu_vm_size = -1;
1379 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1382 bool is_os_64 = (sizeof(void *) == 8);
1383 uint64_t total_memory;
1384 uint64_t dram_size_seven_GB = 0x1B8000000;
1385 uint64_t dram_size_three_GB = 0xB8000000;
1387 if (amdgpu_smu_memory_pool_size == 0)
1391 DRM_WARN("Not 64-bit OS, feature not supported\n");
1395 total_memory = (uint64_t)si.totalram * si.mem_unit;
1397 if ((amdgpu_smu_memory_pool_size == 1) ||
1398 (amdgpu_smu_memory_pool_size == 2)) {
1399 if (total_memory < dram_size_three_GB)
1401 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1402 (amdgpu_smu_memory_pool_size == 8)) {
1403 if (total_memory < dram_size_seven_GB)
1406 DRM_WARN("Smu memory pool size not supported\n");
1409 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1414 DRM_WARN("No enough system memory\n");
1416 adev->pm.smu_prv_buffer_size = 0;
1419 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1421 if (!(adev->flags & AMD_IS_APU) ||
1422 adev->asic_type < CHIP_RAVEN)
1425 switch (adev->asic_type) {
1427 if (adev->pdev->device == 0x15dd)
1428 adev->apu_flags |= AMD_APU_IS_RAVEN;
1429 if (adev->pdev->device == 0x15d8)
1430 adev->apu_flags |= AMD_APU_IS_PICASSO;
1433 if ((adev->pdev->device == 0x1636) ||
1434 (adev->pdev->device == 0x164c))
1435 adev->apu_flags |= AMD_APU_IS_RENOIR;
1437 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1440 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1442 case CHIP_YELLOW_CARP:
1444 case CHIP_CYAN_SKILLFISH:
1445 if (adev->pdev->device == 0x13FE)
1446 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1456 * amdgpu_device_check_arguments - validate module params
1458 * @adev: amdgpu_device pointer
1460 * Validates certain module parameters and updates
1461 * the associated values used by the driver (all asics).
1463 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1465 if (amdgpu_sched_jobs < 4) {
1466 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1468 amdgpu_sched_jobs = 4;
1469 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1470 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1472 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1475 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1476 /* gart size must be greater or equal to 32M */
1477 dev_warn(adev->dev, "gart size (%d) too small\n",
1479 amdgpu_gart_size = -1;
1482 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1483 /* gtt size must be greater or equal to 32M */
1484 dev_warn(adev->dev, "gtt size (%d) too small\n",
1486 amdgpu_gtt_size = -1;
1489 /* valid range is between 4 and 9 inclusive */
1490 if (amdgpu_vm_fragment_size != -1 &&
1491 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1492 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1493 amdgpu_vm_fragment_size = -1;
1496 if (amdgpu_sched_hw_submission < 2) {
1497 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1498 amdgpu_sched_hw_submission);
1499 amdgpu_sched_hw_submission = 2;
1500 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1501 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1502 amdgpu_sched_hw_submission);
1503 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1506 amdgpu_device_check_smu_prv_buffer_size(adev);
1508 amdgpu_device_check_vm_size(adev);
1510 amdgpu_device_check_block_size(adev);
1512 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1514 amdgpu_gmc_tmz_set(adev);
1516 amdgpu_gmc_noretry_set(adev);
1522 * amdgpu_switcheroo_set_state - set switcheroo state
1524 * @pdev: pci dev pointer
1525 * @state: vga_switcheroo state
1527 * Callback for the switcheroo driver. Suspends or resumes the
1528 * asics before or after it is powered up using ACPI methods.
1530 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1531 enum vga_switcheroo_state state)
1533 struct drm_device *dev = pci_get_drvdata(pdev);
1536 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1539 if (state == VGA_SWITCHEROO_ON) {
1540 pr_info("switched on\n");
1541 /* don't suspend or resume card normally */
1542 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1544 pci_set_power_state(pdev, PCI_D0);
1545 amdgpu_device_load_pci_state(pdev);
1546 r = pci_enable_device(pdev);
1548 DRM_WARN("pci_enable_device failed (%d)\n", r);
1549 amdgpu_device_resume(dev, true);
1551 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1553 pr_info("switched off\n");
1554 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1555 amdgpu_device_suspend(dev, true);
1556 amdgpu_device_cache_pci_state(pdev);
1557 /* Shut down the device */
1558 pci_disable_device(pdev);
1559 pci_set_power_state(pdev, PCI_D3cold);
1560 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1565 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1567 * @pdev: pci dev pointer
1569 * Callback for the switcheroo driver. Check if the switcheroo
1570 * state can be changed.
1571 * Returns true if the state can be changed, false if not.
1573 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1575 struct drm_device *dev = pci_get_drvdata(pdev);
1578 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1579 * locking inversion with the driver load path. And the access here is
1580 * completely racy anyway. So don't bother with locking for now.
1582 return atomic_read(&dev->open_count) == 0;
1585 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1586 .set_gpu_state = amdgpu_switcheroo_set_state,
1588 .can_switch = amdgpu_switcheroo_can_switch,
1592 * amdgpu_device_ip_set_clockgating_state - set the CG state
1594 * @dev: amdgpu_device pointer
1595 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1596 * @state: clockgating state (gate or ungate)
1598 * Sets the requested clockgating state for all instances of
1599 * the hardware IP specified.
1600 * Returns the error code from the last instance.
1602 int amdgpu_device_ip_set_clockgating_state(void *dev,
1603 enum amd_ip_block_type block_type,
1604 enum amd_clockgating_state state)
1606 struct amdgpu_device *adev = dev;
1609 for (i = 0; i < adev->num_ip_blocks; i++) {
1610 if (!adev->ip_blocks[i].status.valid)
1612 if (adev->ip_blocks[i].version->type != block_type)
1614 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1616 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1617 (void *)adev, state);
1619 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1620 adev->ip_blocks[i].version->funcs->name, r);
1626 * amdgpu_device_ip_set_powergating_state - set the PG state
1628 * @dev: amdgpu_device pointer
1629 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1630 * @state: powergating state (gate or ungate)
1632 * Sets the requested powergating state for all instances of
1633 * the hardware IP specified.
1634 * Returns the error code from the last instance.
1636 int amdgpu_device_ip_set_powergating_state(void *dev,
1637 enum amd_ip_block_type block_type,
1638 enum amd_powergating_state state)
1640 struct amdgpu_device *adev = dev;
1643 for (i = 0; i < adev->num_ip_blocks; i++) {
1644 if (!adev->ip_blocks[i].status.valid)
1646 if (adev->ip_blocks[i].version->type != block_type)
1648 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1650 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1651 (void *)adev, state);
1653 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1654 adev->ip_blocks[i].version->funcs->name, r);
1660 * amdgpu_device_ip_get_clockgating_state - get the CG state
1662 * @adev: amdgpu_device pointer
1663 * @flags: clockgating feature flags
1665 * Walks the list of IPs on the device and updates the clockgating
1666 * flags for each IP.
1667 * Updates @flags with the feature flags for each hardware IP where
1668 * clockgating is enabled.
1670 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1675 for (i = 0; i < adev->num_ip_blocks; i++) {
1676 if (!adev->ip_blocks[i].status.valid)
1678 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1679 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1684 * amdgpu_device_ip_wait_for_idle - wait for idle
1686 * @adev: amdgpu_device pointer
1687 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689 * Waits for the requested hardware IP to be idle.
1690 * Returns 0 for success or a negative error code on failure.
1692 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1693 enum amd_ip_block_type block_type)
1697 for (i = 0; i < adev->num_ip_blocks; i++) {
1698 if (!adev->ip_blocks[i].status.valid)
1700 if (adev->ip_blocks[i].version->type == block_type) {
1701 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1712 * amdgpu_device_ip_is_idle - is the hardware IP idle
1714 * @adev: amdgpu_device pointer
1715 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1717 * Check if the hardware IP is idle or not.
1718 * Returns true if the IP is idle, false if not.
1720 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1721 enum amd_ip_block_type block_type)
1725 for (i = 0; i < adev->num_ip_blocks; i++) {
1726 if (!adev->ip_blocks[i].status.valid)
1728 if (adev->ip_blocks[i].version->type == block_type)
1729 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1736 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1738 * @adev: amdgpu_device pointer
1739 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1741 * Returns a pointer to the hardware IP block structure
1742 * if it exists for the asic, otherwise NULL.
1744 struct amdgpu_ip_block *
1745 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1746 enum amd_ip_block_type type)
1750 for (i = 0; i < adev->num_ip_blocks; i++)
1751 if (adev->ip_blocks[i].version->type == type)
1752 return &adev->ip_blocks[i];
1758 * amdgpu_device_ip_block_version_cmp
1760 * @adev: amdgpu_device pointer
1761 * @type: enum amd_ip_block_type
1762 * @major: major version
1763 * @minor: minor version
1765 * Returns 0 if the IP block version is equal or greater,
1766 * 1 if it is smaller or the ip_block doesn't exist
1768 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1769 enum amd_ip_block_type type,
1770 u32 major, u32 minor)
1772 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1774 if (ip_block && ((ip_block->version->major > major) ||
1775 ((ip_block->version->major == major) &&
1776 (ip_block->version->minor >= minor))))
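/*
 * A minimal usage sketch, with placeholder version numbers: gate a code
 * path on the GFX block being present at version 9.0 or newer.
 */
static bool __maybe_unused amdgpu_example_has_gfx_9(struct amdgpu_device *adev)
{
	/* 0 means the block exists and its version is >= 9.0 */
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  9, 0) == 0;
}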
1783 * amdgpu_device_ip_block_add
1785 * @adev: amdgpu_device pointer
1786 * @ip_block_version: pointer to the IP to add
1788 * Adds the IP block driver information to the collection of IPs
1791 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1792 const struct amdgpu_ip_block_version *ip_block_version)
1794 if (!ip_block_version)
1797 switch (ip_block_version->type) {
1798 case AMD_IP_BLOCK_TYPE_VCN:
1799 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1802 case AMD_IP_BLOCK_TYPE_JPEG:
1803 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1810 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1811 ip_block_version->funcs->name);
1813 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
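/*
 * A minimal sketch (illustration only) of how an asic-specific
 * set_ip_blocks() routine uses the helper above; example_common_ip_block
 * and example_gmc_ip_block stand in for the struct
 * amdgpu_ip_block_version instances each asic file exports:
 *
 *	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *	if (r)
 *		return r;
 *	r = amdgpu_device_ip_block_add(adev, &example_gmc_ip_block);
 *	if (r)
 *		return r;
 */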
1819 * amdgpu_device_enable_virtual_display - enable virtual display feature
1821 * @adev: amdgpu_device pointer
1823 * Enables the virtual display feature if the user has enabled it via
1824 * the module parameter virtual_display. This feature provides a virtual
1825 * display hardware on headless boards or in virtualized environments.
1826 * This function parses and validates the configuration string specified by
1827 * the user and configures the virtual display configuration (number of
1828 * virtual connectors, crtcs, etc.) specified.
1830 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1832 adev->enable_virtual_display = false;
1834 if (amdgpu_virtual_display) {
1835 const char *pci_address_name = pci_name(adev->pdev);
1836 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1838 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1839 pciaddstr_tmp = pciaddstr;
1840 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1841 pciaddname = strsep(&pciaddname_tmp, ",");
1842 if (!strcmp("all", pciaddname)
1843 || !strcmp(pci_address_name, pciaddname)) {
1847 adev->enable_virtual_display = true;
1850 res = kstrtol(pciaddname_tmp, 10,
1858 adev->mode_info.num_crtc = num_crtc;
1860 adev->mode_info.num_crtc = 1;
1866 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1867 amdgpu_virtual_display, pci_address_name,
1868 adev->enable_virtual_display, adev->mode_info.num_crtc);
1875 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1877 * @adev: amdgpu_device pointer
1879 * Parses the asic configuration parameters specified in the gpu info
1880 * firmware and makes them available to the driver for use in configuring
1882 * Returns 0 on success, -EINVAL on failure.
1884 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1886 const char *chip_name;
1889 const struct gpu_info_firmware_header_v1_0 *hdr;
1891 adev->firmware.gpu_info_fw = NULL;
1893 if (adev->mman.discovery_bin) {
1894 amdgpu_discovery_get_gfx_info(adev);
1897 * FIXME: The bounding box is still needed by Navi12, so
1898 * temporarily read it from gpu_info firmware. Should be dropped
1899 * when DAL no longer needs it.
1901 if (adev->asic_type != CHIP_NAVI12)
1905 switch (adev->asic_type) {
1906 #ifdef CONFIG_DRM_AMDGPU_SI
1913 #ifdef CONFIG_DRM_AMDGPU_CIK
1923 case CHIP_POLARIS10:
1924 case CHIP_POLARIS11:
1925 case CHIP_POLARIS12:
1930 case CHIP_ALDEBARAN:
1931 case CHIP_SIENNA_CICHLID:
1932 case CHIP_NAVY_FLOUNDER:
1933 case CHIP_DIMGREY_CAVEFISH:
1934 case CHIP_BEIGE_GOBY:
1938 chip_name = "vega10";
1941 chip_name = "vega12";
1944 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1945 chip_name = "raven2";
1946 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1947 chip_name = "picasso";
1949 chip_name = "raven";
1952 chip_name = "arcturus";
1955 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1956 chip_name = "renoir";
1958 chip_name = "green_sardine";
1961 chip_name = "navi10";
1964 chip_name = "navi14";
1967 chip_name = "navi12";
1970 chip_name = "vangogh";
1972 case CHIP_YELLOW_CARP:
1973 chip_name = "yellow_carp";
1977 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1978 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1981 "Failed to load gpu_info firmware \"%s\"\n",
1985 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1988 "Failed to validate gpu_info firmware \"%s\"\n",
1993 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1994 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1996 switch (hdr->version_major) {
1999 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2000 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2001 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2004 * Should be dropped when DAL no longer needs it.
2006 if (adev->asic_type == CHIP_NAVI12)
2007 goto parse_soc_bounding_box;
2009 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2010 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2011 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2012 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2013 adev->gfx.config.max_texture_channel_caches =
2014 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2015 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2016 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2017 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2018 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2019 adev->gfx.config.double_offchip_lds_buf =
2020 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2021 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2022 adev->gfx.cu_info.max_waves_per_simd =
2023 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2024 adev->gfx.cu_info.max_scratch_slots_per_cu =
2025 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2026 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2027 if (hdr->version_minor >= 1) {
2028 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2029 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2030 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2031 adev->gfx.config.num_sc_per_sh =
2032 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2033 adev->gfx.config.num_packer_per_sc =
2034 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2037 parse_soc_bounding_box:
2039 * the soc bounding box info is not integrated into the discovery table,
2040 * so we always need to parse it from the gpu_info firmware when needed.
2042 if (hdr->version_minor == 2) {
2043 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2044 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2045 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2046 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2052 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2061 * amdgpu_device_ip_early_init - run early init for hardware IPs
2063 * @adev: amdgpu_device pointer
2065 * Early initialization pass for hardware IPs. The hardware IPs that make
2066 * up each asic are discovered and each IP's early_init callback is run. This
2067 * is the first stage in initializing the asic.
2068 * Returns 0 on success, negative error code on failure.
2070 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2074 amdgpu_device_enable_virtual_display(adev);
2076 if (amdgpu_sriov_vf(adev)) {
2077 r = amdgpu_virt_request_full_gpu(adev, true);
2082 switch (adev->asic_type) {
2083 #ifdef CONFIG_DRM_AMDGPU_SI
2089 adev->family = AMDGPU_FAMILY_SI;
2090 r = si_set_ip_blocks(adev);
2095 #ifdef CONFIG_DRM_AMDGPU_CIK
2101 if (adev->flags & AMD_IS_APU)
2102 adev->family = AMDGPU_FAMILY_KV;
2104 adev->family = AMDGPU_FAMILY_CI;
2106 r = cik_set_ip_blocks(adev);
2114 case CHIP_POLARIS10:
2115 case CHIP_POLARIS11:
2116 case CHIP_POLARIS12:
2120 if (adev->flags & AMD_IS_APU)
2121 adev->family = AMDGPU_FAMILY_CZ;
2123 adev->family = AMDGPU_FAMILY_VI;
2125 r = vi_set_ip_blocks(adev);
2135 case CHIP_ALDEBARAN:
2136 if (adev->flags & AMD_IS_APU)
2137 adev->family = AMDGPU_FAMILY_RV;
2139 adev->family = AMDGPU_FAMILY_AI;
2141 r = soc15_set_ip_blocks(adev);
2148 case CHIP_SIENNA_CICHLID:
2149 case CHIP_NAVY_FLOUNDER:
2150 case CHIP_DIMGREY_CAVEFISH:
2151 case CHIP_BEIGE_GOBY:
2153 case CHIP_YELLOW_CARP:
2154 case CHIP_CYAN_SKILLFISH:
2155 if (adev->asic_type == CHIP_VANGOGH)
2156 adev->family = AMDGPU_FAMILY_VGH;
2157 else if (adev->asic_type == CHIP_YELLOW_CARP)
2158 adev->family = AMDGPU_FAMILY_YC;
2160 adev->family = AMDGPU_FAMILY_NV;
2162 r = nv_set_ip_blocks(adev);
2167 /* FIXME: not supported yet */
2171 amdgpu_amdkfd_device_probe(adev);
2173 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2174 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2175 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2176 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2177 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2179 for (i = 0; i < adev->num_ip_blocks; i++) {
2180 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2181 DRM_ERROR("disabled ip block: %d <%s>\n",
2182 i, adev->ip_blocks[i].version->funcs->name);
2183 adev->ip_blocks[i].status.valid = false;
2185 if (adev->ip_blocks[i].version->funcs->early_init) {
2186 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2188 adev->ip_blocks[i].status.valid = false;
2190 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2191 adev->ip_blocks[i].version->funcs->name, r);
2194 adev->ip_blocks[i].status.valid = true;
2197 adev->ip_blocks[i].status.valid = true;
2200 /* get the vbios after the asic_funcs are set up */
2201 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2202 r = amdgpu_device_parse_gpu_info_fw(adev);
2207 if (!amdgpu_get_bios(adev))
2210 r = amdgpu_atombios_init(adev);
2212 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2213 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2217 /* get pf2vf msg info at its earliest time */
2218 if (amdgpu_sriov_vf(adev))
2219 amdgpu_virt_init_data_exchange(adev);
2224 adev->cg_flags &= amdgpu_cg_mask;
2225 adev->pg_flags &= amdgpu_pg_mask;
2230 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2234 for (i = 0; i < adev->num_ip_blocks; i++) {
2235 if (!adev->ip_blocks[i].status.sw)
2237 if (adev->ip_blocks[i].status.hw)
2239 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2240 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2241 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2242 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2244 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2245 adev->ip_blocks[i].version->funcs->name, r);
2248 adev->ip_blocks[i].status.hw = true;
2255 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2259 for (i = 0; i < adev->num_ip_blocks; i++) {
2260 if (!adev->ip_blocks[i].status.sw)
2262 if (adev->ip_blocks[i].status.hw)
2264 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2266 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2267 adev->ip_blocks[i].version->funcs->name, r);
2270 adev->ip_blocks[i].status.hw = true;
2276 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2280 uint32_t smu_version;
2282 if (adev->asic_type >= CHIP_VEGA10) {
2283 for (i = 0; i < adev->num_ip_blocks; i++) {
2284 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2287 if (!adev->ip_blocks[i].status.sw)
2290 /* no need to do the fw loading again if already done */
2291 if (adev->ip_blocks[i].status.hw)
2294 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2295 r = adev->ip_blocks[i].version->funcs->resume(adev);
2297 DRM_ERROR("resume of IP block <%s> failed %d\n",
2298 adev->ip_blocks[i].version->funcs->name, r);
2302 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2304 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2305 adev->ip_blocks[i].version->funcs->name, r);
2310 adev->ip_blocks[i].status.hw = true;
2315 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2316 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2322 * amdgpu_device_ip_init - run init for hardware IPs
2324 * @adev: amdgpu_device pointer
2326 * Main initialization pass for hardware IPs. The list of all the hardware
2327 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2328 * are run. sw_init initializes the software state associated with each IP
2329 * and hw_init initializes the hardware associated with each IP.
2330 * Returns 0 on success, negative error code on failure.
2332 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2336 r = amdgpu_ras_init(adev);
2340 for (i = 0; i < adev->num_ip_blocks; i++) {
2341 if (!adev->ip_blocks[i].status.valid)
2343 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2345 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2346 adev->ip_blocks[i].version->funcs->name, r);
2349 adev->ip_blocks[i].status.sw = true;
2351 /* need to do gmc hw init early so we can allocate gpu mem */
2352 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2353 r = amdgpu_device_vram_scratch_init(adev);
2355 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2358 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2360 DRM_ERROR("hw_init %d failed %d\n", i, r);
2363 r = amdgpu_device_wb_init(adev);
2365 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2368 adev->ip_blocks[i].status.hw = true;
2370 /* right after GMC hw init, we create CSA */
2371 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2372 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2373 AMDGPU_GEM_DOMAIN_VRAM,
2376 DRM_ERROR("allocate CSA failed %d\n", r);
2383 if (amdgpu_sriov_vf(adev))
2384 amdgpu_virt_init_data_exchange(adev);
2386 r = amdgpu_ib_pool_init(adev);
2388 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2389 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2397 r = amdgpu_device_ip_hw_init_phase1(adev);
2401 r = amdgpu_device_fw_loading(adev);
2405 r = amdgpu_device_ip_hw_init_phase2(adev);
 * retired pages will be loaded from eeprom and reserved here,
 * it should be called after amdgpu_device_ip_hw_init_phase2 since
 * for some ASICs the RAS EEPROM code relies on the SMU being fully
 * functional for I2C communication, which is only true at this point.
 *
 * amdgpu_ras_recovery_init may fail, but the upper layers only care
 * about failures caused by a bad GPU state, and stop the amdgpu init
 * process accordingly. For other failures it still releases all
 * the resources and prints an error message, rather than returning a
 * negative value to the upper level.
 *
 * Note: theoretically, this should be called before all vram allocations
 * to protect retired pages from being abused.
2424 r = amdgpu_ras_recovery_init(adev);
2428 if (adev->gmc.xgmi.num_physical_nodes > 1)
2429 amdgpu_xgmi_add_device(adev);
/* Don't init kfd if the whole hive needs to be reset during init */
2432 if (!adev->gmc.xgmi.pending_reset)
2433 amdgpu_amdkfd_device_init(adev);
2435 amdgpu_fru_get_product_info(adev);
2438 if (amdgpu_sriov_vf(adev))
2439 amdgpu_virt_release_full_gpu(adev, true);
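
/*
 * Illustrative helper (a sketch, not part of the driver): the init and
 * fini passes in this file are easier to follow with the per-block
 * status flags in view. A debugging aid could walk the same list the
 * passes walk:
 */
#if 0	/* example only */
static void amdgpu_device_dump_ip_blocks(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		DRM_INFO("IP %d <%s> valid:%d sw:%d hw:%d\n", i,
			 adev->ip_blocks[i].version->funcs->name,
			 adev->ip_blocks[i].status.valid,
			 adev->ip_blocks[i].status.sw,
			 adev->ip_blocks[i].status.hw);
}
#endif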
2445 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2447 * @adev: amdgpu_device pointer
2449 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2450 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2453 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2455 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
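
/*
 * Illustrative sequence (a sketch, not literal call sites): the reset
 * magic is a canary for VRAM survival across a reset:
 *
 *   amdgpu_device_fill_reset_magic(adev);      // before the reset
 *   ...GPU reset...
 *   if (amdgpu_device_check_vram_lost(adev))   // after the reset
 *           restore VRAM buffers from their shadows;
 */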
2459 * amdgpu_device_check_vram_lost - check if vram is valid
2461 * @adev: amdgpu_device pointer
2463 * Checks the reset magic value written to the gart pointer in VRAM.
2464 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * Returns true if vram is lost, false if not.
2468 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2470 if (memcmp(adev->gart.ptr, adev->reset_magic,
2471 AMDGPU_RESET_MAGIC_NUM))
2474 if (!amdgpu_in_reset(adev))
2478 * For all ASICs with baco/mode1 reset, the VRAM is
2479 * always assumed to be lost.
2481 switch (amdgpu_asic_reset_method(adev)) {
2482 case AMD_RESET_METHOD_BACO:
2483 case AMD_RESET_METHOD_MODE1:
2491 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2493 * @adev: amdgpu_device pointer
2494 * @state: clockgating state (gate or ungate)
2496 * The list of all the hardware IPs that make up the asic is walked and the
2497 * set_clockgating_state callbacks are run.
2498 * Late initialization pass enabling clockgating for hardware IPs.
2499 * Fini or suspend, pass disabling clockgating for hardware IPs.
2500 * Returns 0 on success, negative error code on failure.
2503 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2504 enum amd_clockgating_state state)
2508 if (amdgpu_emu_mode == 1)
2511 for (j = 0; j < adev->num_ip_blocks; j++) {
2512 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2513 if (!adev->ip_blocks[i].status.late_initialized)
2515 /* skip CG for GFX on S0ix */
2516 if (adev->in_s0ix &&
2517 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2519 /* skip CG for VCE/UVD, it's handled specially */
2520 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2521 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2522 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2523 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2524 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2525 /* enable clockgating to save power */
2526 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2529 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2530 adev->ip_blocks[i].version->funcs->name, r);
2539 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2540 enum amd_powergating_state state)
2544 if (amdgpu_emu_mode == 1)
2547 for (j = 0; j < adev->num_ip_blocks; j++) {
2548 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2549 if (!adev->ip_blocks[i].status.late_initialized)
2551 /* skip PG for GFX on S0ix */
2552 if (adev->in_s0ix &&
2553 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
/* skip PG for VCE/UVD, it's handled specially */
2556 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2557 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2558 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2559 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2560 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2561 /* enable powergating to save power */
2562 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2565 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2566 adev->ip_blocks[i].version->funcs->name, r);
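
/*
 * Note on the traversal direction in the two helpers above (a reading
 * aid, not new behavior): gating walks the IP list front to back,
 * ungating back to front, so dependencies are released in reverse of
 * how they were taken. For a four-block list, j = 0..3 maps to i as:
 *
 *   gate:   i = 0, 1, 2, 3
 *   ungate: i = 3, 2, 1, 0
 */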
2574 static int amdgpu_device_enable_mgpu_fan_boost(void)
2576 struct amdgpu_gpu_instance *gpu_ins;
2577 struct amdgpu_device *adev;
2580 mutex_lock(&mgpu_info.mutex);
 * MGPU fan boost feature should be enabled
 * only when there are two or more dGPUs in
 * the system.
2587 if (mgpu_info.num_dgpu < 2)
2590 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2591 gpu_ins = &(mgpu_info.gpu_ins[i]);
2592 adev = gpu_ins->adev;
2593 if (!(adev->flags & AMD_IS_APU) &&
2594 !gpu_ins->mgpu_fan_enabled) {
2595 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2599 gpu_ins->mgpu_fan_enabled = 1;
2604 mutex_unlock(&mgpu_info.mutex);
2610 * amdgpu_device_ip_late_init - run late init for hardware IPs
2612 * @adev: amdgpu_device pointer
2614 * Late initialization pass for hardware IPs. The list of all the hardware
2615 * IPs that make up the asic is walked and the late_init callbacks are run.
2616 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to happen
2618 * late in the init process.
2619 * Returns 0 on success, negative error code on failure.
2621 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2623 struct amdgpu_gpu_instance *gpu_instance;
2626 for (i = 0; i < adev->num_ip_blocks; i++) {
2627 if (!adev->ip_blocks[i].status.hw)
2629 if (adev->ip_blocks[i].version->funcs->late_init) {
2630 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2632 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2633 adev->ip_blocks[i].version->funcs->name, r);
2637 adev->ip_blocks[i].status.late_initialized = true;
2640 amdgpu_ras_set_error_query_ready(adev, true);
2642 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2643 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2645 amdgpu_device_fill_reset_magic(adev);
2647 r = amdgpu_device_enable_mgpu_fan_boost();
2649 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2651 /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2652 if (adev->asic_type == CHIP_ARCTURUS &&
2653 amdgpu_passthrough(adev) &&
2654 adev->gmc.xgmi.num_physical_nodes > 1)
2655 smu_set_light_sbr(&adev->smu, true);
2657 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2658 mutex_lock(&mgpu_info.mutex);
 * Reset device p-state to low as this was booted with high.
 *
 * This should be performed only after all devices from the same
 * hive get initialized.
 *
 * However, it's not known in advance how many devices are in the
 * hive, as they are counted one by one during device initialization.
 *
 * So, we wait for all XGMI interlinked devices to be initialized.
 * This may bring some delay as those devices may come from
 * different hives. But that should be OK.
2673 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2674 for (i = 0; i < mgpu_info.num_gpu; i++) {
2675 gpu_instance = &(mgpu_info.gpu_ins[i]);
2676 if (gpu_instance->adev->flags & AMD_IS_APU)
2679 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2680 AMDGPU_XGMI_PSTATE_MIN);
2682 DRM_ERROR("pstate setting failed (%d).\n", r);
2688 mutex_unlock(&mgpu_info.mutex);
2694 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2698 for (i = 0; i < adev->num_ip_blocks; i++) {
2699 if (!adev->ip_blocks[i].version->funcs->early_fini)
2702 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2704 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2705 adev->ip_blocks[i].version->funcs->name, r);
2709 amdgpu_amdkfd_suspend(adev, false);
2711 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2712 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2714 /* need to disable SMC first */
2715 for (i = 0; i < adev->num_ip_blocks; i++) {
2716 if (!adev->ip_blocks[i].status.hw)
2718 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2719 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2720 /* XXX handle errors */
2722 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2723 adev->ip_blocks[i].version->funcs->name, r);
2725 adev->ip_blocks[i].status.hw = false;
2730 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2731 if (!adev->ip_blocks[i].status.hw)
2734 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2735 /* XXX handle errors */
2737 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2738 adev->ip_blocks[i].version->funcs->name, r);
2741 adev->ip_blocks[i].status.hw = false;
2748 * amdgpu_device_ip_fini - run fini for hardware IPs
2750 * @adev: amdgpu_device pointer
2752 * Main teardown pass for hardware IPs. The list of all the hardware
2753 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2754 * are run. hw_fini tears down the hardware associated with each IP
2755 * and sw_fini tears down any software state associated with each IP.
2756 * Returns 0 on success, negative error code on failure.
2758 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2762 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2763 amdgpu_virt_release_ras_err_handler_data(adev);
2765 amdgpu_ras_pre_fini(adev);
2767 if (adev->gmc.xgmi.num_physical_nodes > 1)
2768 amdgpu_xgmi_remove_device(adev);
2770 amdgpu_amdkfd_device_fini_sw(adev);
2772 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2773 if (!adev->ip_blocks[i].status.sw)
2776 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2777 amdgpu_ucode_free_bo(adev);
2778 amdgpu_free_static_csa(&adev->virt.csa_obj);
2779 amdgpu_device_wb_fini(adev);
2780 amdgpu_device_vram_scratch_fini(adev);
2781 amdgpu_ib_pool_fini(adev);
2784 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2785 /* XXX handle errors */
2787 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2788 adev->ip_blocks[i].version->funcs->name, r);
2790 adev->ip_blocks[i].status.sw = false;
2791 adev->ip_blocks[i].status.valid = false;
2794 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2795 if (!adev->ip_blocks[i].status.late_initialized)
2797 if (adev->ip_blocks[i].version->funcs->late_fini)
2798 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2799 adev->ip_blocks[i].status.late_initialized = false;
2802 amdgpu_ras_fini(adev);
2804 if (amdgpu_sriov_vf(adev))
2805 if (amdgpu_virt_release_full_gpu(adev, false))
2806 DRM_ERROR("failed to release exclusive mode on fini\n");
2812 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2814 * @work: work_struct.
2816 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2818 struct amdgpu_device *adev =
2819 container_of(work, struct amdgpu_device, delayed_init_work.work);
2822 r = amdgpu_ib_ring_tests(adev);
2824 DRM_ERROR("ib ring test failed (%d).\n", r);
2827 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2829 struct amdgpu_device *adev =
2830 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2832 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2833 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2835 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2836 adev->gfx.gfx_off_state = true;
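
/*
 * A reading aid, inferred from the WARN_ON_ONCE checks above: this
 * delayed work is the "arm" side of GFXOFF. It runs only after the
 * gfx_off request count has dropped to zero and the delay period has
 * elapsed, at which point the SMU is asked to power the GFX core down.
 */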
2840 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2842 * @adev: amdgpu_device pointer
2844 * Main suspend function for hardware IPs. The list of all the hardware
2845 * IPs that make up the asic is walked, clockgating is disabled and the
2846 * suspend callbacks are run. suspend puts the hardware and software state
2847 * in each IP into a state suitable for suspend.
2848 * Returns 0 on success, negative error code on failure.
2850 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2854 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2855 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2857 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2858 if (!adev->ip_blocks[i].status.valid)
2861 /* displays are handled separately */
2862 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2866 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2867 /* XXX handle errors */
2869 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2870 adev->ip_blocks[i].version->funcs->name, r);
2874 adev->ip_blocks[i].status.hw = false;
2881 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2883 * @adev: amdgpu_device pointer
2885 * Main suspend function for hardware IPs. The list of all the hardware
2886 * IPs that make up the asic is walked, clockgating is disabled and the
2887 * suspend callbacks are run. suspend puts the hardware and software state
2888 * in each IP into a state suitable for suspend.
2889 * Returns 0 on success, negative error code on failure.
2891 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2896 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2898 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2899 if (!adev->ip_blocks[i].status.valid)
2901 /* displays are handled in phase1 */
2902 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2904 /* PSP lost connection when err_event_athub occurs */
2905 if (amdgpu_ras_intr_triggered() &&
2906 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2907 adev->ip_blocks[i].status.hw = false;
/* skip unnecessary suspend if we have not initialized them yet */
2912 if (adev->gmc.xgmi.pending_reset &&
2913 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2914 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2915 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2916 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2917 adev->ip_blocks[i].status.hw = false;
2921 /* skip suspend of gfx and psp for S0ix
2922 * gfx is in gfxoff state, so on resume it will exit gfxoff just
 * like at runtime. PSP is also part of the always-on hardware
2924 * so no need to suspend it.
2926 if (adev->in_s0ix &&
2927 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2928 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2932 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2933 /* XXX handle errors */
2935 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2936 adev->ip_blocks[i].version->funcs->name, r);
2938 adev->ip_blocks[i].status.hw = false;
2939 /* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
2941 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2942 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2944 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2945 adev->mp1_state, r);
2956 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2958 * @adev: amdgpu_device pointer
2960 * Main suspend function for hardware IPs. The list of all the hardware
2961 * IPs that make up the asic is walked, clockgating is disabled and the
2962 * suspend callbacks are run. suspend puts the hardware and software state
2963 * in each IP into a state suitable for suspend.
2964 * Returns 0 on success, negative error code on failure.
2966 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2970 if (amdgpu_sriov_vf(adev)) {
2971 amdgpu_virt_fini_data_exchange(adev);
2972 amdgpu_virt_request_full_gpu(adev, false);
2975 r = amdgpu_device_ip_suspend_phase1(adev);
2978 r = amdgpu_device_ip_suspend_phase2(adev);
2980 if (amdgpu_sriov_vf(adev))
2981 amdgpu_virt_release_full_gpu(adev, false);
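
/*
 * Summary of the suspend path above (it mirrors the code, adding
 * nothing new):
 *
 *   phase1: ungate CG/PG, then suspend only the display (DCE) blocks
 *   phase2: suspend every remaining block in reverse init order and
 *           put the SMC into the requested mp1 state (bare metal only)
 */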
2986 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2990 static enum amd_ip_block_type ip_order[] = {
2991 AMD_IP_BLOCK_TYPE_GMC,
2992 AMD_IP_BLOCK_TYPE_COMMON,
2993 AMD_IP_BLOCK_TYPE_PSP,
2994 AMD_IP_BLOCK_TYPE_IH,
2997 for (i = 0; i < adev->num_ip_blocks; i++) {
2999 struct amdgpu_ip_block *block;
3001 block = &adev->ip_blocks[i];
3002 block->status.hw = false;
3004 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3006 if (block->version->type != ip_order[j] ||
3007 !block->status.valid)
3010 r = block->version->funcs->hw_init(adev);
3011 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3014 block->status.hw = true;
3021 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3025 static enum amd_ip_block_type ip_order[] = {
3026 AMD_IP_BLOCK_TYPE_SMC,
3027 AMD_IP_BLOCK_TYPE_DCE,
3028 AMD_IP_BLOCK_TYPE_GFX,
3029 AMD_IP_BLOCK_TYPE_SDMA,
3030 AMD_IP_BLOCK_TYPE_UVD,
3031 AMD_IP_BLOCK_TYPE_VCE,
3032 AMD_IP_BLOCK_TYPE_VCN
3035 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3037 struct amdgpu_ip_block *block;
3039 for (j = 0; j < adev->num_ip_blocks; j++) {
3040 block = &adev->ip_blocks[j];
3042 if (block->version->type != ip_order[i] ||
3043 !block->status.valid ||
3047 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3048 r = block->version->funcs->resume(adev);
3050 r = block->version->funcs->hw_init(adev);
3052 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3055 block->status.hw = true;
3063 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3065 * @adev: amdgpu_device pointer
3067 * First resume function for hardware IPs. The list of all the hardware
3068 * IPs that make up the asic is walked and the resume callbacks are run for
3069 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3070 * after a suspend and updates the software state as necessary. This
3071 * function is also used for restoring the GPU after a GPU reset.
3072 * Returns 0 on success, negative error code on failure.
3074 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3078 for (i = 0; i < adev->num_ip_blocks; i++) {
3079 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3081 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3082 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3083 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3085 r = adev->ip_blocks[i].version->funcs->resume(adev);
3087 DRM_ERROR("resume of IP block <%s> failed %d\n",
3088 adev->ip_blocks[i].version->funcs->name, r);
3091 adev->ip_blocks[i].status.hw = true;
3099 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3101 * @adev: amdgpu_device pointer
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, IH, and PSP. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
3109 * Returns 0 on success, negative error code on failure.
3111 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3115 for (i = 0; i < adev->num_ip_blocks; i++) {
3116 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3118 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3119 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3120 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3121 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3123 r = adev->ip_blocks[i].version->funcs->resume(adev);
3125 DRM_ERROR("resume of IP block <%s> failed %d\n",
3126 adev->ip_blocks[i].version->funcs->name, r);
3129 adev->ip_blocks[i].status.hw = true;
3136 * amdgpu_device_ip_resume - run resume for hardware IPs
3138 * @adev: amdgpu_device pointer
 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
3145 * Returns 0 on success, negative error code on failure.
3147 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3151 r = amdgpu_device_ip_resume_phase1(adev);
3155 r = amdgpu_device_fw_loading(adev);
3159 r = amdgpu_device_ip_resume_phase2(adev);
3165 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3167 * @adev: amdgpu_device pointer
3169 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3171 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3173 if (amdgpu_sriov_vf(adev)) {
3174 if (adev->is_atom_fw) {
3175 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3176 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3178 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3179 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3182 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3183 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3188 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3190 * @asic_type: AMD asic type
 * Check if there is DC (new modesetting infrastructure) support for an asic.
3193 * returns true if DC has support, false if not.
3195 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3197 switch (asic_type) {
3198 #if defined(CONFIG_DRM_AMD_DC)
3199 #if defined(CONFIG_DRM_AMD_DC_SI)
3210 * We have systems in the wild with these ASICs that require
3211 * LVDS and VGA support which is not supported with DC.
 * Fall back to the non-DC driver here by default so as not to
3214 * cause regressions.
3216 return amdgpu_dc > 0;
3220 case CHIP_POLARIS10:
3221 case CHIP_POLARIS11:
3222 case CHIP_POLARIS12:
3229 #if defined(CONFIG_DRM_AMD_DC_DCN)
3235 case CHIP_SIENNA_CICHLID:
3236 case CHIP_NAVY_FLOUNDER:
3237 case CHIP_DIMGREY_CAVEFISH:
3238 case CHIP_BEIGE_GOBY:
3240 case CHIP_YELLOW_CARP:
3242 return amdgpu_dc != 0;
3246 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3247 "but isn't supported by ASIC, ignoring\n");
3253 * amdgpu_device_has_dc_support - check if dc is supported
3255 * @adev: amdgpu_device pointer
3257 * Returns true for supported, false for not supported
3259 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3261 if (amdgpu_sriov_vf(adev) ||
3262 adev->enable_virtual_display ||
3263 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3266 return amdgpu_device_asic_has_dc_support(adev->asic_type);
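
/*
 * Module parameter semantics, noted here as a reading aid: amdgpu_dc
 * is the "dc" modparam (-1 = auto, the default; 0 = force off;
 * 1 = force on). That is why the legacy LVDS/VGA generations above use
 * "amdgpu_dc > 0" (DC only when explicitly requested) while newer
 * ASICs use "amdgpu_dc != 0" (DC unless explicitly disabled).
 */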
3269 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3271 struct amdgpu_device *adev =
3272 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3273 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3275 /* It's a bug to not have a hive within this function */
3280 * Use task barrier to synchronize all xgmi reset works across the
3281 * hive. task_barrier_enter and task_barrier_exit will block
3282 * until all the threads running the xgmi reset works reach
3283 * those points. task_barrier_full will do both blocks.
3285 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3287 task_barrier_enter(&hive->tb);
3288 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3290 if (adev->asic_reset_res)
3293 task_barrier_exit(&hive->tb);
3294 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3296 if (adev->asic_reset_res)
3299 if (adev->mmhub.ras_funcs &&
3300 adev->mmhub.ras_funcs->reset_ras_error_count)
3301 adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3304 task_barrier_full(&hive->tb);
3305 adev->asic_reset_res = amdgpu_asic_reset(adev);
3309 if (adev->asic_reset_res)
3310 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3311 adev->asic_reset_res, adev_to_drm(adev)->unique);
3312 amdgpu_put_xgmi_hive(hive);
3315 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3317 char *input = amdgpu_lockup_timeout;
3318 char *timeout_setting = NULL;
 * By default the timeout for non-compute jobs is 10000 ms
 * and 60000 ms for compute jobs.
 * In SR-IOV or passthrough mode, the timeout for compute
 * jobs is 60000 ms by default.
3329 adev->gfx_timeout = msecs_to_jiffies(10000);
3330 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3331 if (amdgpu_sriov_vf(adev))
3332 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3333 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3335 adev->compute_timeout = msecs_to_jiffies(60000);
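
/*
 * Example (illustrative, not upstream documentation): the
 * lockup_timeout modparam takes up to four comma-separated millisecond
 * values, applied in order to gfx, compute, sdma and video jobs, e.g.
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * A single value applies to all non-compute jobs; a negative value
 * means an infinite timeout.
 */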
3337 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3338 while ((timeout_setting = strsep(&input, ",")) &&
3339 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3340 ret = kstrtol(timeout_setting, 0, &timeout);
3347 } else if (timeout < 0) {
3348 timeout = MAX_SCHEDULE_TIMEOUT;
3350 timeout = msecs_to_jiffies(timeout);
3355 adev->gfx_timeout = timeout;
3358 adev->compute_timeout = timeout;
3361 adev->sdma_timeout = timeout;
3364 adev->video_timeout = timeout;
3371 * There is only one value specified and
3372 * it should apply to all non-compute jobs.
3375 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3376 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3377 adev->compute_timeout = adev->gfx_timeout;
3384 static const struct attribute *amdgpu_dev_attributes[] = {
3385 &dev_attr_product_name.attr,
3386 &dev_attr_product_number.attr,
3387 &dev_attr_serial_number.attr,
3388 &dev_attr_pcie_replay_count.attr,
3393 * amdgpu_device_init - initialize the driver
3395 * @adev: amdgpu_device pointer
3396 * @flags: driver flags
3398 * Initializes the driver info and hw (all asics).
3399 * Returns 0 for success or an error on failure.
3400 * Called at driver startup.
3402 int amdgpu_device_init(struct amdgpu_device *adev,
3405 struct drm_device *ddev = adev_to_drm(adev);
3406 struct pci_dev *pdev = adev->pdev;
3411 adev->shutdown = false;
3412 adev->flags = flags;
3414 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3415 adev->asic_type = amdgpu_force_asic_type;
3417 adev->asic_type = flags & AMD_ASIC_MASK;
3419 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3420 if (amdgpu_emu_mode == 1)
3421 adev->usec_timeout *= 10;
3422 adev->gmc.gart_size = 512 * 1024 * 1024;
3423 adev->accel_working = false;
3424 adev->num_rings = 0;
3425 adev->mman.buffer_funcs = NULL;
3426 adev->mman.buffer_funcs_ring = NULL;
3427 adev->vm_manager.vm_pte_funcs = NULL;
3428 adev->vm_manager.vm_pte_num_scheds = 0;
3429 adev->gmc.gmc_funcs = NULL;
3430 adev->harvest_ip_mask = 0x0;
3431 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3432 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3434 adev->smc_rreg = &amdgpu_invalid_rreg;
3435 adev->smc_wreg = &amdgpu_invalid_wreg;
3436 adev->pcie_rreg = &amdgpu_invalid_rreg;
3437 adev->pcie_wreg = &amdgpu_invalid_wreg;
3438 adev->pciep_rreg = &amdgpu_invalid_rreg;
3439 adev->pciep_wreg = &amdgpu_invalid_wreg;
3440 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3441 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3442 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3443 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3444 adev->didt_rreg = &amdgpu_invalid_rreg;
3445 adev->didt_wreg = &amdgpu_invalid_wreg;
3446 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3447 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3448 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3449 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3451 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3452 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3453 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization is all done here so we
 * can recall functions without having locking issues */
3457 mutex_init(&adev->firmware.mutex);
3458 mutex_init(&adev->pm.mutex);
3459 mutex_init(&adev->gfx.gpu_clock_mutex);
3460 mutex_init(&adev->srbm_mutex);
3461 mutex_init(&adev->gfx.pipe_reserve_mutex);
3462 mutex_init(&adev->gfx.gfx_off_mutex);
3463 mutex_init(&adev->grbm_idx_mutex);
3464 mutex_init(&adev->mn_lock);
3465 mutex_init(&adev->virt.vf_errors.lock);
3466 hash_init(adev->mn_hash);
3467 atomic_set(&adev->in_gpu_reset, 0);
3468 init_rwsem(&adev->reset_sem);
3469 mutex_init(&adev->psp.mutex);
3470 mutex_init(&adev->notifier_lock);
3472 r = amdgpu_device_init_apu_flags(adev);
3476 r = amdgpu_device_check_arguments(adev);
3480 spin_lock_init(&adev->mmio_idx_lock);
3481 spin_lock_init(&adev->smc_idx_lock);
3482 spin_lock_init(&adev->pcie_idx_lock);
3483 spin_lock_init(&adev->uvd_ctx_idx_lock);
3484 spin_lock_init(&adev->didt_idx_lock);
3485 spin_lock_init(&adev->gc_cac_idx_lock);
3486 spin_lock_init(&adev->se_cac_idx_lock);
3487 spin_lock_init(&adev->audio_endpt_idx_lock);
3488 spin_lock_init(&adev->mm_stats.lock);
3490 INIT_LIST_HEAD(&adev->shadow_list);
3491 mutex_init(&adev->shadow_list_lock);
3493 INIT_LIST_HEAD(&adev->reset_list);
3495 INIT_DELAYED_WORK(&adev->delayed_init_work,
3496 amdgpu_device_delayed_init_work_handler);
3497 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3498 amdgpu_device_delay_enable_gfx_off);
3500 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3502 adev->gfx.gfx_off_req_count = 1;
3503 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3505 atomic_set(&adev->throttling_logging_enabled, 1);
 * If throttling continues, logging will be performed every minute
 * to avoid log flooding. "-1" is subtracted since the thermal
 * throttling interrupt comes every second. Thus, the total logging
 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
 * for throttling interrupt) = 60 seconds.
3513 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3514 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3516 /* Registers mapping */
3517 /* TODO: block userspace mapping of io register */
3518 if (adev->asic_type >= CHIP_BONAIRE) {
3519 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3520 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3522 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3523 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3526 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3527 if (adev->rmmio == NULL) {
3530 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3531 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3533 /* enable PCIE atomic ops */
3534 r = pci_enable_atomic_ops_to_root(adev->pdev,
3535 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3536 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3538 adev->have_atomics_support = false;
3539 DRM_INFO("PCIE atomic ops is not supported\n");
3541 adev->have_atomics_support = true;
3544 amdgpu_device_get_pcie_info(adev);
3547 DRM_INFO("MCBP is enabled\n");
3549 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3550 adev->enable_mes = true;
3552 /* detect hw virtualization here */
3553 amdgpu_detect_virtualization(adev);
3555 r = amdgpu_device_get_job_timeout_settings(adev);
3557 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3561 /* early init functions */
3562 r = amdgpu_device_ip_early_init(adev);
/* doorbell bar mapping and doorbell index init */
3567 amdgpu_device_doorbell_init(adev);
3569 if (amdgpu_emu_mode == 1) {
/* post the asic in emulation mode */
3571 emu_soc_asic_init(adev);
3572 goto fence_driver_init;
3575 amdgpu_reset_init(adev);
/* detect if we have an SR-IOV vbios */
3578 amdgpu_device_detect_sriov_bios(adev);
3580 /* check if we need to reset the asic
3581 * E.g., driver was not cleanly unloaded previously, etc.
3583 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3584 if (adev->gmc.xgmi.num_physical_nodes) {
3585 dev_info(adev->dev, "Pending hive reset.\n");
3586 adev->gmc.xgmi.pending_reset = true;
/* Only need to init the necessary blocks for SMU to handle the reset */
3588 for (i = 0; i < adev->num_ip_blocks; i++) {
3589 if (!adev->ip_blocks[i].status.valid)
3591 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3592 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3593 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3594 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3595 DRM_DEBUG("IP %s disabled for hw_init.\n",
3596 adev->ip_blocks[i].version->funcs->name);
3597 adev->ip_blocks[i].status.hw = true;
3601 r = amdgpu_asic_reset(adev);
3603 dev_err(adev->dev, "asic reset on init failed\n");
3609 pci_enable_pcie_error_reporting(adev->pdev);
3611 /* Post card if necessary */
3612 if (amdgpu_device_need_post(adev)) {
3614 dev_err(adev->dev, "no vBIOS found\n");
3618 DRM_INFO("GPU posting now...\n");
3619 r = amdgpu_device_asic_init(adev);
3621 dev_err(adev->dev, "gpu post error!\n");
3626 if (adev->is_atom_fw) {
3627 /* Initialize clocks */
3628 r = amdgpu_atomfirmware_get_clock_info(adev);
3630 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3631 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3635 /* Initialize clocks */
3636 r = amdgpu_atombios_get_clock_info(adev);
3638 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3639 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3642 /* init i2c buses */
3643 if (!amdgpu_device_has_dc_support(adev))
3644 amdgpu_atombios_i2c_init(adev);
3649 r = amdgpu_fence_driver_sw_init(adev);
3651 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3652 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3656 /* init the mode config */
3657 drm_mode_config_init(adev_to_drm(adev));
3659 r = amdgpu_device_ip_init(adev);
3661 /* failed in exclusive mode due to timeout */
3662 if (amdgpu_sriov_vf(adev) &&
3663 !amdgpu_sriov_runtime(adev) &&
3664 amdgpu_virt_mmio_blocked(adev) &&
3665 !amdgpu_virt_wait_reset(adev)) {
3666 dev_err(adev->dev, "VF exclusive mode timeout\n");
3667 /* Don't send request since VF is inactive. */
3668 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3669 adev->virt.ops = NULL;
3671 goto release_ras_con;
3673 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3674 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3675 goto release_ras_con;
3678 amdgpu_fence_driver_hw_init(adev);
3681 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3682 adev->gfx.config.max_shader_engines,
3683 adev->gfx.config.max_sh_per_se,
3684 adev->gfx.config.max_cu_per_sh,
3685 adev->gfx.cu_info.number);
3687 adev->accel_working = true;
3689 amdgpu_vm_check_compute_bug(adev);
3691 /* Initialize the buffer migration limit. */
3692 if (amdgpu_moverate >= 0)
3693 max_MBps = amdgpu_moverate;
3695 max_MBps = 8; /* Allow 8 MB/s. */
3696 /* Get a log2 for easy divisions. */
3697 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
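
/*
 * Worked example (illustrative): with the default 8 MB/s cap,
 * log2_max_MBps = ilog2(8) = 3, so a byte count converts to a
 * migration-time estimate with a shift instead of a divide:
 * bytes >> (20 + 3) is roughly bytes / (8 MB/s) in seconds.
 */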
3699 amdgpu_fbdev_init(adev);
3701 r = amdgpu_pm_sysfs_init(adev);
3703 adev->pm_sysfs_en = false;
3704 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3706 adev->pm_sysfs_en = true;
3708 r = amdgpu_ucode_sysfs_init(adev);
3710 adev->ucode_sysfs_en = false;
3711 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3713 adev->ucode_sysfs_en = true;
3715 if ((amdgpu_testing & 1)) {
3716 if (adev->accel_working)
3717 amdgpu_test_moves(adev);
3719 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3721 if (amdgpu_benchmarking) {
3722 if (adev->accel_working)
3723 amdgpu_benchmark(adev, amdgpu_benchmarking);
3725 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3729 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
 * Otherwise the mgpu fan boost feature will be skipped because the
 * gpu instance count would be too low.
3733 amdgpu_register_gpu_instance(adev);
3735 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3736 * explicit gating rather than handling it automatically.
3738 if (!adev->gmc.xgmi.pending_reset) {
3739 r = amdgpu_device_ip_late_init(adev);
3741 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3742 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3743 goto release_ras_con;
3746 amdgpu_ras_resume(adev);
3747 queue_delayed_work(system_wq, &adev->delayed_init_work,
3748 msecs_to_jiffies(AMDGPU_RESUME_MS));
3751 if (amdgpu_sriov_vf(adev))
3752 flush_delayed_work(&adev->delayed_init_work);
3754 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3756 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3758 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3759 r = amdgpu_pmu_init(adev);
3761 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3764 if (amdgpu_device_cache_pci_state(adev->pdev))
3765 pci_restore_state(pdev);
3767 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
 * ignore it */
3770 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3771 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3773 if (amdgpu_device_supports_px(ddev)) {
3775 vga_switcheroo_register_client(adev->pdev,
3776 &amdgpu_switcheroo_ops, px);
3777 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3780 if (adev->gmc.xgmi.pending_reset)
3781 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3782 msecs_to_jiffies(AMDGPU_RESUME_MS));
3787 amdgpu_release_ras_context(adev);
3790 amdgpu_vf_error_trans_all(adev);
3795 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3797 /* Clear all CPU mappings pointing to this device */
3798 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3800 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3801 amdgpu_device_doorbell_fini(adev);
3803 iounmap(adev->rmmio);
3805 if (adev->mman.aper_base_kaddr)
3806 iounmap(adev->mman.aper_base_kaddr);
3807 adev->mman.aper_base_kaddr = NULL;
3809 /* Memory manager related */
3810 if (!adev->gmc.xgmi.connected_to_cpu) {
3811 arch_phys_wc_del(adev->gmc.vram_mtrr);
3812 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
 * amdgpu_device_fini_hw - tear down the driver
3819 * @adev: amdgpu_device pointer
3821 * Tear down the driver info (all asics).
3822 * Called at driver shutdown.
3824 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3826 dev_info(adev->dev, "amdgpu: finishing device.\n");
3827 flush_delayed_work(&adev->delayed_init_work);
3828 if (adev->mman.initialized) {
3829 flush_delayed_work(&adev->mman.bdev.wq);
3830 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3832 adev->shutdown = true;
/* make sure IB tests are finished before entering exclusive mode
 * to avoid preemption during the IB tests
3837 if (amdgpu_sriov_vf(adev)) {
3838 amdgpu_virt_request_full_gpu(adev, false);
3839 amdgpu_virt_fini_data_exchange(adev);
3842 /* disable all interrupts */
3843 amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized) {
3845 if (!amdgpu_device_has_dc_support(adev))
3846 drm_helper_force_disable_all(adev_to_drm(adev));
3848 drm_atomic_helper_shutdown(adev_to_drm(adev));
3850 amdgpu_fence_driver_hw_fini(adev);
3852 if (adev->pm_sysfs_en)
3853 amdgpu_pm_sysfs_fini(adev);
3854 if (adev->ucode_sysfs_en)
3855 amdgpu_ucode_sysfs_fini(adev);
3856 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3858 amdgpu_fbdev_fini(adev);
3860 amdgpu_irq_fini_hw(adev);
3862 amdgpu_device_ip_fini_early(adev);
3864 amdgpu_gart_dummy_page_fini(adev);
3866 amdgpu_device_unmap_mmio(adev);
3869 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3871 amdgpu_device_ip_fini(adev);
3872 amdgpu_fence_driver_sw_fini(adev);
3873 release_firmware(adev->firmware.gpu_info_fw);
3874 adev->firmware.gpu_info_fw = NULL;
3875 adev->accel_working = false;
3877 amdgpu_reset_fini(adev);
3879 /* free i2c buses */
3880 if (!amdgpu_device_has_dc_support(adev))
3881 amdgpu_i2c_fini(adev);
3883 if (amdgpu_emu_mode != 1)
3884 amdgpu_atombios_fini(adev);
3888 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3889 vga_switcheroo_unregister_client(adev->pdev);
3890 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3892 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3893 vga_client_unregister(adev->pdev);
3895 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3896 amdgpu_pmu_fini(adev);
3897 if (adev->mman.discovery_bin)
3898 amdgpu_discovery_fini(adev);
3900 kfree(adev->pci_state);
3909 * amdgpu_device_suspend - initiate device suspend
3911 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
3914 * Puts the hw in the suspend state (all asics).
3915 * Returns 0 for success or an error on failure.
3916 * Called at driver suspend.
3918 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3920 struct amdgpu_device *adev = drm_to_adev(dev);
3922 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3925 adev->in_suspend = true;
3927 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3928 DRM_WARN("smart shift update failed\n");
3930 drm_kms_helper_poll_disable(dev);
3933 amdgpu_fbdev_set_suspend(adev, 1);
3935 cancel_delayed_work_sync(&adev->delayed_init_work);
3937 amdgpu_ras_suspend(adev);
3939 amdgpu_device_ip_suspend_phase1(adev);
3942 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3944 /* evict vram memory */
3945 amdgpu_bo_evict_vram(adev);
3947 amdgpu_fence_driver_hw_fini(adev);
3949 amdgpu_device_ip_suspend_phase2(adev);
3950 /* evict remaining vram memory
 * This second call to evict vram is to evict the gart page table
 * using the CPU.
3954 amdgpu_bo_evict_vram(adev);
3960 * amdgpu_device_resume - initiate device resume
3962 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
3965 * Bring the hw back to operating state (all asics).
3966 * Returns 0 for success or an error on failure.
3967 * Called at driver resume.
3969 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3971 struct amdgpu_device *adev = drm_to_adev(dev);
3974 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3978 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3981 if (amdgpu_device_need_post(adev)) {
3982 r = amdgpu_device_asic_init(adev);
3984 dev_err(adev->dev, "amdgpu asic init failed\n");
3987 r = amdgpu_device_ip_resume(adev);
3989 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3992 amdgpu_fence_driver_hw_init(adev);
3994 r = amdgpu_device_ip_late_init(adev);
3998 queue_delayed_work(system_wq, &adev->delayed_init_work,
3999 msecs_to_jiffies(AMDGPU_RESUME_MS));
4001 if (!adev->in_s0ix) {
4002 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
/* Make sure IB tests are flushed */
4008 flush_delayed_work(&adev->delayed_init_work);
4011 amdgpu_fbdev_set_suspend(adev, 0);
4013 drm_kms_helper_poll_enable(dev);
4015 amdgpu_ras_resume(adev);
4018 * Most of the connector probing functions try to acquire runtime pm
4019 * refs to ensure that the GPU is powered on when connector polling is
4020 * performed. Since we're calling this from a runtime PM callback,
4021 * trying to acquire rpm refs will cause us to deadlock.
4023 * Since we're guaranteed to be holding the rpm lock, it's safe to
4024 * temporarily disable the rpm helpers so this doesn't deadlock us.
4027 dev->dev->power.disable_depth++;
4029 if (!amdgpu_device_has_dc_support(adev))
4030 drm_helper_hpd_irq_event(dev);
4032 drm_kms_helper_hotplug_event(dev);
4034 dev->dev->power.disable_depth--;
4036 adev->in_suspend = false;
4038 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4039 DRM_WARN("smart shift update failed\n");
4045 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4047 * @adev: amdgpu_device pointer
4049 * The list of all the hardware IPs that make up the asic is walked and
4050 * the check_soft_reset callbacks are run. check_soft_reset determines
4051 * if the asic is still hung or not.
4052 * Returns true if any of the IPs are still in a hung state, false if not.
4054 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4057 bool asic_hang = false;
4059 if (amdgpu_sriov_vf(adev))
4062 if (amdgpu_asic_need_full_reset(adev))
4065 for (i = 0; i < adev->num_ip_blocks; i++) {
4066 if (!adev->ip_blocks[i].status.valid)
4068 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4069 adev->ip_blocks[i].status.hang =
4070 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4071 if (adev->ip_blocks[i].status.hang) {
4072 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4080 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4082 * @adev: amdgpu_device pointer
4084 * The list of all the hardware IPs that make up the asic is walked and the
4085 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4086 * handles any IP specific hardware or software state changes that are
4087 * necessary for a soft reset to succeed.
4088 * Returns 0 on success, negative error code on failure.
4090 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4094 for (i = 0; i < adev->num_ip_blocks; i++) {
4095 if (!adev->ip_blocks[i].status.valid)
4097 if (adev->ip_blocks[i].status.hang &&
4098 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4099 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4109 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4111 * @adev: amdgpu_device pointer
4113 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4114 * reset is necessary to recover.
4115 * Returns true if a full asic reset is required, false if not.
4117 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4121 if (amdgpu_asic_need_full_reset(adev))
4124 for (i = 0; i < adev->num_ip_blocks; i++) {
4125 if (!adev->ip_blocks[i].status.valid)
4127 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4128 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4129 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4130 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4131 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4132 if (adev->ip_blocks[i].status.hang) {
4133 dev_info(adev->dev, "Some block need full reset!\n");
4142 * amdgpu_device_ip_soft_reset - do a soft reset
4144 * @adev: amdgpu_device pointer
4146 * The list of all the hardware IPs that make up the asic is walked and the
4147 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to
 * soft reset the IP.
4150 * Returns 0 on success, negative error code on failure.
4152 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4156 for (i = 0; i < adev->num_ip_blocks; i++) {
4157 if (!adev->ip_blocks[i].status.valid)
4159 if (adev->ip_blocks[i].status.hang &&
4160 adev->ip_blocks[i].version->funcs->soft_reset) {
4161 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4171 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4173 * @adev: amdgpu_device pointer
4175 * The list of all the hardware IPs that make up the asic is walked and the
4176 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4177 * handles any IP specific hardware or software state changes that are
4178 * necessary after the IP has been soft reset.
4179 * Returns 0 on success, negative error code on failure.
4181 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4185 for (i = 0; i < adev->num_ip_blocks; i++) {
4186 if (!adev->ip_blocks[i].status.valid)
4188 if (adev->ip_blocks[i].status.hang &&
4189 adev->ip_blocks[i].version->funcs->post_soft_reset)
4190 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4199 * amdgpu_device_recover_vram - Recover some VRAM contents
4201 * @adev: amdgpu_device pointer
4203 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4204 * restore things like GPUVM page tables after a GPU reset where
4205 * the contents of VRAM might be lost.
4208 * 0 on success, negative error code on failure.
4210 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4212 struct dma_fence *fence = NULL, *next = NULL;
4213 struct amdgpu_bo *shadow;
4214 struct amdgpu_bo_vm *vmbo;
4217 if (amdgpu_sriov_runtime(adev))
4218 tmo = msecs_to_jiffies(8000);
4220 tmo = msecs_to_jiffies(100);
4222 dev_info(adev->dev, "recover vram bo from shadow start\n");
4223 mutex_lock(&adev->shadow_list_lock);
4224 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4226 /* No need to recover an evicted BO */
4227 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4228 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4229 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4232 r = amdgpu_bo_restore_shadow(shadow, &next);
4237 tmo = dma_fence_wait_timeout(fence, false, tmo);
4238 dma_fence_put(fence);
4243 } else if (tmo < 0) {
4251 mutex_unlock(&adev->shadow_list_lock);
4254 tmo = dma_fence_wait_timeout(fence, false, tmo);
4255 dma_fence_put(fence);
4257 if (r < 0 || tmo <= 0) {
4258 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4262 dev_info(adev->dev, "recover vram bo from shadow done\n");
4268 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4270 * @adev: amdgpu_device pointer
4271 * @from_hypervisor: request from hypervisor
 * Do a VF FLR and reinitialize the asic.
 * Returns 0 on success, otherwise a negative error code.
4276 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4277 bool from_hypervisor)
4281 if (from_hypervisor)
4282 r = amdgpu_virt_request_full_gpu(adev, true);
4284 r = amdgpu_virt_reset_gpu(adev);
4288 amdgpu_amdkfd_pre_reset(adev);
4290 /* Resume IP prior to SMC */
4291 r = amdgpu_device_ip_reinit_early_sriov(adev);
4295 amdgpu_virt_init_data_exchange(adev);
/* we need to recover the gart prior to running SMC/CP/SDMA resume */
4297 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4299 r = amdgpu_device_fw_loading(adev);
4303 /* now we are okay to resume SMC/CP/SDMA */
4304 r = amdgpu_device_ip_reinit_late_sriov(adev);
4308 amdgpu_irq_gpu_reset_resume_helper(adev);
4309 r = amdgpu_ib_ring_tests(adev);
4310 amdgpu_amdkfd_post_reset(adev);
4313 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4314 amdgpu_inc_vram_lost(adev);
4315 r = amdgpu_device_recover_vram(adev);
4317 amdgpu_virt_release_full_gpu(adev, true);
 * amdgpu_device_has_job_running - check if there is any job in the pending list
 * @adev: amdgpu_device pointer
 * Check whether there is any job in the pending list.
4329 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4332 struct drm_sched_job *job;
4334 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4335 struct amdgpu_ring *ring = adev->rings[i];
4337 if (!ring || !ring->sched.thread)
4340 spin_lock(&ring->sched.job_list_lock);
4341 job = list_first_entry_or_null(&ring->sched.pending_list,
4342 struct drm_sched_job, list);
4343 spin_unlock(&ring->sched.job_list_lock);
4351 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4353 * @adev: amdgpu_device pointer
4355 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4358 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4360 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4361 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4365 if (amdgpu_gpu_recovery == 0)
4368 if (amdgpu_sriov_vf(adev))
4371 if (amdgpu_gpu_recovery == -1) {
4372 switch (adev->asic_type) {
4378 case CHIP_POLARIS10:
4379 case CHIP_POLARIS11:
4380 case CHIP_POLARIS12:
4391 case CHIP_SIENNA_CICHLID:
4392 case CHIP_NAVY_FLOUNDER:
4393 case CHIP_DIMGREY_CAVEFISH:
4394 case CHIP_BEIGE_GOBY:
4396 case CHIP_ALDEBARAN:
4406 dev_info(adev->dev, "GPU recovery disabled.\n");
4410 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4415 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4417 dev_info(adev->dev, "GPU mode1 reset\n");
4420 pci_clear_master(adev->pdev);
4422 amdgpu_device_cache_pci_state(adev->pdev);
4424 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4425 dev_info(adev->dev, "GPU smu mode1 reset\n");
4426 ret = amdgpu_dpm_mode1_reset(adev);
4428 dev_info(adev->dev, "GPU psp mode1 reset\n");
4429 ret = psp_gpu_reset(adev);
4433 dev_err(adev->dev, "GPU mode1 reset failed\n");
4435 amdgpu_device_load_pci_state(adev->pdev);
4437 /* wait for asic to come out of reset */
4438 for (i = 0; i < adev->usec_timeout; i++) {
4439 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4441 if (memsize != 0xffffffff)
4446 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
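
/*
 * Mode1 reset recap (mirrors the code above): flag the scratch regs as
 * hung, cache PCI state, issue the reset through the SMU when mode1 is
 * supported there and through the PSP otherwise, restore PCI state,
 * then poll memsize until the ASIC answers again.
 */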
4450 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4451 struct amdgpu_reset_context *reset_context)
4454 struct amdgpu_job *job = NULL;
4455 bool need_full_reset =
4456 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4458 if (reset_context->reset_req_dev == adev)
4459 job = reset_context->job;
/* no need to dump if the device is not in a good state during the probe period */
4462 if (!adev->gmc.xgmi.pending_reset)
4463 amdgpu_debugfs_wait_dump(adev);
4465 if (amdgpu_sriov_vf(adev)) {
4466 /* stop the data exchange thread */
4467 amdgpu_virt_fini_data_exchange(adev);
4470 /* block all schedulers and reset given job's ring */
4471 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4472 struct amdgpu_ring *ring = adev->rings[i];
4474 if (!ring || !ring->sched.thread)
/* Clear the job fences from fence drv to avoid force_completion
 * on them; leave NULL and the vm flush fences in fence drv */
4479 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4480 struct dma_fence *old, **ptr;
4482 ptr = &ring->fence_drv.fences[j];
4483 old = rcu_dereference_protected(*ptr, 1);
4484 if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4485 RCU_INIT_POINTER(*ptr, NULL);
4488 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4489 amdgpu_fence_driver_force_completion(ring);
4493 drm_sched_increase_karma(&job->base);
4495 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4496 /* If reset handler not implemented, continue; otherwise return */
4502 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4503 if (!amdgpu_sriov_vf(adev)) {
4505 if (!need_full_reset)
4506 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4508 if (!need_full_reset) {
4509 amdgpu_device_ip_pre_soft_reset(adev);
4510 r = amdgpu_device_ip_soft_reset(adev);
4511 amdgpu_device_ip_post_soft_reset(adev);
4512 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4513 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4514 need_full_reset = true;
4518 if (need_full_reset)
4519 r = amdgpu_device_ip_suspend(adev);
4520 if (need_full_reset)
4521 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4523 clear_bit(AMDGPU_NEED_FULL_RESET,
4524 &reset_context->flags);
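
/**
 * amdgpu_do_asic_reset - perform the actual asic reset and resume
 *
 * @device_list_handle: list of devices to reset (all XGMI hive nodes)
 * @reset_context: reset parameters
 *
 * Resets every device on the list (in parallel for XGMI hives), then
 * re-posts the asic, resumes the IP blocks, recovers GTT/VRAM and runs IB
 * ring tests. Returns -EAGAIN when the IB tests fail and a full reset
 * should be retried.
 */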
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset, skip_hw_reset, vram_lost = false;
	int r = 0;

	/* Try reset handler method first */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -ENOSYS)
		r = 0;
	else
		return r;

	/* Reset handler not implemented, use the default method */
	need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				tmp_adev->gmc.xgmi.pending_reset = false;
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
					r, adev_to_drm(tmp_adev)->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceeding */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			if (tmp_adev->mmhub.ras_funcs &&
			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
		}

		amdgpu_ras_intr_cleared();
	}

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (need_full_reset) {
			/* post card */
			r = amdgpu_device_asic_init(tmp_adev);
			if (r) {
				dev_warn(tmp_adev->dev, "asic atom init failed!");
			} else {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC back as tracked since the
				 * reset already completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				amdgpu_fbdev_set_suspend(tmp_adev, 0);

				/*
				 * The GPU enters a bad state once the number
				 * of faulty pages detected by ECC reaches the
				 * threshold, and RAS recovery is scheduled
				 * next. Check here and break recovery if the
				 * bad page threshold has indeed been exceeded,
				 * reminding the user to retire this GPU or set
				 * a bigger bad_page_threshold value the next
				 * time the driver is probed.
				 */
				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}

				/* Update PSP FW topology after reset */
				if (reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(
						reset_context->hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	if (need_full_reset)
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	else
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	return r;
}
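
/*
 * Take the exclusive per-device reset lock and set the MP1 state expected
 * by the chosen reset method. Returns false if another reset is already
 * in progress on this device.
 */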
static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive)
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	else
		down_write(&adev->reset_sem);

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}
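
/*
 * Counterpart of amdgpu_device_lock_adev(): restore the MP1 state and
 * drop the per-device reset lock.
 */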
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}

/*
 * Lock a list of amdgpu devices in a hive safely. If the device is not
 * part of a hive with multiple nodes, this behaves like
 * amdgpu_device_lock_adev.
 *
 * The unlock path won't require a rollback.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;

roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the lock iteration broke in the middle of a hive,
		 * either there is a race, or a hive device locked up
		 * independently. We may or may not be in trouble, so roll
		 * back the locks taken so far and emit a warning.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			amdgpu_device_unlock_adev(tmp_adev);
		}
	}
	return -EAGAIN;
}
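
/*
 * Re-enable and resume runtime PM for the HDA controller (function 1 of
 * the GPU's PCI slot) that was suspended before the reset.
 */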
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}
}
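
/*
 * Runtime-suspend the display audio function of the GPU before a reset,
 * so the audio driver is not surprised by the hardware changing
 * underneath it. Returns 0 when the codec was successfully suspended.
 */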
static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed to suffer
	 * the audio issue when the codec is not properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * use a fixed 4s interval. The audio controller's default
		 * autosuspend delay is 3s, so 4s is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}
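
/*
 * Re-check which job was really guilty: resubmit the first pending job of
 * each ring one at a time and wait for its fence. A job whose fence times
 * out is the real offender and triggers another HW reset.
 */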
static void amdgpu_device_recheck_guilty_jobs(
	struct amdgpu_device *adev, struct list_head *device_list_handle,
	struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		int ret = 0;
		struct drm_sched_job *s_job;

		if (!ring || !ring->sched.thread)
			continue;

		s_job = list_first_entry_or_null(&ring->sched.pending_list,
				struct drm_sched_job, list);
		if (s_job == NULL)
			continue;

		/* clear the job's guilty flag; the following step decides the real one */
		drm_sched_reset_karma(s_job);
		drm_sched_resubmit_jobs_ext(&ring->sched, 1);

		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
		if (ret == 0) { /* timeout */
			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
				  ring->sched.name, s_job->id);

			/* set guilty */
			drm_sched_increase_karma(s_job);
retry:
			/* do hw reset */
			if (amdgpu_sriov_vf(adev)) {
				amdgpu_virt_fini_data_exchange(adev);
				r = amdgpu_device_reset_sriov(adev, false);
				if (r)
					adev->asic_reset_res = r;
			} else {
				clear_bit(AMDGPU_SKIP_HW_RESET,
					  &reset_context->flags);
				r = amdgpu_do_asic_reset(device_list_handle,
							 reset_context);
				if (r && r == -EAGAIN)
					goto retry;
			}

			/*
			 * bump the reset counter so that the following
			 * resubmitted jobs can flush their vmid
			 */
			atomic_inc(&adev->gpu_reset_counter);
			continue;
		}

		/* got the hw fence, signal the finished fence */
		atomic_dec(ring->sched.score);
		dma_fence_get(&s_job->s_fence->finished);
		dma_fence_signal(&s_job->s_fence->finished);
		dma_fence_put(&s_job->s_fence->finished);

		/* remove the node from the list and free the job */
		spin_lock(&ring->sched.job_list_lock);
		list_del_init(&s_job->list);
		spin_unlock(&ring->sched.job_list_lock);
		ring->sched.ops->free_job(s_job);
	}
}
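
/*
 * For reference, a minimal sketch of the typical caller of
 * amdgpu_device_gpu_recover(): the drm_sched job timeout handler. The
 * exact shape lives in amdgpu_job.c and may differ from this sketch:
 *
 *	static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 *	{
 *		struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 *		struct amdgpu_job *job = to_amdgpu_job(s_job);
 *
 *		if (amdgpu_device_should_recover_gpu(ring->adev))
 *			amdgpu_device_gpu_recover(ring->adev, job);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */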

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	int tmp_vram_lost_counter;
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop":"reset");

	/*
	 * Here we trylock to avoid a chain of resets executing, triggered
	 * either by jobs on different adevs in an XGMI hive or by jobs on
	 * different schedulers of the same device, while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices for an XGMI hive, so that should take care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.job = job;
	reset_context.hive = hive;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	/*
	 * Lock the device before we try to operate on the linked list;
	 * if we didn't get the device lock, don't touch the linked list
	 * since others may be iterating over it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);

		/* even when we skip this reset, the job is still marked guilty */
		if (job)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}

	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * The power domain of the graphics device is shared with
		 * the AZ (audio) power domain. Without this, we may change
		 * the audio hardware from behind the audio driver's back
		 * and trigger audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check the guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to the parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
		/*TODO Should we stop ?*/
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
	/* Actual ASIC resets if needed. */
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		/*
		 * Sometimes a later bad compute job can block a good gfx job,
		 * as the gfx and compute rings share internal GC hardware.
		 * Add an additional guilty-job recheck step to find the real
		 * culprit: it synchronously resubmits and waits for the first
		 * pending job to be signaled. If that wait times out, the job
		 * is identified as the real guilty one.
		 */
		if (amdgpu_gpu_recovery == 2 &&
		    !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
			amdgpu_device_recheck_guilty_jobs(
				tmp_adev, device_list_handle, &reset_context);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
		}

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/*
		 * kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it was not
		 * initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
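
/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables doorbell interrupts when RAS is enabled and asks the DPM code
 * to put the asic into the BACO state.
 */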
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}
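
/**
 * amdgpu_device_baco_exit - exit BACO
 *
 * @dev: drm_device pointer
 *
 * Brings the asic out of BACO via DPM and re-enables doorbell interrupts
 * when RAS is enabled; in passthrough mode it also clears any stale
 * doorbell interrupt.
 */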
int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}
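
/* Cancel any timeout (TDR) work still pending on every scheduler ring. */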
static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Cancel and wait for all TDRs in progress if failing to
		 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to the GPU during PCI error recovery.
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling, as we do for a regular GPU
		 * reset, for the duration of the recovery.
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/*
	 * This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset the slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}
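
/*
 * Save and stash the PCI config space of the device so that it can be
 * restored with amdgpu_device_load_pci_state() after a reset or PCI
 * error recovery. Returns true on success.
 */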
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
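
/*
 * Flush the HDP (Host Data Path) write queue, either through the ring's
 * own HDP-flush packet when available or through the asic callback. APUs
 * and CPU-coherent XGMI configurations don't need it.
 */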
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}