2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
55 #ifdef CONFIG_DRM_AMDGPU_CIK
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
78 #include <drm/drm_drv.h>
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
88 #define AMDGPU_RESUME_MS 2000
89 #define AMDGPU_MAX_RETRY_LIMIT 2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
92 const char *amdgpu_asic_name[] = {
134 * DOC: pcie_replay_count
136 * The amdgpu driver provides a sysfs API for reporting the total number
137 * of PCIe replays (NAKs).
138 * The file pcie_replay_count is used for this and returns the total
139 * number of replays as a sum of the NAKs generated and NAKs received.
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 struct device_attribute *attr, char *buf)
145 struct drm_device *ddev = dev_get_drvdata(dev);
146 struct amdgpu_device *adev = drm_to_adev(ddev);
147 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
149 return sysfs_emit(buf, "%llu\n", cnt);
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 amdgpu_device_get_pcie_replay_count, NULL);
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
160 * The amdgpu driver provides a sysfs API for reporting the product name
162 * The file product_name is used for this and returns the product name
163 * as returned from the FRU.
164 * NOTE: This is only available for certain server cards
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 struct device_attribute *attr, char *buf)
170 struct drm_device *ddev = dev_get_drvdata(dev);
171 struct amdgpu_device *adev = drm_to_adev(ddev);
173 return sysfs_emit(buf, "%s\n", adev->product_name);
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 amdgpu_device_get_product_name, NULL);
180 * DOC: product_number
182 * The amdgpu driver provides a sysfs API for reporting the part number
184 * The file product_number is used for this and returns the part number
185 * as returned from the FRU.
186 * NOTE: This is only available for certain server cards
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 struct device_attribute *attr, char *buf)
192 struct drm_device *ddev = dev_get_drvdata(dev);
193 struct amdgpu_device *adev = drm_to_adev(ddev);
195 return sysfs_emit(buf, "%s\n", adev->product_number);
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 amdgpu_device_get_product_number, NULL);
204 * The amdgpu driver provides a sysfs API for reporting the serial number
206 * The file serial_number is used for this and returns the serial number
207 * as returned from the FRU.
208 * NOTE: This is only available for certain server cards
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 struct device_attribute *attr, char *buf)
214 struct drm_device *ddev = dev_get_drvdata(dev);
215 struct amdgpu_device *adev = drm_to_adev(ddev);
217 return sysfs_emit(buf, "%s\n", adev->serial);
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 amdgpu_device_get_serial_number, NULL);
224 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
226 * @dev: drm_device pointer
228 * Returns true if the device is a dGPU with ATPX power control,
229 * otherwise returns false.
231 bool amdgpu_device_supports_px(struct drm_device *dev)
233 struct amdgpu_device *adev = drm_to_adev(dev);
235 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
241 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
243 * @dev: drm_device pointer
245 * Returns true if the device is a dGPU with ACPI power control,
246 * otherwise returns false.
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
250 struct amdgpu_device *adev = drm_to_adev(dev);
253 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
259 * amdgpu_device_supports_baco - Does the device support BACO
261 * @dev: drm_device pointer
263 * Returns true if the device supports BACO,
264 * otherwise returns false.
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
268 struct amdgpu_device *adev = drm_to_adev(dev);
270 return amdgpu_asic_supports_baco(adev);
274 * amdgpu_device_supports_smart_shift - Is the device dGPU with
275 * smart shift support
277 * @dev: drm_device pointer
279 * Returns true if the device is a dGPU with Smart Shift support,
280 * otherwise returns false.
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
284 return (amdgpu_device_supports_boco(dev) &&
285 amdgpu_acpi_is_power_shift_control_supported());
289 * VRAM access helper functions
293 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
295 * @adev: amdgpu_device pointer
296 * @pos: offset of the buffer in vram
297 * @buf: virtual address of the buffer in system memory
298 * @size: read/write size; the buffer at @buf must be at least @size bytes
299 * @write: true - write to vram, otherwise - read from vram
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 void *buf, size_t size, bool write)
305 uint32_t hi = ~0, tmp = 0;
306 uint32_t *data = buf;
310 if (!drm_dev_enter(adev_to_drm(adev), &idx))
313 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
315 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 for (last = pos + size; pos < last; pos += 4) {
319 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
321 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
325 WREG32_NO_KIQ(mmMM_DATA, *data++);
327 *data++ = RREG32_NO_KIQ(mmMM_DATA);
330 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
335 * amdgpu_device_aper_access - access vram by the vram aperture
337 * @adev: amdgpu_device pointer
338 * @pos: offset of the buffer in vram
339 * @buf: virtual address of the buffer in system memory
340 * @size: read/write size; the buffer at @buf must be at least @size bytes
341 * @write: true - write to vram, otherwise - read from vram
343 * Returns the number of bytes transferred.
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 void *buf, size_t size, bool write)
353 if (!adev->mman.aper_base_kaddr)
356 last = min(pos + size, adev->gmc.visible_vram_size);
358 addr = adev->mman.aper_base_kaddr + pos;
362 memcpy_toio(addr, buf, count);
364 amdgpu_device_flush_hdp(adev, NULL);
366 amdgpu_device_invalidate_hdp(adev, NULL);
368 memcpy_fromio(buf, addr, count);
380 * amdgpu_device_vram_access - read/write a buffer in vram
382 * @adev: amdgpu_device pointer
383 * @pos: offset of the buffer in vram
384 * @buf: virtual address of the buffer in system memory
385 * @size: read/write size; the buffer at @buf must be at least @size bytes
386 * @write: true - write to vram, otherwise - read from vram
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 void *buf, size_t size, bool write)
393 /* try using the vram aperture to access vram first */
394 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
397 /* use MM to access the rest of vram */
400 amdgpu_device_mm_access(adev, pos, buf, size, write);
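/*
 * Usage sketch (illustrative): read the first dword of VRAM into a local
 * buffer. Both @pos and @size must stay 4-byte aligned so that the
 * MM_INDEX/MM_DATA fallback above can be used.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, 0x0, &val, sizeof(val), false);
 */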
405 * register access helper functions.
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
411 if (adev->no_hw_access)
414 #ifdef CONFIG_LOCKDEP
416 * This is a bit complicated to understand, so worth a comment. What we assert
417 * here is that the GPU reset is not running on another thread in parallel.
419 * For this we trylock the read side of the reset semaphore; if that succeeds
420 * we know that the reset is not running in parallel.
422 * If the trylock fails we assert that we are either already holding the read
423 * side of the lock or are the reset thread itself and hold the write side of
427 if (down_read_trylock(&adev->reset_domain->sem))
428 up_read(&adev->reset_domain->sem);
430 lockdep_assert_held(&adev->reset_domain->sem);
437 * amdgpu_device_rreg - read a memory mapped IO or indirect register
439 * @adev: amdgpu_device pointer
440 * @reg: dword aligned register offset
441 * @acc_flags: access flags which require special behavior
443 * Returns the 32 bit value from the offset specified.
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 uint32_t reg, uint32_t acc_flags)
450 if (amdgpu_device_skip_hw_access(adev))
453 if ((reg * 4) < adev->rmmio_size) {
454 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 amdgpu_sriov_runtime(adev) &&
456 down_read_trylock(&adev->reset_domain->sem)) {
457 ret = amdgpu_kiq_rreg(adev, reg);
458 up_read(&adev->reset_domain->sem);
460 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
463 ret = adev->pcie_rreg(adev, reg * 4);
466 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
472 * MMIO register read with byte helper functions
473 * @offset: byte offset from MMIO start
478 * amdgpu_mm_rreg8 - read a memory mapped IO register
480 * @adev: amdgpu_device pointer
481 * @offset: byte aligned register offset
483 * Returns the 8 bit value from the offset specified.
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
487 if (amdgpu_device_skip_hw_access(adev))
490 if (offset < adev->rmmio_size)
491 return (readb(adev->rmmio + offset));
496 * MMIO register write with byte helper functions
497 * @offset: byte offset from MMIO start
498 * @value: the value to be written to the register
502 * amdgpu_mm_wreg8 - write a memory mapped IO register
504 * @adev: amdgpu_device pointer
505 * @offset: byte aligned register offset
506 * @value: 8 bit value to write
508 * Writes the value specified to the offset specified.
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
512 if (amdgpu_device_skip_hw_access(adev))
515 if (offset < adev->rmmio_size)
516 writeb(value, adev->rmmio + offset);
522 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
524 * @adev: amdgpu_device pointer
525 * @reg: dword aligned register offset
526 * @v: 32 bit value to write to the register
527 * @acc_flags: access flags which require special behavior
529 * Writes the value specified to the offset specified.
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 uint32_t reg, uint32_t v,
535 if (amdgpu_device_skip_hw_access(adev))
538 if ((reg * 4) < adev->rmmio_size) {
539 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 amdgpu_sriov_runtime(adev) &&
541 down_read_trylock(&adev->reset_domain->sem)) {
542 amdgpu_kiq_wreg(adev, reg, v);
543 up_read(&adev->reset_domain->sem);
545 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
548 adev->pcie_wreg(adev, reg * 4, v);
551 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
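/*
 * Most callers use the RREG32()/WREG32() macros from amdgpu.h, which wrap
 * these helpers. A typical read-modify-write sketch (the register and
 * mask names below are illustrative only):
 *
 *	tmp = RREG32(mmEXAMPLE_REG);
 *	tmp |= EXAMPLE_EN_MASK;
 *	WREG32(mmEXAMPLE_REG, tmp);
 */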
555 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
557 * @adev: amdgpu_device pointer
558 * @reg: mmio/rlc register
561 * This function is invoked only for debugfs register access.
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 uint32_t reg, uint32_t v)
566 if (amdgpu_device_skip_hw_access(adev))
569 if (amdgpu_sriov_fullaccess(adev) &&
570 adev->gfx.rlc.funcs &&
571 adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 } else if ((reg * 4) >= adev->rmmio_size) {
575 adev->pcie_wreg(adev, reg * 4, v);
577 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
582 * amdgpu_mm_rdoorbell - read a doorbell dword
584 * @adev: amdgpu_device pointer
585 * @index: doorbell index
587 * Returns the value in the doorbell aperture at the
588 * requested doorbell index (CIK).
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
592 if (amdgpu_device_skip_hw_access(adev))
595 if (index < adev->doorbell.num_doorbells) {
596 return readl(adev->doorbell.ptr + index);
598 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
604 * amdgpu_mm_wdoorbell - write a doorbell dword
606 * @adev: amdgpu_device pointer
607 * @index: doorbell index
610 * Writes @v to the doorbell aperture at the
611 * requested doorbell index (CIK).
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
615 if (amdgpu_device_skip_hw_access(adev))
618 if (index < adev->doorbell.num_doorbells) {
619 writel(v, adev->doorbell.ptr + index);
621 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
626 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
628 * @adev: amdgpu_device pointer
629 * @index: doorbell index
631 * Returns the value in the doorbell aperture at the
632 * requested doorbell index (VEGA10+).
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
636 if (amdgpu_device_skip_hw_access(adev))
639 if (index < adev->doorbell.num_doorbells) {
640 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
642 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
648 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
650 * @adev: amdgpu_device pointer
651 * @index: doorbell index
654 * Writes @v to the doorbell aperture at the
655 * requested doorbell index (VEGA10+).
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
659 if (amdgpu_device_skip_hw_access(adev))
662 if (index < adev->doorbell.num_doorbells) {
663 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
665 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
670 * amdgpu_device_indirect_rreg - read an indirect register
672 * @adev: amdgpu_device pointer
673 * @pcie_index: mmio register offset
674 * @pcie_data: mmio register offset
675 * @reg_addr: indirect register address to read from
677 * Returns the value of indirect register @reg_addr
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 u32 pcie_index, u32 pcie_data,
685 void __iomem *pcie_index_offset;
686 void __iomem *pcie_data_offset;
688 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
692 writel(reg_addr, pcie_index_offset);
693 readl(pcie_index_offset);
694 r = readl(pcie_data_offset);
695 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
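/*
 * Sketch of how an ASIC file typically wires its pcie_rreg callback to
 * this helper (simplified; the function name is hypothetical and the
 * index/data offsets usually come from the nbio funcs):
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 address = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, address, data, reg);
 *	}
 */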
701 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
703 * @adev: amdgpu_device pointer
704 * @pcie_index: mmio register offset
705 * @pcie_data: mmio register offset
706 * @reg_addr: indirect register address to read from
708 * Returns the value of indirect register @reg_addr
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 u32 pcie_index, u32 pcie_data,
716 void __iomem *pcie_index_offset;
717 void __iomem *pcie_data_offset;
719 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
723 /* read low 32 bits */
724 writel(reg_addr, pcie_index_offset);
725 readl(pcie_index_offset);
726 r = readl(pcie_data_offset);
727 /* read high 32 bits */
728 writel(reg_addr + 4, pcie_index_offset);
729 readl(pcie_index_offset);
730 r |= ((u64)readl(pcie_data_offset) << 32);
731 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
737 * amdgpu_device_indirect_wreg - write an indirect register address
739 * @adev: amdgpu_device pointer
740 * @pcie_index: mmio register offset
741 * @pcie_data: mmio register offset
742 * @reg_addr: indirect register offset
743 * @reg_data: indirect register data
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 u32 pcie_index, u32 pcie_data,
748 u32 reg_addr, u32 reg_data)
751 void __iomem *pcie_index_offset;
752 void __iomem *pcie_data_offset;
754 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
758 writel(reg_addr, pcie_index_offset);
759 readl(pcie_index_offset);
760 writel(reg_data, pcie_data_offset);
761 readl(pcie_data_offset);
762 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
766 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register address
768 * @adev: amdgpu_device pointer
769 * @pcie_index: mmio register offset
770 * @pcie_data: mmio register offset
771 * @reg_addr: indirect register offset
772 * @reg_data: indirect register data
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 u32 pcie_index, u32 pcie_data,
777 u32 reg_addr, u64 reg_data)
780 void __iomem *pcie_index_offset;
781 void __iomem *pcie_data_offset;
783 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
787 /* write low 32 bits */
788 writel(reg_addr, pcie_index_offset);
789 readl(pcie_index_offset);
790 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 readl(pcie_data_offset);
792 /* write high 32 bits */
793 writel(reg_addr + 4, pcie_index_offset);
794 readl(pcie_index_offset);
795 writel((u32)(reg_data >> 32), pcie_data_offset);
796 readl(pcie_data_offset);
797 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
801 * amdgpu_invalid_rreg - dummy reg read function
803 * @adev: amdgpu_device pointer
804 * @reg: offset of register
806 * Dummy register read function. Used for register blocks
807 * that certain asics don't have (all asics).
808 * Returns the value in the register.
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
812 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
818 * amdgpu_invalid_wreg - dummy reg write function
820 * @adev: amdgpu_device pointer
821 * @reg: offset of register
822 * @v: value to write to the register
824 * Dummy register write function. Used for register blocks
825 * that certain asics don't have (all asics).
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
829 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
835 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
837 * @adev: amdgpu_device pointer
838 * @reg: offset of register
840 * Dummy register read function. Used for register blocks
841 * that certain asics don't have (all asics).
842 * Returns the value in the register.
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
846 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
852 * amdgpu_invalid_wreg64 - dummy reg write function
854 * @adev: amdgpu_device pointer
855 * @reg: offset of register
856 * @v: value to write to the register
858 * Dummy register write function. Used for register blocks
859 * that certain asics don't have (all asics).
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
863 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
869 * amdgpu_block_invalid_rreg - dummy reg read function
871 * @adev: amdgpu_device pointer
872 * @block: offset of instance
873 * @reg: offset of register
875 * Dummy register read function. Used for register blocks
876 * that certain asics don't have (all asics).
877 * Returns the value in the register.
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 uint32_t block, uint32_t reg)
882 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
889 * amdgpu_block_invalid_wreg - dummy reg write function
891 * @adev: amdgpu_device pointer
892 * @block: offset of instance
893 * @reg: offset of register
894 * @v: value to write to the register
896 * Dummy register write function. Used for register blocks
897 * that certain asics don't have (all asics).
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
901 uint32_t reg, uint32_t v)
903 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
909 * amdgpu_device_asic_init - Wrapper for atom asic_init
911 * @adev: amdgpu_device pointer
913 * Does any asic specific work and then calls atom asic init.
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
917 amdgpu_asic_pre_asic_init(adev);
919 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920 return amdgpu_atomfirmware_asic_init(adev, true);
922 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
926 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
928 * @adev: amdgpu_device pointer
930 * Allocates a scratch page of VRAM for use by various things in the driver.
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
935 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937 &adev->vram_scratch.robj,
938 &adev->vram_scratch.gpu_addr,
939 (void **)&adev->vram_scratch.ptr);
943 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
945 * @adev: amdgpu_device pointer
947 * Frees the VRAM scratch page.
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
951 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
955 * amdgpu_device_program_register_sequence - program an array of registers.
957 * @adev: amdgpu_device pointer
958 * @registers: pointer to the register array
959 * @array_size: size of the register array
961 * Programs an array of registers with AND and OR masks.
962 * This is a helper for setting golden registers.
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 const u32 *registers,
966 const u32 array_size)
968 u32 tmp, reg, and_mask, or_mask;
974 for (i = 0; i < array_size; i += 3) {
975 reg = registers[i + 0];
976 and_mask = registers[i + 1];
977 or_mask = registers[i + 2];
979 if (and_mask == 0xffffffff) {
984 if (adev->family >= AMDGPU_FAMILY_AI)
985 tmp |= (or_mask & and_mask);
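/*
 * Usage sketch: golden registers are supplied as {offset, and_mask,
 * or_mask} triples (the array name and values below are illustrative):
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG, 0xffffff00, 0x00000042,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev,
 *			golden_settings_example,
 *			ARRAY_SIZE(golden_settings_example));
 */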
994 * amdgpu_device_pci_config_reset - reset the GPU
996 * @adev: amdgpu_device pointer
998 * Resets the GPU using the pci config reset sequence.
999 * Only applicable to asics prior to vega10.
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1003 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1007 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1009 * @adev: amdgpu_device pointer
1011 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1015 return pci_reset_function(adev->pdev);
1019 * GPU doorbell aperture helpers function.
1022 * amdgpu_device_doorbell_init - Init doorbell driver information.
1024 * @adev: amdgpu_device pointer
1026 * Init doorbell driver information (CIK)
1027 * Returns 0 on success, error on failure.
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1032 /* No doorbell on SI hardware generation */
1033 if (adev->asic_type < CHIP_BONAIRE) {
1034 adev->doorbell.base = 0;
1035 adev->doorbell.size = 0;
1036 adev->doorbell.num_doorbells = 0;
1037 adev->doorbell.ptr = NULL;
1041 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1044 amdgpu_asic_init_doorbell_index(adev);
1046 /* doorbell bar mapping */
1047 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1050 if (adev->enable_mes) {
1051 adev->doorbell.num_doorbells =
1052 adev->doorbell.size / sizeof(u32);
1054 adev->doorbell.num_doorbells =
1055 min_t(u32, adev->doorbell.size / sizeof(u32),
1056 adev->doorbell_index.max_assignment + 1);
1057 if (adev->doorbell.num_doorbells == 0)
1060 /* For Vega, reserve and map two pages on the doorbell BAR since the
1061 * SDMA paging queue doorbell uses the second page. The
1062 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063 * doorbells are in the first page. So with the paging queue enabled,
1064 * max num_doorbells should grow by one page (0x400 in dwords)
1066 if (adev->asic_type >= CHIP_VEGA10)
1067 adev->doorbell.num_doorbells += 0x400;
1070 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071 adev->doorbell.num_doorbells *
1073 if (adev->doorbell.ptr == NULL)
1080 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1082 * @adev: amdgpu_device pointer
1084 * Tear down doorbell driver information (CIK)
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1088 iounmap(adev->doorbell.ptr);
1089 adev->doorbell.ptr = NULL;
1095 * amdgpu_device_wb_*()
1096 * Writeback is the method by which the GPU updates special pages in memory
1097 * with the status of certain GPU events (fences, ring pointers, etc.).
1101 * amdgpu_device_wb_fini - Disable Writeback and free memory
1103 * @adev: amdgpu_device pointer
1105 * Disables Writeback and frees the Writeback memory (all asics).
1106 * Used at driver shutdown.
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1110 if (adev->wb.wb_obj) {
1111 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1113 (void **)&adev->wb.wb);
1114 adev->wb.wb_obj = NULL;
1119 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1121 * @adev: amdgpu_device pointer
1123 * Initializes writeback and allocates writeback memory (all asics).
1124 * Used at driver startup.
1125 * Returns 0 on success or a negative error code on failure.
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1131 if (adev->wb.wb_obj == NULL) {
1132 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136 (void **)&adev->wb.wb);
1138 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1142 adev->wb.num_wb = AMDGPU_MAX_WB;
1143 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1145 /* clear wb memory */
1146 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1153 * amdgpu_device_wb_get - Allocate a wb entry
1155 * @adev: amdgpu_device pointer
1158 * Allocate a wb slot for use by the driver (all asics).
1159 * Returns 0 on success or -EINVAL on failure.
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1163 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1165 if (offset < adev->wb.num_wb) {
1166 __set_bit(offset, adev->wb.used);
1167 *wb = offset << 3; /* convert to dw offset */
1175 * amdgpu_device_wb_free - Free a wb entry
1177 * @adev: amdgpu_device pointer
1180 * Free a wb slot allocated for use by the driver (all asics)
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1185 if (wb < adev->wb.num_wb)
1186 __clear_bit(wb, adev->wb.used);
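/*
 * Usage sketch: rings reserve writeback slots for things like rptr and
 * fence tracking (simplified; error handling omitted):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *
 *		... use the slot ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */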
1190 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1192 * @adev: amdgpu_device pointer
1194 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195 * to fail, but if any of the BARs is not accessible after the resize we abort
1196 * driver loading by returning -ENODEV.
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1200 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201 struct pci_bus *root;
1202 struct resource *res;
1208 if (amdgpu_sriov_vf(adev))
1211 /* skip if the bios has already enabled large BAR */
1212 if (adev->gmc.real_vram_size &&
1213 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1216 /* Check if the root BUS has 64bit memory resources */
1217 root = adev->pdev->bus;
1218 while (root->parent)
1219 root = root->parent;
1221 pci_bus_for_each_resource(root, res, i) {
1222 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223 res->start > 0x100000000ull)
1227 /* Trying to resize is pointless without a root hub window above 4GB */
1231 /* Limit the BAR size to what is available */
1232 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1235 /* Disable memory decoding while we change the BAR addresses and size */
1236 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237 pci_write_config_word(adev->pdev, PCI_COMMAND,
1238 cmd & ~PCI_COMMAND_MEMORY);
1240 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241 amdgpu_device_doorbell_fini(adev);
1242 if (adev->asic_type >= CHIP_BONAIRE)
1243 pci_release_resource(adev->pdev, 2);
1245 pci_release_resource(adev->pdev, 0);
1247 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1249 DRM_INFO("Not enough PCI address space for a large BAR.");
1250 else if (r && r != -ENOTSUPP)
1251 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1253 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1255 /* When the doorbell or fb BAR isn't available we have no chance of
1258 r = amdgpu_device_doorbell_init(adev);
1259 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1262 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1268 * GPU helpers function.
1271 * amdgpu_device_need_post - check if the hw needs post or not
1273 * @adev: amdgpu_device pointer
1275 * Check if the asic has been initialized (all asics) at driver startup,
1276 * or if post is needed because a hw reset was performed.
1277 * Returns true if post is needed, false if not.
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1283 if (amdgpu_sriov_vf(adev))
1286 if (amdgpu_passthrough(adev)) {
1287 /* for FIJI: in the whole-GPU pass-through virtualization case, some old
1288 * smc fw still needs the driver to do a vPost after VM reboot, otherwise
1289 * the gpu hangs. smc fw versions above 22.15 don't have this flaw, so we
1290 * force a vPost for smc versions below 22.15
1292 if (adev->asic_type == CHIP_FIJI) {
1295 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296 /* force vPost if an error occurred */
1300 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301 if (fw_ver < 0x00160e00)
1306 /* Don't post if we need to reset whole hive on init */
1307 if (adev->gmc.xgmi.pending_reset)
1310 if (adev->has_hw_reset) {
1311 adev->has_hw_reset = false;
1315 /* bios scratch used on CIK+ */
1316 if (adev->asic_type >= CHIP_BONAIRE)
1317 return amdgpu_atombios_scratch_need_asic_init(adev);
1319 /* check MEM_SIZE for older asics */
1320 reg = amdgpu_asic_get_config_memsize(adev);
1322 if ((reg != 0) && (reg != 0xffffffff))
1329 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1331 * @adev: amdgpu_device pointer
1333 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334 * be set for this device.
1336 * Returns true if it should be used or false if not.
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1340 switch (amdgpu_aspm) {
1350 return pcie_aspm_enabled(adev->pdev);
1353 /* if we get transitioned to only one device, take VGA back */
1355 * amdgpu_device_vga_set_decode - enable/disable vga decode
1357 * @pdev: PCI device pointer
1358 * @state: enable/disable vga decode
1360 * Enable/disable vga decode (all asics).
1361 * Returns VGA resource flags.
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1366 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367 amdgpu_asic_set_vga_state(adev, state);
1369 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1372 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1376 * amdgpu_device_check_block_size - validate the vm block size
1378 * @adev: amdgpu_device pointer
1380 * Validates the vm block size specified via module parameter.
1381 * The vm block size defines number of bits in page table versus page directory,
1382 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383 * page table and the remaining bits are in the page directory.
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1387 /* defines number of bits in page table versus page directory,
1388 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389 * page table and the remaining bits are in the page directory */
1390 if (amdgpu_vm_block_size == -1)
1393 if (amdgpu_vm_block_size < 9) {
1394 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395 amdgpu_vm_block_size);
1396 amdgpu_vm_block_size = -1;
1401 * amdgpu_device_check_vm_size - validate the vm size
1403 * @adev: amdgpu_device pointer
1405 * Validates the vm size in GB specified via module parameter.
1406 * The VM size is the size of the GPU virtual memory space in GB.
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1410 /* no need to check the default value */
1411 if (amdgpu_vm_size == -1)
1414 if (amdgpu_vm_size < 1) {
1415 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1417 amdgpu_vm_size = -1;
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1424 bool is_os_64 = (sizeof(void *) == 8);
1425 uint64_t total_memory;
1426 uint64_t dram_size_seven_GB = 0x1B8000000;
1427 uint64_t dram_size_three_GB = 0xB8000000;
1429 if (amdgpu_smu_memory_pool_size == 0)
1433 DRM_WARN("Not 64-bit OS, feature not supported\n");
1437 total_memory = (uint64_t)si.totalram * si.mem_unit;
1439 if ((amdgpu_smu_memory_pool_size == 1) ||
1440 (amdgpu_smu_memory_pool_size == 2)) {
1441 if (total_memory < dram_size_three_GB)
1443 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444 (amdgpu_smu_memory_pool_size == 8)) {
1445 if (total_memory < dram_size_seven_GB)
1448 DRM_WARN("Smu memory pool size not supported\n");
1451 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1456 DRM_WARN("No enough system memory\n");
1458 adev->pm.smu_prv_buffer_size = 0;
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1463 if (!(adev->flags & AMD_IS_APU) ||
1464 adev->asic_type < CHIP_RAVEN)
1467 switch (adev->asic_type) {
1469 if (adev->pdev->device == 0x15dd)
1470 adev->apu_flags |= AMD_APU_IS_RAVEN;
1471 if (adev->pdev->device == 0x15d8)
1472 adev->apu_flags |= AMD_APU_IS_PICASSO;
1475 if ((adev->pdev->device == 0x1636) ||
1476 (adev->pdev->device == 0x164c))
1477 adev->apu_flags |= AMD_APU_IS_RENOIR;
1479 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1482 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1484 case CHIP_YELLOW_CARP:
1486 case CHIP_CYAN_SKILLFISH:
1487 if ((adev->pdev->device == 0x13FE) ||
1488 (adev->pdev->device == 0x143F))
1489 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1499 * amdgpu_device_check_arguments - validate module params
1501 * @adev: amdgpu_device pointer
1503 * Validates certain module parameters and updates
1504 * the associated values used by the driver (all asics).
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1508 if (amdgpu_sched_jobs < 4) {
1509 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1511 amdgpu_sched_jobs = 4;
1512 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1513 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1515 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1518 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519 /* gart size must be greater or equal to 32M */
1520 dev_warn(adev->dev, "gart size (%d) too small\n",
1522 amdgpu_gart_size = -1;
1525 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526 /* gtt size must be greater or equal to 32M */
1527 dev_warn(adev->dev, "gtt size (%d) too small\n",
1529 amdgpu_gtt_size = -1;
1532 /* valid range is between 4 and 9 inclusive */
1533 if (amdgpu_vm_fragment_size != -1 &&
1534 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536 amdgpu_vm_fragment_size = -1;
1539 if (amdgpu_sched_hw_submission < 2) {
1540 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541 amdgpu_sched_hw_submission);
1542 amdgpu_sched_hw_submission = 2;
1543 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545 amdgpu_sched_hw_submission);
1546 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1549 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551 amdgpu_reset_method = -1;
1554 amdgpu_device_check_smu_prv_buffer_size(adev);
1556 amdgpu_device_check_vm_size(adev);
1558 amdgpu_device_check_block_size(adev);
1560 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1566 * amdgpu_switcheroo_set_state - set switcheroo state
1568 * @pdev: pci dev pointer
1569 * @state: vga_switcheroo state
1571 * Callback for the switcheroo driver. Suspends or resumes
1572 * the asics before or after it is powered up using ACPI methods.
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575 enum vga_switcheroo_state state)
1577 struct drm_device *dev = pci_get_drvdata(pdev);
1580 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1583 if (state == VGA_SWITCHEROO_ON) {
1584 pr_info("switched on\n");
1585 /* don't suspend or resume card normally */
1586 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1588 pci_set_power_state(pdev, PCI_D0);
1589 amdgpu_device_load_pci_state(pdev);
1590 r = pci_enable_device(pdev);
1592 DRM_WARN("pci_enable_device failed (%d)\n", r);
1593 amdgpu_device_resume(dev, true);
1595 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1597 pr_info("switched off\n");
1598 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599 amdgpu_device_suspend(dev, true);
1600 amdgpu_device_cache_pci_state(pdev);
1601 /* Shut down the device */
1602 pci_disable_device(pdev);
1603 pci_set_power_state(pdev, PCI_D3cold);
1604 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1609 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1611 * @pdev: pci dev pointer
1613 * Callback for the switcheroo driver. Checks if the switcheroo
1614 * state can be changed.
1615 * Returns true if the state can be changed, false if not.
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1619 struct drm_device *dev = pci_get_drvdata(pdev);
1622 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623 * locking inversion with the driver load path. And the access here is
1624 * completely racy anyway. So don't bother with locking for now.
1626 return atomic_read(&dev->open_count) == 0;
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630 .set_gpu_state = amdgpu_switcheroo_set_state,
1632 .can_switch = amdgpu_switcheroo_can_switch,
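/*
 * Sketch of how these ops are registered during device init (simplified;
 * the px flag comes from amdgpu_device_supports_px()):
 *
 *	vga_switcheroo_register_client(adev->pdev,
 *				       &amdgpu_switcheroo_ops, px);
 */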
1636 * amdgpu_device_ip_set_clockgating_state - set the CG state
1638 * @dev: amdgpu_device pointer
1639 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640 * @state: clockgating state (gate or ungate)
1642 * Sets the requested clockgating state for all instances of
1643 * the hardware IP specified.
1644 * Returns the error code from the last instance.
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647 enum amd_ip_block_type block_type,
1648 enum amd_clockgating_state state)
1650 struct amdgpu_device *adev = dev;
1653 for (i = 0; i < adev->num_ip_blocks; i++) {
1654 if (!adev->ip_blocks[i].status.valid)
1656 if (adev->ip_blocks[i].version->type != block_type)
1658 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1660 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661 (void *)adev, state);
1663 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664 adev->ip_blocks[i].version->funcs->name, r);
1670 * amdgpu_device_ip_set_powergating_state - set the PG state
1672 * @dev: amdgpu_device pointer
1673 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674 * @state: powergating state (gate or ungate)
1676 * Sets the requested powergating state for all instances of
1677 * the hardware IP specified.
1678 * Returns the error code from the last instance.
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681 enum amd_ip_block_type block_type,
1682 enum amd_powergating_state state)
1684 struct amdgpu_device *adev = dev;
1687 for (i = 0; i < adev->num_ip_blocks; i++) {
1688 if (!adev->ip_blocks[i].status.valid)
1690 if (adev->ip_blocks[i].version->type != block_type)
1692 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1694 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695 (void *)adev, state);
1697 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698 adev->ip_blocks[i].version->funcs->name, r);
1704 * amdgpu_device_ip_get_clockgating_state - get the CG state
1706 * @adev: amdgpu_device pointer
1707 * @flags: clockgating feature flags
1709 * Walks the list of IPs on the device and updates the clockgating
1710 * flags for each IP.
1711 * Updates @flags with the feature flags for each hardware IP where
1712 * clockgating is enabled.
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1719 for (i = 0; i < adev->num_ip_blocks; i++) {
1720 if (!adev->ip_blocks[i].status.valid)
1722 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1728 * amdgpu_device_ip_wait_for_idle - wait for idle
1730 * @adev: amdgpu_device pointer
1731 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1733 * Waits for the requested hardware IP to be idle.
1734 * Returns 0 for success or a negative error code on failure.
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737 enum amd_ip_block_type block_type)
1741 for (i = 0; i < adev->num_ip_blocks; i++) {
1742 if (!adev->ip_blocks[i].status.valid)
1744 if (adev->ip_blocks[i].version->type == block_type) {
1745 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1756 * amdgpu_device_ip_is_idle - is the hardware IP idle
1758 * @adev: amdgpu_device pointer
1759 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1761 * Check if the hardware IP is idle or not.
1762 * Returns true if the IP is idle, false if not.
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765 enum amd_ip_block_type block_type)
1769 for (i = 0; i < adev->num_ip_blocks; i++) {
1770 if (!adev->ip_blocks[i].status.valid)
1772 if (adev->ip_blocks[i].version->type == block_type)
1773 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1780 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1782 * @adev: amdgpu_device pointer
1783 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1785 * Returns a pointer to the hardware IP block structure
1786 * if it exists for the asic, otherwise NULL.
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790 enum amd_ip_block_type type)
1794 for (i = 0; i < adev->num_ip_blocks; i++)
1795 if (adev->ip_blocks[i].version->type == type)
1796 return &adev->ip_blocks[i];
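/*
 * Usage sketch (illustrative): look up the GFX IP block and report its
 * version:
 *
 *	struct amdgpu_ip_block *ip_block =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *	if (ip_block)
 *		DRM_INFO("GFX IP v%d.%d\n", ip_block->version->major,
 *			 ip_block->version->minor);
 */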
1802 * amdgpu_device_ip_block_version_cmp
1804 * @adev: amdgpu_device pointer
1805 * @type: enum amd_ip_block_type
1806 * @major: major version
1807 * @minor: minor version
1809 * Returns 0 if equal or greater,
1810 * 1 if smaller or the ip_block doesn't exist.
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813 enum amd_ip_block_type type,
1814 u32 major, u32 minor)
1816 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1818 if (ip_block && ((ip_block->version->major > major) ||
1819 ((ip_block->version->major == major) &&
1820 (ip_block->version->minor >= minor))))
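/*
 * Usage sketch: gate code on a minimum IP block version (block type and
 * version numbers below are illustrative):
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *					       7, 0) == 0) {
 *		... at least SMC 7.0 is present ...
 *	}
 */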
1827 * amdgpu_device_ip_block_add
1829 * @adev: amdgpu_device pointer
1830 * @ip_block_version: pointer to the IP to add
1832 * Adds the IP block driver information to the collection of IPs on the asic.
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836 const struct amdgpu_ip_block_version *ip_block_version)
1838 if (!ip_block_version)
1841 switch (ip_block_version->type) {
1842 case AMD_IP_BLOCK_TYPE_VCN:
1843 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1846 case AMD_IP_BLOCK_TYPE_JPEG:
1847 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1854 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855 ip_block_version->funcs->name);
1857 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1863 * amdgpu_device_enable_virtual_display - enable virtual display feature
1865 * @adev: amdgpu_device pointer
1867 * Enables the virtual display feature if the user has enabled it via
1868 * the module parameter virtual_display. This feature provides virtual
1869 * display hardware on headless boards or in virtualized environments.
1870 * This function parses and validates the configuration string specified by
1871 * the user and configures the virtual display configuration (number of
1872 * virtual connectors, crtcs, etc.) specified.
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1876 adev->enable_virtual_display = false;
1878 if (amdgpu_virtual_display) {
1879 const char *pci_address_name = pci_name(adev->pdev);
1880 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1882 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883 pciaddstr_tmp = pciaddstr;
1884 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885 pciaddname = strsep(&pciaddname_tmp, ",");
1886 if (!strcmp("all", pciaddname)
1887 || !strcmp(pci_address_name, pciaddname)) {
1891 adev->enable_virtual_display = true;
1894 res = kstrtol(pciaddname_tmp, 10,
1902 adev->mode_info.num_crtc = num_crtc;
1904 adev->mode_info.num_crtc = 1;
1910 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911 amdgpu_virtual_display, pci_address_name,
1912 adev->enable_virtual_display, adev->mode_info.num_crtc);
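/*
 * Example module parameter values (format: <pci address>,<num crtcs>;
 * "all" matches every device; the addresses below are illustrative):
 *
 *	amdgpu.virtual_display=0000:04:00.0,2
 *	amdgpu.virtual_display=all,1
 */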
1919 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1921 * @adev: amdgpu_device pointer
1923 * Parses the asic configuration parameters specified in the gpu info
1924 * firmware and makes them available to the driver for use in configuring
1926 * Returns 0 on success, -EINVAL on failure.
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1930 const char *chip_name;
1933 const struct gpu_info_firmware_header_v1_0 *hdr;
1935 adev->firmware.gpu_info_fw = NULL;
1937 if (adev->mman.discovery_bin) {
1939 * FIXME: The bounding box is still needed by Navi12, so
1940 * temporarily read it from gpu_info firmware. Should be dropped
1941 * when DAL no longer needs it.
1943 if (adev->asic_type != CHIP_NAVI12)
1947 switch (adev->asic_type) {
1951 chip_name = "vega10";
1954 chip_name = "vega12";
1957 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958 chip_name = "raven2";
1959 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960 chip_name = "picasso";
1962 chip_name = "raven";
1965 chip_name = "arcturus";
1968 chip_name = "navi12";
1972 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1976 "Failed to load gpu_info firmware \"%s\"\n",
1980 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1983 "Failed to validate gpu_info firmware \"%s\"\n",
1988 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1991 switch (hdr->version_major) {
1994 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1999 * Should be dropped when DAL no longer needs it.
2001 if (adev->asic_type == CHIP_NAVI12)
2002 goto parse_soc_bounding_box;
2004 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008 adev->gfx.config.max_texture_channel_caches =
2009 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014 adev->gfx.config.double_offchip_lds_buf =
2015 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017 adev->gfx.cu_info.max_waves_per_simd =
2018 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022 if (hdr->version_minor >= 1) {
2023 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026 adev->gfx.config.num_sc_per_sh =
2027 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028 adev->gfx.config.num_packer_per_sc =
2029 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2032 parse_soc_bounding_box:
2034 * soc bounding box info is not integrated in the discovery table,
2035 * so we always need to parse it from the gpu info firmware if needed.
2037 if (hdr->version_minor == 2) {
2038 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2047 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2056 * amdgpu_device_ip_early_init - run early init for hardware IPs
2058 * @adev: amdgpu_device pointer
2060 * Early initialization pass for hardware IPs. The hardware IPs that make
2061 * up each asic are discovered, and each IP's early_init callback is run. This
2062 * is the first stage in initializing the asic.
2063 * Returns 0 on success, negative error code on failure.
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2067 struct drm_device *dev = adev_to_drm(adev);
2068 struct pci_dev *parent;
2071 amdgpu_device_enable_virtual_display(adev);
2073 if (amdgpu_sriov_vf(adev)) {
2074 r = amdgpu_virt_request_full_gpu(adev, true);
2079 switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2086 adev->family = AMDGPU_FAMILY_SI;
2087 r = si_set_ip_blocks(adev);
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2098 if (adev->flags & AMD_IS_APU)
2099 adev->family = AMDGPU_FAMILY_KV;
2101 adev->family = AMDGPU_FAMILY_CI;
2103 r = cik_set_ip_blocks(adev);
2111 case CHIP_POLARIS10:
2112 case CHIP_POLARIS11:
2113 case CHIP_POLARIS12:
2117 if (adev->flags & AMD_IS_APU)
2118 adev->family = AMDGPU_FAMILY_CZ;
2120 adev->family = AMDGPU_FAMILY_VI;
2122 r = vi_set_ip_blocks(adev);
2127 r = amdgpu_discovery_set_ip_blocks(adev);
2133 if (amdgpu_has_atpx() &&
2134 (amdgpu_is_atpx_hybrid() ||
2135 amdgpu_has_atpx_dgpu_power_cntl()) &&
2136 ((adev->flags & AMD_IS_APU) == 0) &&
2137 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138 adev->flags |= AMD_IS_PX;
2140 if (!(adev->flags & AMD_IS_APU)) {
2141 parent = pci_upstream_bridge(adev->pdev);
2142 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2145 amdgpu_amdkfd_device_probe(adev);
2147 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2153 for (i = 0; i < adev->num_ip_blocks; i++) {
2154 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155 DRM_ERROR("disabled ip block: %d <%s>\n",
2156 i, adev->ip_blocks[i].version->funcs->name);
2157 adev->ip_blocks[i].status.valid = false;
2159 if (adev->ip_blocks[i].version->funcs->early_init) {
2160 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2162 adev->ip_blocks[i].status.valid = false;
2164 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165 adev->ip_blocks[i].version->funcs->name, r);
2168 adev->ip_blocks[i].status.valid = true;
2171 adev->ip_blocks[i].status.valid = true;
2174 /* get the vbios after the asic_funcs are set up */
2175 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176 r = amdgpu_device_parse_gpu_info_fw(adev);
2181 if (!amdgpu_get_bios(adev))
2184 r = amdgpu_atombios_init(adev);
2186 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2191 /* get pf2vf msg info at its earliest time */
2192 if (amdgpu_sriov_vf(adev))
2193 amdgpu_virt_init_data_exchange(adev);
2198 adev->cg_flags &= amdgpu_cg_mask;
2199 adev->pg_flags &= amdgpu_pg_mask;
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2208 for (i = 0; i < adev->num_ip_blocks; i++) {
2209 if (!adev->ip_blocks[i].status.sw)
2211 if (adev->ip_blocks[i].status.hw)
2213 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2218 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219 adev->ip_blocks[i].version->funcs->name, r);
2222 adev->ip_blocks[i].status.hw = true;
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2233 for (i = 0; i < adev->num_ip_blocks; i++) {
2234 if (!adev->ip_blocks[i].status.sw)
2236 if (adev->ip_blocks[i].status.hw)
2238 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2240 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241 adev->ip_blocks[i].version->funcs->name, r);
2244 adev->ip_blocks[i].status.hw = true;
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2254 uint32_t smu_version;
2256 if (adev->asic_type >= CHIP_VEGA10) {
2257 for (i = 0; i < adev->num_ip_blocks; i++) {
2258 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2261 if (!adev->ip_blocks[i].status.sw)
2264 /* no need to do the fw loading again if already done */
2265 if (adev->ip_blocks[i].status.hw)
2268 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269 r = adev->ip_blocks[i].version->funcs->resume(adev);
2271 DRM_ERROR("resume of IP block <%s> failed %d\n",
2272 adev->ip_blocks[i].version->funcs->name, r);
2276 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2278 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279 adev->ip_blocks[i].version->funcs->name, r);
2284 adev->ip_blocks[i].status.hw = true;
2289 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
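/*
 * Illustrative sketch (hypothetical helper): on a cold boot the PSP block
 * is brought up via hw_init(), while on reset/suspend paths its resume()
 * callback is used instead, mirroring the branch in the loop above.
 */
static inline int amdgpu_example_bringup_psp(struct amdgpu_device *adev,
					     struct amdgpu_ip_block *block)
{
	if (amdgpu_in_reset(adev) || adev->in_suspend)
		return block->version->funcs->resume(adev);

	return block->version->funcs->hw_init(adev);
}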
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2300 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301 struct amdgpu_ring *ring = adev->rings[i];
2303 /* No need to setup the GPU scheduler for rings that don't need it */
2304 if (!ring || ring->no_scheduler)
2307 switch (ring->funcs->type) {
2308 case AMDGPU_RING_TYPE_GFX:
2309 timeout = adev->gfx_timeout;
2311 case AMDGPU_RING_TYPE_COMPUTE:
2312 timeout = adev->compute_timeout;
2314 case AMDGPU_RING_TYPE_SDMA:
2315 timeout = adev->sdma_timeout;
2318 timeout = adev->video_timeout;
2322 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323 ring->num_hw_submission, amdgpu_job_hang_limit,
2324 timeout, adev->reset_domain->wq,
2325 ring->sched_score, ring->name,
2328 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2339 * amdgpu_device_ip_init - run init for hardware IPs
2341 * @adev: amdgpu_device pointer
2343 * Main initialization pass for hardware IPs. The list of all the hardware
2344 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345 * are run. sw_init initializes the software state associated with each IP
2346 * and hw_init initializes the hardware associated with each IP.
2347 * Returns 0 on success, negative error code on failure.
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2353 r = amdgpu_ras_init(adev);
2357 for (i = 0; i < adev->num_ip_blocks; i++) {
2358 if (!adev->ip_blocks[i].status.valid)
2360 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2362 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363 adev->ip_blocks[i].version->funcs->name, r);
2366 adev->ip_blocks[i].status.sw = true;
2368 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369 /* need to do common hw init early so everything is set up for gmc */
2370 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2372 DRM_ERROR("hw_init %d failed %d\n", i, r);
2375 adev->ip_blocks[i].status.hw = true;
2376 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377 /* need to do gmc hw init early so we can allocate gpu mem */
2378 /* Try to reserve bad pages early */
2379 if (amdgpu_sriov_vf(adev))
2380 amdgpu_virt_exchange_data(adev);
2382 r = amdgpu_device_vram_scratch_init(adev);
2384 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2387 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2389 DRM_ERROR("hw_init %d failed %d\n", i, r);
2392 r = amdgpu_device_wb_init(adev);
2394 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2397 adev->ip_blocks[i].status.hw = true;
2399 /* right after GMC hw init, we create CSA */
2401 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402 AMDGPU_GEM_DOMAIN_VRAM,
2405 DRM_ERROR("allocate CSA failed %d\n", r);
2412 if (amdgpu_sriov_vf(adev))
2413 amdgpu_virt_init_data_exchange(adev);
2415 r = amdgpu_ib_pool_init(adev);
2417 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2422 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2426 r = amdgpu_device_ip_hw_init_phase1(adev);
2430 r = amdgpu_device_fw_loading(adev);
2434 r = amdgpu_device_ip_hw_init_phase2(adev);
2439 * retired pages will be loaded from eeprom and reserved here;
2440 * this must be called after amdgpu_device_ip_hw_init_phase2, since
2441 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442 * functional for I2C communication, which is only true at this point.
2444 * amdgpu_ras_recovery_init may fail, but the caller only cares about
2445 * the failure coming from a bad-gpu situation, and stops the amdgpu
2446 * init process accordingly. For any other failure it still releases
2447 * all the resources and prints an error message, rather than
2448 * returning a negative value to the upper level.
2450 * Note: theoretically, this should be called before all vram
2451 * allocations, to protect retired pages from being abused.
2453 r = amdgpu_ras_recovery_init(adev);
2458 * In case of XGMI, grab an extra reference for the reset domain of this device
2460 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461 if (amdgpu_xgmi_add_device(adev) == 0) {
2462 if (!amdgpu_sriov_vf(adev)) {
2463 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2465 if (!hive->reset_domain ||
2466 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2468 amdgpu_put_xgmi_hive(hive);
2472 /* Drop the early temporary reset domain we created for device */
2473 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474 adev->reset_domain = hive->reset_domain;
2475 amdgpu_put_xgmi_hive(hive);
2480 r = amdgpu_device_init_schedulers(adev);
2484 /* Don't init kfd if the whole hive needs to be reset during init */
2485 if (!adev->gmc.xgmi.pending_reset)
2486 amdgpu_amdkfd_device_init(adev);
2488 amdgpu_fru_get_product_info(adev);
2491 if (amdgpu_sriov_vf(adev))
2492 amdgpu_virt_release_full_gpu(adev, true);
2498 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2500 * @adev: amdgpu_device pointer
2502 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2503 * this function before a GPU reset. If the value is retained after a
2504 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2508 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2512 * amdgpu_device_check_vram_lost - check if vram is valid
2514 * @adev: amdgpu_device pointer
2516 * Checks the reset magic value written to the gart pointer in VRAM.
2517 * The driver calls this after a GPU reset to see if the contents of
2518 * VRAM are lost or not.
2519 * Returns true if vram is lost, false if not.
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2523 if (memcmp(adev->gart.ptr, adev->reset_magic,
2524 AMDGPU_RESET_MAGIC_NUM))
2527 if (!amdgpu_in_reset(adev))
2531 * For all ASICs with baco/mode1 reset, the VRAM is
2532 * always assumed to be lost.
2534 switch (amdgpu_asic_reset_method(adev)) {
2535 case AMD_RESET_METHOD_BACO:
2536 case AMD_RESET_METHOD_MODE1:
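/*
 * Illustrative sketch (hypothetical helper, not a driver entry point):
 * per the kerneldoc above, the two helpers are meant to bracket an ASIC
 * reset, roughly like this:
 */
static int __maybe_unused
amdgpu_example_reset_and_check_vram(struct amdgpu_device *adev)
{
	int r;

	amdgpu_device_fill_reset_magic(adev);
	r = amdgpu_asic_reset(adev);
	if (r)
		return r;

	if (amdgpu_device_check_vram_lost(adev))
		amdgpu_inc_vram_lost(adev);

	return 0;
}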
2544 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2546 * @adev: amdgpu_device pointer
2547 * @state: clockgating state (gate or ungate)
2549 * The list of all the hardware IPs that make up the asic is walked and the
2550 * set_clockgating_state callbacks are run.
2551 * This is the late initialization pass that enables clockgating, or the
2552 * fini/suspend pass that disables clockgating, for hardware IPs.
2553 * Returns 0 on success, negative error code on failure.
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557 enum amd_clockgating_state state)
2561 if (amdgpu_emu_mode == 1)
2564 for (j = 0; j < adev->num_ip_blocks; j++) {
2565 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566 if (!adev->ip_blocks[i].status.late_initialized)
2568 /* skip CG for GFX on S0ix */
2569 if (adev->in_s0ix &&
2570 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2572 /* skip CG for VCE/UVD, it's handled specially */
2573 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578 /* enable clockgating to save power */
2579 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2582 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583 adev->ip_blocks[i].version->funcs->name, r);
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593 enum amd_powergating_state state)
2597 if (amdgpu_emu_mode == 1)
2600 for (j = 0; j < adev->num_ip_blocks; j++) {
2601 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602 if (!adev->ip_blocks[i].status.late_initialized)
2604 /* skip PG for GFX on S0ix */
2605 if (adev->in_s0ix &&
2606 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2608 /* skip PG for VCE/UVD, it's handled specially */
2609 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614 /* enable powergating to save power */
2615 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2618 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619 adev->ip_blocks[i].version->funcs->name, r);
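/*
 * Illustrative sketch: gating walks the IP list front to back, while
 * ungating walks it back to front, which is what the index computation
 * in the two functions above encodes (hypothetical helper):
 */
static inline int amdgpu_example_gating_walk_index(struct amdgpu_device *adev,
						   bool gate, int j)
{
	return gate ? j : adev->num_ip_blocks - j - 1;
}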
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2629 struct amdgpu_gpu_instance *gpu_ins;
2630 struct amdgpu_device *adev;
2633 mutex_lock(&mgpu_info.mutex);
2636 * The MGPU fan boost feature should be enabled
2637 * only when there are two or more dGPUs in the system.
2640 if (mgpu_info.num_dgpu < 2)
2643 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645 adev = gpu_ins->adev;
2646 if (!(adev->flags & AMD_IS_APU) &&
2647 !gpu_ins->mgpu_fan_enabled) {
2648 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2652 gpu_ins->mgpu_fan_enabled = 1;
2657 mutex_unlock(&mgpu_info.mutex);
2663 * amdgpu_device_ip_late_init - run late init for hardware IPs
2665 * @adev: amdgpu_device pointer
2667 * Late initialization pass for hardware IPs. The list of all the hardware
2668 * IPs that make up the asic is walked and the late_init callbacks are run.
2669 * late_init covers any special initialization that an IP requires
2670 * after all of the IPs have been initialized, or something that needs to happen
2671 * late in the init process.
2672 * Returns 0 on success, negative error code on failure.
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2676 struct amdgpu_gpu_instance *gpu_instance;
2679 for (i = 0; i < adev->num_ip_blocks; i++) {
2680 if (!adev->ip_blocks[i].status.hw)
2682 if (adev->ip_blocks[i].version->funcs->late_init) {
2683 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2685 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686 adev->ip_blocks[i].version->funcs->name, r);
2690 adev->ip_blocks[i].status.late_initialized = true;
2693 r = amdgpu_ras_late_init(adev);
2695 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2699 amdgpu_ras_set_error_query_ready(adev, true);
2701 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2704 amdgpu_device_fill_reset_magic(adev);
2706 r = amdgpu_device_enable_mgpu_fan_boost();
2708 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2710 /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2711 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2712 adev->asic_type == CHIP_ALDEBARAN))
2713 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2715 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716 mutex_lock(&mgpu_info.mutex);
2719 * Reset device p-state to low, as this was booted with high.
2721 * This should be performed only after all devices from the same
2722 * hive get initialized.
2724 * However, the number of devices in the hive is not known in
2725 * advance; it is counted one by one as the devices initialize.
2727 * So, we wait for all XGMI interlinked devices to be initialized.
2728 * This may bring some delay, as those devices may come from
2729 * different hives. But that should be OK.
2731 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732 for (i = 0; i < mgpu_info.num_gpu; i++) {
2733 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734 if (gpu_instance->adev->flags & AMD_IS_APU)
2737 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738 AMDGPU_XGMI_PSTATE_MIN);
2740 DRM_ERROR("pstate setting failed (%d).\n", r);
2746 mutex_unlock(&mgpu_info.mutex);
2753 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2755 * @adev: amdgpu_device pointer
2757 * For ASICs that need to disable the SMC first
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2763 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2766 for (i = 0; i < adev->num_ip_blocks; i++) {
2767 if (!adev->ip_blocks[i].status.hw)
2769 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771 /* XXX handle errors */
2773 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774 adev->ip_blocks[i].version->funcs->name, r);
2776 adev->ip_blocks[i].status.hw = false;
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2786 for (i = 0; i < adev->num_ip_blocks; i++) {
2787 if (!adev->ip_blocks[i].version->funcs->early_fini)
2790 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2792 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793 adev->ip_blocks[i].version->funcs->name, r);
2797 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2800 amdgpu_amdkfd_suspend(adev, false);
2802 /* Workaround for ASICs that need to disable the SMC first */
2803 amdgpu_device_smu_fini_early(adev);
2805 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806 if (!adev->ip_blocks[i].status.hw)
2809 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810 /* XXX handle errors */
2812 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813 adev->ip_blocks[i].version->funcs->name, r);
2816 adev->ip_blocks[i].status.hw = false;
2819 if (amdgpu_sriov_vf(adev)) {
2820 if (amdgpu_virt_release_full_gpu(adev, false))
2821 DRM_ERROR("failed to release exclusive mode on fini\n");
2828 * amdgpu_device_ip_fini - run fini for hardware IPs
2830 * @adev: amdgpu_device pointer
2832 * Main teardown pass for hardware IPs. The list of all the hardware
2833 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834 * are run. hw_fini tears down the hardware associated with each IP
2835 * and sw_fini tears down any software state associated with each IP.
2836 * Returns 0 on success, negative error code on failure.
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2842 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843 amdgpu_virt_release_ras_err_handler_data(adev);
2845 if (adev->gmc.xgmi.num_physical_nodes > 1)
2846 amdgpu_xgmi_remove_device(adev);
2848 amdgpu_amdkfd_device_fini_sw(adev);
2850 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851 if (!adev->ip_blocks[i].status.sw)
2854 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855 amdgpu_ucode_free_bo(adev);
2856 amdgpu_free_static_csa(&adev->virt.csa_obj);
2857 amdgpu_device_wb_fini(adev);
2858 amdgpu_device_vram_scratch_fini(adev);
2859 amdgpu_ib_pool_fini(adev);
2862 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863 /* XXX handle errors */
2865 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866 adev->ip_blocks[i].version->funcs->name, r);
2868 adev->ip_blocks[i].status.sw = false;
2869 adev->ip_blocks[i].status.valid = false;
2872 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873 if (!adev->ip_blocks[i].status.late_initialized)
2875 if (adev->ip_blocks[i].version->funcs->late_fini)
2876 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877 adev->ip_blocks[i].status.late_initialized = false;
2880 amdgpu_ras_fini(adev);
2886 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2888 * @work: work_struct.
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2892 struct amdgpu_device *adev =
2893 container_of(work, struct amdgpu_device, delayed_init_work.work);
2896 r = amdgpu_ib_ring_tests(adev);
2898 DRM_ERROR("ib ring test failed (%d).\n", r);
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2903 struct amdgpu_device *adev =
2904 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2906 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2909 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910 adev->gfx.gfx_off_state = true;
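/*
 * Illustrative sketch (hypothetical helper and delay value): the delayed
 * work above is armed instead of entering gfxoff immediately, so that
 * back-to-back submissions don't bounce the GFX power state:
 */
static inline void amdgpu_example_arm_gfx_off(struct amdgpu_device *adev,
					      unsigned int delay_ms)
{
	schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
			      msecs_to_jiffies(delay_ms));
}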
2914 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2916 * @adev: amdgpu_device pointer
2918 * Main suspend function for hardware IPs. The list of all the hardware
2919 * IPs that make up the asic is walked, clockgating is disabled and the
2920 * suspend callbacks are run. suspend puts the hardware and software state
2921 * in each IP into a state suitable for suspend.
2922 * Returns 0 on success, negative error code on failure.
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2928 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2932 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
2933 * and df cstate feature disablement for the gpu reset (e.g. Mode1Reset)
2934 * scenario. Add the missing df cstate disablement here.
2936 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2937 dev_warn(adev->dev, "Failed to disallow df cstate");
2939 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2940 if (!adev->ip_blocks[i].status.valid)
2943 /* displays are handled separately */
2944 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2947 /* XXX handle errors */
2948 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2951 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2952 adev->ip_blocks[i].version->funcs->name, r);
2956 adev->ip_blocks[i].status.hw = false;
2963 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2965 * @adev: amdgpu_device pointer
2967 * Main suspend function for hardware IPs. The list of all the hardware
2968 * IPs that make up the asic is walked, clockgating is disabled and the
2969 * suspend callbacks are run. suspend puts the hardware and software state
2970 * in each IP into a state suitable for suspend.
2971 * Returns 0 on success, negative error code on failure.
2973 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2978 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2980 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2981 if (!adev->ip_blocks[i].status.valid)
2983 /* displays are handled in phase1 */
2984 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2986 /* PSP lost connection when err_event_athub occurs */
2987 if (amdgpu_ras_intr_triggered() &&
2988 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2989 adev->ip_blocks[i].status.hw = false;
2993 /* skip unnecessary suspend if we have not initialized them yet */
2994 if (adev->gmc.xgmi.pending_reset &&
2995 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2996 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2997 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2998 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2999 adev->ip_blocks[i].status.hw = false;
3003 /* skip suspend of gfx and psp for S0ix;
3004 * gfx is in the gfxoff state, so on resume it will exit gfxoff just
3005 * like at runtime. PSP is also part of the always-on hardware,
3006 * so there is no need to suspend it.
3008 if (adev->in_s0ix &&
3009 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3010 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3013 /* XXX handle errors */
3014 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3017 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3018 adev->ip_blocks[i].version->funcs->name, r);
3020 adev->ip_blocks[i].status.hw = false;
3021 /* handle putting the SMC in the appropriate state */
3022 if (!amdgpu_sriov_vf(adev)) {
3023 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3024 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3026 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3027 adev->mp1_state, r);
3038 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3040 * @adev: amdgpu_device pointer
3042 * Main suspend function for hardware IPs. The list of all the hardware
3043 * IPs that make up the asic is walked, clockgating is disabled and the
3044 * suspend callbacks are run. suspend puts the hardware and software state
3045 * in each IP into a state suitable for suspend.
3046 * Returns 0 on success, negative error code on failure.
3048 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3052 if (amdgpu_sriov_vf(adev)) {
3053 amdgpu_virt_fini_data_exchange(adev);
3054 amdgpu_virt_request_full_gpu(adev, false);
3057 r = amdgpu_device_ip_suspend_phase1(adev);
3060 r = amdgpu_device_ip_suspend_phase2(adev);
3062 if (amdgpu_sriov_vf(adev))
3063 amdgpu_virt_release_full_gpu(adev, false);
3068 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3072 static enum amd_ip_block_type ip_order[] = {
3073 AMD_IP_BLOCK_TYPE_COMMON,
3074 AMD_IP_BLOCK_TYPE_GMC,
3075 AMD_IP_BLOCK_TYPE_PSP,
3076 AMD_IP_BLOCK_TYPE_IH,
3079 for (i = 0; i < adev->num_ip_blocks; i++) {
3081 struct amdgpu_ip_block *block;
3083 block = &adev->ip_blocks[i];
3084 block->status.hw = false;
3086 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3088 if (block->version->type != ip_order[j] ||
3089 !block->status.valid)
3092 r = block->version->funcs->hw_init(adev);
3093 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3096 block->status.hw = true;
3103 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3107 static enum amd_ip_block_type ip_order[] = {
3108 AMD_IP_BLOCK_TYPE_SMC,
3109 AMD_IP_BLOCK_TYPE_DCE,
3110 AMD_IP_BLOCK_TYPE_GFX,
3111 AMD_IP_BLOCK_TYPE_SDMA,
3112 AMD_IP_BLOCK_TYPE_UVD,
3113 AMD_IP_BLOCK_TYPE_VCE,
3114 AMD_IP_BLOCK_TYPE_VCN
3117 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3119 struct amdgpu_ip_block *block;
3121 for (j = 0; j < adev->num_ip_blocks; j++) {
3122 block = &adev->ip_blocks[j];
3124 if (block->version->type != ip_order[i] ||
3125 !block->status.valid ||
3129 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3130 r = block->version->funcs->resume(adev);
3132 r = block->version->funcs->hw_init(adev);
3134 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3137 block->status.hw = true;
3145 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3147 * @adev: amdgpu_device pointer
3149 * First resume function for hardware IPs. The list of all the hardware
3150 * IPs that make up the asic is walked and the resume callbacks are run for
3151 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3152 * after a suspend and updates the software state as necessary. This
3153 * function is also used for restoring the GPU after a GPU reset.
3154 * Returns 0 on success, negative error code on failure.
3156 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3160 for (i = 0; i < adev->num_ip_blocks; i++) {
3161 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3163 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3164 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3165 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3166 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3168 r = adev->ip_blocks[i].version->funcs->resume(adev);
3170 DRM_ERROR("resume of IP block <%s> failed %d\n",
3171 adev->ip_blocks[i].version->funcs->name, r);
3174 adev->ip_blocks[i].status.hw = true;
3182 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3184 * @adev: amdgpu_device pointer
3186 * Second resume function for hardware IPs. The list of all the hardware
3187 * IPs that make up the asic is walked and the resume callbacks are run for
3188 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3189 * functional state after a suspend and updates the software state as
3190 * necessary. This function is also used for restoring the GPU after a GPU reset.
3192 * Returns 0 on success, negative error code on failure.
3194 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3198 for (i = 0; i < adev->num_ip_blocks; i++) {
3199 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3201 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3202 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3203 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3204 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3206 r = adev->ip_blocks[i].version->funcs->resume(adev);
3208 DRM_ERROR("resume of IP block <%s> failed %d\n",
3209 adev->ip_blocks[i].version->funcs->name, r);
3212 adev->ip_blocks[i].status.hw = true;
3214 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3215 /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3216 * amdgpu_device_resume() after IP resume.
3218 amdgpu_gfx_off_ctrl(adev, false);
3219 DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3228 * amdgpu_device_ip_resume - run resume for hardware IPs
3230 * @adev: amdgpu_device pointer
3232 * Main resume function for hardware IPs. The hardware IPs
3233 * are split into two resume functions because they are
3234 * also used in recovering from a GPU reset and some additional
3235 * steps need to be taken between them. In this case (S3/S4) they are run back to back.
3237 * Returns 0 on success, negative error code on failure.
3239 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3243 r = amdgpu_amdkfd_resume_iommu(adev);
3247 r = amdgpu_device_ip_resume_phase1(adev);
3251 r = amdgpu_device_fw_loading(adev);
3255 r = amdgpu_device_ip_resume_phase2(adev);
3261 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3263 * @adev: amdgpu_device pointer
3265 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3267 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3269 if (amdgpu_sriov_vf(adev)) {
3270 if (adev->is_atom_fw) {
3271 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3272 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3274 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3275 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3278 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3279 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3284 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3286 * @asic_type: AMD asic type
3288 * Check if there is DC (new modesetting infrastructure) support for an asic.
3289 * Returns true if DC has support, false if not.
3291 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3293 switch (asic_type) {
3294 #ifdef CONFIG_DRM_AMDGPU_SI
3298 /* chips with no display hardware */
3300 #if defined(CONFIG_DRM_AMD_DC)
3306 * We have systems in the wild with these ASICs that require
3307 * LVDS and VGA support which is not supported with DC.
3309 * Fallback to the non-DC driver here by default so as not to
3310 * cause regressions.
3312 #if defined(CONFIG_DRM_AMD_DC_SI)
3313 return amdgpu_dc > 0;
3322 * We have systems in the wild with these ASICs that require
3323 * VGA support which is not supported with DC.
3325 * Fallback to the non-DC driver here by default so as not to
3326 * cause regressions.
3328 return amdgpu_dc > 0;
3330 return amdgpu_dc != 0;
3334 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3335 "but isn't supported by ASIC, ignoring\n");
3342 * amdgpu_device_has_dc_support - check if dc is supported
3344 * @adev: amdgpu_device pointer
3346 * Returns true for supported, false for not supported
3348 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3350 if (amdgpu_sriov_vf(adev) ||
3351 adev->enable_virtual_display ||
3352 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3355 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3358 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3360 struct amdgpu_device *adev =
3361 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3362 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3364 /* It's a bug to not have a hive within this function */
3369 * Use task barrier to synchronize all xgmi reset works across the
3370 * hive. task_barrier_enter and task_barrier_exit will block
3371 * until all the threads running the xgmi reset works reach
3372 * those points. task_barrier_full will do both blocks.
3374 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3376 task_barrier_enter(&hive->tb);
3377 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3379 if (adev->asic_reset_res)
3382 task_barrier_exit(&hive->tb);
3383 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3385 if (adev->asic_reset_res)
3388 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3389 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3390 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3393 task_barrier_full(&hive->tb);
3394 adev->asic_reset_res = amdgpu_asic_reset(adev);
3398 if (adev->asic_reset_res)
3399 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3400 adev->asic_reset_res, adev_to_drm(adev)->unique);
3401 amdgpu_put_xgmi_hive(hive);
3404 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3406 char *input = amdgpu_lockup_timeout;
3407 char *timeout_setting = NULL;
3413 * By default the timeout for non-compute jobs is 10000 ms
3414 * and 60000 ms for compute jobs.
3415 * In SR-IOV or passthrough mode, the timeout for compute
3416 * jobs is 60000 ms by default.
3418 adev->gfx_timeout = msecs_to_jiffies(10000);
3419 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3420 if (amdgpu_sriov_vf(adev))
3421 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3422 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3424 adev->compute_timeout = msecs_to_jiffies(60000);
3426 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3427 while ((timeout_setting = strsep(&input, ",")) &&
3428 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3429 ret = kstrtol(timeout_setting, 0, &timeout);
3436 } else if (timeout < 0) {
3437 timeout = MAX_SCHEDULE_TIMEOUT;
3438 dev_warn(adev->dev, "lockup timeout disabled");
3439 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3441 timeout = msecs_to_jiffies(timeout);
3446 adev->gfx_timeout = timeout;
3449 adev->compute_timeout = timeout;
3452 adev->sdma_timeout = timeout;
3455 adev->video_timeout = timeout;
3462 * There is only one value specified and
3463 * it should apply to all non-compute jobs.
3466 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3467 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3468 adev->compute_timeout = adev->gfx_timeout;
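/*
 * Usage example for the module parameter parsed above (the values are
 * illustrative): a single entry applies to all non-compute queues,
 *
 *	amdgpu.lockup_timeout=10000
 *
 * while a comma-separated list sets gfx,compute,sdma,video in the order
 * of the switch above,
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * and a negative entry disables the timeout for that queue type.
 */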
3476 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3478 * @adev: amdgpu_device pointer
3480 * RAM is direct-mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3482 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3484 struct iommu_domain *domain;
3486 domain = iommu_get_domain_for_dev(adev->dev);
3487 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3488 adev->ram_is_direct_mapped = true;
3491 static const struct attribute *amdgpu_dev_attributes[] = {
3492 &dev_attr_product_name.attr,
3493 &dev_attr_product_number.attr,
3494 &dev_attr_serial_number.attr,
3495 &dev_attr_pcie_replay_count.attr,
3500 * amdgpu_device_init - initialize the driver
3502 * @adev: amdgpu_device pointer
3503 * @flags: driver flags
3505 * Initializes the driver info and hw (all asics).
3506 * Returns 0 for success or an error on failure.
3507 * Called at driver startup.
3509 int amdgpu_device_init(struct amdgpu_device *adev,
3512 struct drm_device *ddev = adev_to_drm(adev);
3513 struct pci_dev *pdev = adev->pdev;
3518 adev->shutdown = false;
3519 adev->flags = flags;
3521 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3522 adev->asic_type = amdgpu_force_asic_type;
3524 adev->asic_type = flags & AMD_ASIC_MASK;
3526 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3527 if (amdgpu_emu_mode == 1)
3528 adev->usec_timeout *= 10;
3529 adev->gmc.gart_size = 512 * 1024 * 1024;
3530 adev->accel_working = false;
3531 adev->num_rings = 0;
3532 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3533 adev->mman.buffer_funcs = NULL;
3534 adev->mman.buffer_funcs_ring = NULL;
3535 adev->vm_manager.vm_pte_funcs = NULL;
3536 adev->vm_manager.vm_pte_num_scheds = 0;
3537 adev->gmc.gmc_funcs = NULL;
3538 adev->harvest_ip_mask = 0x0;
3539 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3540 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3542 adev->smc_rreg = &amdgpu_invalid_rreg;
3543 adev->smc_wreg = &amdgpu_invalid_wreg;
3544 adev->pcie_rreg = &amdgpu_invalid_rreg;
3545 adev->pcie_wreg = &amdgpu_invalid_wreg;
3546 adev->pciep_rreg = &amdgpu_invalid_rreg;
3547 adev->pciep_wreg = &amdgpu_invalid_wreg;
3548 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3549 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3550 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3551 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3552 adev->didt_rreg = &amdgpu_invalid_rreg;
3553 adev->didt_wreg = &amdgpu_invalid_wreg;
3554 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3555 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3556 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3557 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3559 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3560 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3561 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3563 /* mutex initialization is all done here so we
3564 * can recall functions without having locking issues */
3565 mutex_init(&adev->firmware.mutex);
3566 mutex_init(&adev->pm.mutex);
3567 mutex_init(&adev->gfx.gpu_clock_mutex);
3568 mutex_init(&adev->srbm_mutex);
3569 mutex_init(&adev->gfx.pipe_reserve_mutex);
3570 mutex_init(&adev->gfx.gfx_off_mutex);
3571 mutex_init(&adev->grbm_idx_mutex);
3572 mutex_init(&adev->mn_lock);
3573 mutex_init(&adev->virt.vf_errors.lock);
3574 hash_init(adev->mn_hash);
3575 mutex_init(&adev->psp.mutex);
3576 mutex_init(&adev->notifier_lock);
3577 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3578 mutex_init(&adev->benchmark_mutex);
3580 amdgpu_device_init_apu_flags(adev);
3582 r = amdgpu_device_check_arguments(adev);
3586 spin_lock_init(&adev->mmio_idx_lock);
3587 spin_lock_init(&adev->smc_idx_lock);
3588 spin_lock_init(&adev->pcie_idx_lock);
3589 spin_lock_init(&adev->uvd_ctx_idx_lock);
3590 spin_lock_init(&adev->didt_idx_lock);
3591 spin_lock_init(&adev->gc_cac_idx_lock);
3592 spin_lock_init(&adev->se_cac_idx_lock);
3593 spin_lock_init(&adev->audio_endpt_idx_lock);
3594 spin_lock_init(&adev->mm_stats.lock);
3596 INIT_LIST_HEAD(&adev->shadow_list);
3597 mutex_init(&adev->shadow_list_lock);
3599 INIT_LIST_HEAD(&adev->reset_list);
3601 INIT_LIST_HEAD(&adev->ras_list);
3603 INIT_DELAYED_WORK(&adev->delayed_init_work,
3604 amdgpu_device_delayed_init_work_handler);
3605 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3606 amdgpu_device_delay_enable_gfx_off);
3608 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3610 adev->gfx.gfx_off_req_count = 1;
3611 adev->gfx.gfx_off_residency = 0;
3612 adev->gfx.gfx_off_entrycount = 0;
3613 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3615 atomic_set(&adev->throttling_logging_enabled, 1);
3617 * If throttling continues, logging will be performed every minute
3618 * to avoid log flooding. "-1" is subtracted since the thermal
3619 * throttling interrupt comes every second. Thus, the total logging
3620 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3621 * for throttling interrupt) = 60 seconds.
3623 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3624 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
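/*
 * Illustrative sketch: a consumer of this state would gate its print
 * with __ratelimit(), which returns true at most once per the 60 s
 * window configured above (hypothetical call site):
 *
 *	if (__ratelimit(&adev->throttling_logging_rs))
 *		dev_warn(adev->dev, "thermal throttling detected\n");
 */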
3626 /* Registers mapping */
3627 /* TODO: block userspace mapping of io register */
3628 if (adev->asic_type >= CHIP_BONAIRE) {
3629 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3630 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3632 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3633 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3636 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3637 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3639 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3640 if (adev->rmmio == NULL) {
3643 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3644 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3646 amdgpu_device_get_pcie_info(adev);
3649 DRM_INFO("MCBP is enabled\n");
3652 * Reset domain needs to be present early, before the XGMI hive is
3653 * discovered (if any) and initialized, to use the reset sem and in_gpu_reset
3654 * flag early on during init and before calling RREG32.
3656 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3657 if (!adev->reset_domain)
3660 /* detect hw virtualization here */
3661 amdgpu_detect_virtualization(adev);
3663 r = amdgpu_device_get_job_timeout_settings(adev);
3665 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3669 /* early init functions */
3670 r = amdgpu_device_ip_early_init(adev);
3674 /* Enable TMZ based on IP_VERSION */
3675 amdgpu_gmc_tmz_set(adev);
3677 amdgpu_gmc_noretry_set(adev);
3678 /* Need to get xgmi info early to decide the reset behavior */
3679 if (adev->gmc.xgmi.supported) {
3680 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3685 /* enable PCIE atomic ops */
3686 if (amdgpu_sriov_vf(adev))
3687 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3688 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3689 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3691 adev->have_atomics_support =
3692 !pci_enable_atomic_ops_to_root(adev->pdev,
3693 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3694 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3695 if (!adev->have_atomics_support)
3696 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3698 /* doorbell bar mapping and doorbell index init */
3699 amdgpu_device_doorbell_init(adev);
3701 if (amdgpu_emu_mode == 1) {
3702 /* post the asic on emulation mode */
3703 emu_soc_asic_init(adev);
3704 goto fence_driver_init;
3707 amdgpu_reset_init(adev);
3709 /* detect if we have an SR-IOV vbios */
3710 amdgpu_device_detect_sriov_bios(adev);
3712 /* check if we need to reset the asic
3713 * E.g., driver was not cleanly unloaded previously, etc.
3715 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3716 if (adev->gmc.xgmi.num_physical_nodes) {
3717 dev_info(adev->dev, "Pending hive reset.\n");
3718 adev->gmc.xgmi.pending_reset = true;
3719 /* Only need to init the necessary blocks for the SMU to handle the reset */
3720 for (i = 0; i < adev->num_ip_blocks; i++) {
3721 if (!adev->ip_blocks[i].status.valid)
3723 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3724 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3725 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3726 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3727 DRM_DEBUG("IP %s disabled for hw_init.\n",
3728 adev->ip_blocks[i].version->funcs->name);
3729 adev->ip_blocks[i].status.hw = true;
3733 r = amdgpu_asic_reset(adev);
3735 dev_err(adev->dev, "asic reset on init failed\n");
3741 pci_enable_pcie_error_reporting(adev->pdev);
3743 /* Post card if necessary */
3744 if (amdgpu_device_need_post(adev)) {
3746 dev_err(adev->dev, "no vBIOS found\n");
3750 DRM_INFO("GPU posting now...\n");
3751 r = amdgpu_device_asic_init(adev);
3753 dev_err(adev->dev, "gpu post error!\n");
3758 if (adev->is_atom_fw) {
3759 /* Initialize clocks */
3760 r = amdgpu_atomfirmware_get_clock_info(adev);
3762 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3763 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3767 /* Initialize clocks */
3768 r = amdgpu_atombios_get_clock_info(adev);
3770 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3771 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3774 /* init i2c buses */
3775 if (!amdgpu_device_has_dc_support(adev))
3776 amdgpu_atombios_i2c_init(adev);
3781 r = amdgpu_fence_driver_sw_init(adev);
3783 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3784 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3788 /* init the mode config */
3789 drm_mode_config_init(adev_to_drm(adev));
3791 r = amdgpu_device_ip_init(adev);
3793 /* failed in exclusive mode due to timeout */
3794 if (amdgpu_sriov_vf(adev) &&
3795 !amdgpu_sriov_runtime(adev) &&
3796 amdgpu_virt_mmio_blocked(adev) &&
3797 !amdgpu_virt_wait_reset(adev)) {
3798 dev_err(adev->dev, "VF exclusive mode timeout\n");
3799 /* Don't send request since VF is inactive. */
3800 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3801 adev->virt.ops = NULL;
3803 goto release_ras_con;
3805 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3806 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3807 goto release_ras_con;
3810 amdgpu_fence_driver_hw_init(adev);
3813 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3814 adev->gfx.config.max_shader_engines,
3815 adev->gfx.config.max_sh_per_se,
3816 adev->gfx.config.max_cu_per_sh,
3817 adev->gfx.cu_info.number);
3819 adev->accel_working = true;
3821 amdgpu_vm_check_compute_bug(adev);
3823 /* Initialize the buffer migration limit. */
3824 if (amdgpu_moverate >= 0)
3825 max_MBps = amdgpu_moverate;
3827 max_MBps = 8; /* Allow 8 MB/s. */
3828 /* Get a log2 for easy divisions. */
3829 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
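/*
 * Illustrative arithmetic: with the log2 stored, the division the
 * comment above refers to becomes a shift; e.g. moving `mb` megabytes
 * at the capped rate takes roughly (mb >> log2_max_MBps) seconds
 * (hypothetical usage, not the exact consumer).
 */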
3831 r = amdgpu_pm_sysfs_init(adev);
3833 adev->pm_sysfs_en = false;
3834 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3836 adev->pm_sysfs_en = true;
3838 r = amdgpu_ucode_sysfs_init(adev);
3840 adev->ucode_sysfs_en = false;
3841 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3843 adev->ucode_sysfs_en = true;
3845 r = amdgpu_psp_sysfs_init(adev);
3847 adev->psp_sysfs_en = false;
3848 if (!amdgpu_sriov_vf(adev))
3849 DRM_ERROR("Creating psp sysfs failed\n");
3851 adev->psp_sysfs_en = true;
3854 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3855 * Otherwise the mgpu fan boost feature will be skipped, since the
3856 * gpu instance count would be too low.
3858 amdgpu_register_gpu_instance(adev);
3860 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3861 * explicit gating rather than handling it automatically.
3863 if (!adev->gmc.xgmi.pending_reset) {
3864 r = amdgpu_device_ip_late_init(adev);
3866 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3867 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3868 goto release_ras_con;
3871 amdgpu_ras_resume(adev);
3872 queue_delayed_work(system_wq, &adev->delayed_init_work,
3873 msecs_to_jiffies(AMDGPU_RESUME_MS));
3876 if (amdgpu_sriov_vf(adev))
3877 flush_delayed_work(&adev->delayed_init_work);
3879 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3881 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3883 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3884 r = amdgpu_pmu_init(adev);
3886 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3888 /* Keep the stored pci config space at hand for restore on a sudden PCI error */
3889 if (amdgpu_device_cache_pci_state(adev->pdev))
3890 pci_restore_state(pdev);
3892 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3893 /* this will fail for cards that aren't VGA class devices, just ignore it */
3895 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3896 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3898 if (amdgpu_device_supports_px(ddev)) {
3900 vga_switcheroo_register_client(adev->pdev,
3901 &amdgpu_switcheroo_ops, px);
3902 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3905 if (adev->gmc.xgmi.pending_reset)
3906 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3907 msecs_to_jiffies(AMDGPU_RESUME_MS));
3909 amdgpu_device_check_iommu_direct_map(adev);
3914 amdgpu_release_ras_context(adev);
3917 amdgpu_vf_error_trans_all(adev);
3922 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3925 /* Clear all CPU mappings pointing to this device */
3926 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3928 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3929 amdgpu_device_doorbell_fini(adev);
3931 iounmap(adev->rmmio);
3933 if (adev->mman.aper_base_kaddr)
3934 iounmap(adev->mman.aper_base_kaddr);
3935 adev->mman.aper_base_kaddr = NULL;
3937 /* Memory manager related */
3938 if (!adev->gmc.xgmi.connected_to_cpu) {
3939 arch_phys_wc_del(adev->gmc.vram_mtrr);
3940 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3945 * amdgpu_device_fini_hw - tear down the driver
3947 * @adev: amdgpu_device pointer
3949 * Tear down the driver info (all asics).
3950 * Called at driver shutdown.
3952 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3954 dev_info(adev->dev, "amdgpu: finishing device.\n");
3955 flush_delayed_work(&adev->delayed_init_work);
3956 adev->shutdown = true;
3958 /* make sure the IB test is finished before entering exclusive mode
3959 * to avoid preemption on the IB test
3961 if (amdgpu_sriov_vf(adev)) {
3962 amdgpu_virt_request_full_gpu(adev, false);
3963 amdgpu_virt_fini_data_exchange(adev);
3966 /* disable all interrupts */
3967 amdgpu_irq_disable_all(adev);
3968 if (adev->mode_info.mode_config_initialized) {
3969 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3970 drm_helper_force_disable_all(adev_to_drm(adev));
3972 drm_atomic_helper_shutdown(adev_to_drm(adev));
3974 amdgpu_fence_driver_hw_fini(adev);
3976 if (adev->mman.initialized) {
3977 flush_delayed_work(&adev->mman.bdev.wq);
3978 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3981 if (adev->pm_sysfs_en)
3982 amdgpu_pm_sysfs_fini(adev);
3983 if (adev->ucode_sysfs_en)
3984 amdgpu_ucode_sysfs_fini(adev);
3985 if (adev->psp_sysfs_en)
3986 amdgpu_psp_sysfs_fini(adev);
3987 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3989 /* ras features must be disabled before hw fini */
3990 amdgpu_ras_pre_fini(adev);
3992 amdgpu_device_ip_fini_early(adev);
3994 amdgpu_irq_fini_hw(adev);
3996 if (adev->mman.initialized)
3997 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3999 amdgpu_gart_dummy_page_fini(adev);
4001 amdgpu_device_unmap_mmio(adev);
4005 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4009 amdgpu_fence_driver_sw_fini(adev);
4010 amdgpu_device_ip_fini(adev);
4011 release_firmware(adev->firmware.gpu_info_fw);
4012 adev->firmware.gpu_info_fw = NULL;
4013 adev->accel_working = false;
4014 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4016 amdgpu_reset_fini(adev);
4018 /* free i2c buses */
4019 if (!amdgpu_device_has_dc_support(adev))
4020 amdgpu_i2c_fini(adev);
4022 if (amdgpu_emu_mode != 1)
4023 amdgpu_atombios_fini(adev);
4027 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4028 vga_switcheroo_unregister_client(adev->pdev);
4029 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4031 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4032 vga_client_unregister(adev->pdev);
4034 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4036 iounmap(adev->rmmio);
4038 amdgpu_device_doorbell_fini(adev);
4042 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4043 amdgpu_pmu_fini(adev);
4044 if (adev->mman.discovery_bin)
4045 amdgpu_discovery_fini(adev);
4047 amdgpu_reset_put_reset_domain(adev->reset_domain);
4048 adev->reset_domain = NULL;
4050 kfree(adev->pci_state);
4055 * amdgpu_device_evict_resources - evict device resources
4056 * @adev: amdgpu device object
4058 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4059 * of the vram memory type. Mainly used for evicting device resources at suspend time.
4063 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4067 /* No need to evict vram on APUs for suspend to ram or s2idle */
4068 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4071 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4073 DRM_WARN("evicting device resources failed\n");
4081 * amdgpu_device_suspend - initiate device suspend
4083 * @dev: drm dev pointer
4084 * @fbcon: notify the fbdev of suspend
4086 * Puts the hw in the suspend state (all asics).
4087 * Returns 0 for success or an error on failure.
4088 * Called at driver suspend.
4090 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4092 struct amdgpu_device *adev = drm_to_adev(dev);
4095 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4098 adev->in_suspend = true;
4100 if (amdgpu_sriov_vf(adev)) {
4101 amdgpu_virt_fini_data_exchange(adev);
4102 r = amdgpu_virt_request_full_gpu(adev, false);
4107 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4108 DRM_WARN("smart shift update failed\n");
4110 drm_kms_helper_poll_disable(dev);
4113 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4115 cancel_delayed_work_sync(&adev->delayed_init_work);
4117 amdgpu_ras_suspend(adev);
4119 amdgpu_device_ip_suspend_phase1(adev);
4122 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4124 r = amdgpu_device_evict_resources(adev);
4128 amdgpu_fence_driver_hw_fini(adev);
4130 amdgpu_device_ip_suspend_phase2(adev);
4132 if (amdgpu_sriov_vf(adev))
4133 amdgpu_virt_release_full_gpu(adev, false);
4139 * amdgpu_device_resume - initiate device resume
4141 * @dev: drm dev pointer
4142 * @fbcon: notify the fbdev of resume
4144 * Bring the hw back to operating state (all asics).
4145 * Returns 0 for success or an error on failure.
4146 * Called at driver resume.
4148 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4150 struct amdgpu_device *adev = drm_to_adev(dev);
4153 if (amdgpu_sriov_vf(adev)) {
4154 r = amdgpu_virt_request_full_gpu(adev, true);
4159 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4163 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4166 if (amdgpu_device_need_post(adev)) {
4167 r = amdgpu_device_asic_init(adev);
4169 dev_err(adev->dev, "amdgpu asic init failed\n");
4172 r = amdgpu_device_ip_resume(adev);
4174 /* no matter what r is, we always need to properly release the full GPU */
4175 if (amdgpu_sriov_vf(adev)) {
4176 amdgpu_virt_init_data_exchange(adev);
4177 amdgpu_virt_release_full_gpu(adev, true);
4181 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4184 amdgpu_fence_driver_hw_init(adev);
4186 r = amdgpu_device_ip_late_init(adev);
4190 queue_delayed_work(system_wq, &adev->delayed_init_work,
4191 msecs_to_jiffies(AMDGPU_RESUME_MS));
4193 if (!adev->in_s0ix) {
4194 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4199 /* Make sure IB tests are flushed */
4200 if (amdgpu_sriov_vf(adev))
4201 amdgpu_irq_gpu_reset_resume_helper(adev);
4202 flush_delayed_work(&adev->delayed_init_work);
4204 if (adev->in_s0ix) {
4205 /* re-enable gfxoff after IP resume. It was disabled for IP resume
4206 * in amdgpu_device_ip_resume_phase2().
4208 amdgpu_gfx_off_ctrl(adev, true);
4209 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4212 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4214 drm_kms_helper_poll_enable(dev);
4216 amdgpu_ras_resume(adev);
4219 * Most of the connector probing functions try to acquire runtime pm
4220 * refs to ensure that the GPU is powered on when connector polling is
4221 * performed. Since we're calling this from a runtime PM callback,
4222 * trying to acquire rpm refs will cause us to deadlock.
4224 * Since we're guaranteed to be holding the rpm lock, it's safe to
4225 * temporarily disable the rpm helpers so this doesn't deadlock us.
4228 dev->dev->power.disable_depth++;
4230 if (!amdgpu_device_has_dc_support(adev))
4231 drm_helper_hpd_irq_event(dev);
4233 drm_kms_helper_hotplug_event(dev);
4235 dev->dev->power.disable_depth--;
4237 adev->in_suspend = false;
4239 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4240 DRM_WARN("smart shift update failed\n");
4246 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4248 * @adev: amdgpu_device pointer
4250 * The list of all the hardware IPs that make up the asic is walked and
4251 * the check_soft_reset callbacks are run. check_soft_reset determines
4252 * if the asic is still hung or not.
4253 * Returns true if any of the IPs are still in a hung state, false if not.
4255 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4258 bool asic_hang = false;
4260 if (amdgpu_sriov_vf(adev))
4263 if (amdgpu_asic_need_full_reset(adev))
4266 for (i = 0; i < adev->num_ip_blocks; i++) {
4267 if (!adev->ip_blocks[i].status.valid)
4269 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4270 adev->ip_blocks[i].status.hang =
4271 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4272 if (adev->ip_blocks[i].status.hang) {
4273 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4281 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4283 * @adev: amdgpu_device pointer
4285 * The list of all the hardware IPs that make up the asic is walked and the
4286 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4287 * handles any IP specific hardware or software state changes that are
4288 * necessary for a soft reset to succeed.
4289 * Returns 0 on success, negative error code on failure.
4291 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4295 for (i = 0; i < adev->num_ip_blocks; i++) {
4296 if (!adev->ip_blocks[i].status.valid)
4298 if (adev->ip_blocks[i].status.hang &&
4299 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4300 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4310 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4312 * @adev: amdgpu_device pointer
4314 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4315 * reset is necessary to recover.
4316 * Returns true if a full asic reset is required, false if not.
4318 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4322 if (amdgpu_asic_need_full_reset(adev))
4325 for (i = 0; i < adev->num_ip_blocks; i++) {
4326 if (!adev->ip_blocks[i].status.valid)
4328 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4329 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4330 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4331 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4332 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4333 if (adev->ip_blocks[i].status.hang) {
4334 dev_info(adev->dev, "Some block need full reset!\n");
4343 * amdgpu_device_ip_soft_reset - do a soft reset
4345 * @adev: amdgpu_device pointer
4347 * The list of all the hardware IPs that make up the asic is walked and the
4348 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4349 * IP specific hardware or software state changes that are necessary to soft reset the IP.
4351 * Returns 0 on success, negative error code on failure.
4353 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4357 for (i = 0; i < adev->num_ip_blocks; i++) {
4358 if (!adev->ip_blocks[i].status.valid)
4360 if (adev->ip_blocks[i].status.hang &&
4361 adev->ip_blocks[i].version->funcs->soft_reset) {
4362 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4372 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4374 * @adev: amdgpu_device pointer
4376 * The list of all the hardware IPs that make up the asic is walked and the
4377 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4378 * handles any IP specific hardware or software state changes that are
4379 * necessary after the IP has been soft reset.
4380 * Returns 0 on success, negative error code on failure.
4382 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4386 for (i = 0; i < adev->num_ip_blocks; i++) {
4387 if (!adev->ip_blocks[i].status.valid)
4389 if (adev->ip_blocks[i].status.hang &&
4390 adev->ip_blocks[i].version->funcs->post_soft_reset)
4391 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4400 * amdgpu_device_recover_vram - Recover some VRAM contents
4402 * @adev: amdgpu_device pointer
4404 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4405 * restore things like GPUVM page tables after a GPU reset where
4406 * the contents of VRAM might be lost.
4409 * 0 on success, negative error code on failure.
4411 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4413 struct dma_fence *fence = NULL, *next = NULL;
4414 struct amdgpu_bo *shadow;
4415 struct amdgpu_bo_vm *vmbo;
4418 if (amdgpu_sriov_runtime(adev))
4419 tmo = msecs_to_jiffies(8000);
4421 tmo = msecs_to_jiffies(100);
4423 dev_info(adev->dev, "recover vram bo from shadow start\n");
4424 mutex_lock(&adev->shadow_list_lock);
4425 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4427 /* No need to recover an evicted BO */
4428 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4429 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4430 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4433 r = amdgpu_bo_restore_shadow(shadow, &next);
4438 tmo = dma_fence_wait_timeout(fence, false, tmo);
4439 dma_fence_put(fence);
4444 } else if (tmo < 0) {
4452 mutex_unlock(&adev->shadow_list_lock);
4455 tmo = dma_fence_wait_timeout(fence, false, tmo);
4456 dma_fence_put(fence);
4458 if (r < 0 || tmo <= 0) {
4459 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4463 dev_info(adev->dev, "recover vram bo from shadow done\n");
4469 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4471 * @adev: amdgpu_device pointer
4472 * @from_hypervisor: request from hypervisor
4474 * Do a VF FLR and reinitialize the ASIC.
4475 * Returns 0 on success, negative error code on failure.
4477 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4478 bool from_hypervisor)
4481 struct amdgpu_hive_info *hive = NULL;
4482 int retry_limit = 0;
4485 amdgpu_amdkfd_pre_reset(adev);
4487 if (from_hypervisor)
4488 r = amdgpu_virt_request_full_gpu(adev, true);
4490 r = amdgpu_virt_reset_gpu(adev);
4494 /* Resume IP prior to SMC */
4495 r = amdgpu_device_ip_reinit_early_sriov(adev);
4499 amdgpu_virt_init_data_exchange(adev);
4501 r = amdgpu_device_fw_loading(adev);
4505 /* now we are okay to resume SMC/CP/SDMA */
4506 r = amdgpu_device_ip_reinit_late_sriov(adev);
4510 hive = amdgpu_get_xgmi_hive(adev);
4511 /* Update PSP FW topology after reset */
4512 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4513 r = amdgpu_xgmi_update_topology(hive, adev);
4516 amdgpu_put_xgmi_hive(hive);
4519 amdgpu_irq_gpu_reset_resume_helper(adev);
4520 r = amdgpu_ib_ring_tests(adev);
4522 amdgpu_amdkfd_post_reset(adev);
4526 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4527 amdgpu_inc_vram_lost(adev);
4528 r = amdgpu_device_recover_vram(adev);
4530 amdgpu_virt_release_full_gpu(adev, true);
4532 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4533 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4537 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4544 * amdgpu_device_has_job_running - check if there is any job in mirror list
4546 * @adev: amdgpu_device pointer
4548 * Check whether any ring still has a job in its pending (mirror) list.
4550 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4553 struct drm_sched_job *job;
4555 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4556 struct amdgpu_ring *ring = adev->rings[i];
4558 if (!ring || !ring->sched.thread)
4561 spin_lock(&ring->sched.job_list_lock);
4562 job = list_first_entry_or_null(&ring->sched.pending_list,
4563 struct drm_sched_job, list);
4564 spin_unlock(&ring->sched.job_list_lock);
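/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * a power-management path could use this helper to refuse to power
 * down while work is still in flight:
 *
 *	if (amdgpu_device_has_job_running(adev))
 *		return -EBUSY;
 */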
4572 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4574 * @adev: amdgpu_device pointer
4576 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the ASIC.
4579 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4582 if (amdgpu_gpu_recovery == 0)
4585 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4586 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4590 if (amdgpu_sriov_vf(adev))
4593 if (amdgpu_gpu_recovery == -1) {
4594 switch (adev->asic_type) {
4595 #ifdef CONFIG_DRM_AMDGPU_SI
4602 #ifdef CONFIG_DRM_AMDGPU_CIK
4609 case CHIP_CYAN_SKILLFISH:
4619 dev_info(adev->dev, "GPU recovery disabled.\n");
4623 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4628 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4630 dev_info(adev->dev, "GPU mode1 reset\n");
4633 pci_clear_master(adev->pdev);
4635 amdgpu_device_cache_pci_state(adev->pdev);
4637 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4638 dev_info(adev->dev, "GPU smu mode1 reset\n");
4639 ret = amdgpu_dpm_mode1_reset(adev);
4641 dev_info(adev->dev, "GPU psp mode1 reset\n");
4642 ret = psp_gpu_reset(adev);
4646 dev_err(adev->dev, "GPU mode1 reset failed\n");
4648 amdgpu_device_load_pci_state(adev->pdev);
4650 /* wait for asic to come out of reset */
4651 for (i = 0; i < adev->usec_timeout; i++) {
4652 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4654 if (memsize != 0xffffffff)
4659 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4663 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4664 struct amdgpu_reset_context *reset_context)
4667 struct amdgpu_job *job = NULL;
4668 bool need_full_reset =
4669 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4671 if (reset_context->reset_req_dev == adev)
4672 job = reset_context->job;
4674 if (amdgpu_sriov_vf(adev)) {
4675 /* stop the data exchange thread */
4676 amdgpu_virt_fini_data_exchange(adev);
4679 amdgpu_fence_driver_isr_toggle(adev, true);
4681 /* block all schedulers and reset given job's ring */
4682 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4683 struct amdgpu_ring *ring = adev->rings[i];
4685 if (!ring || !ring->sched.thread)
4688 /* Clear job fences from the fence driver to avoid force_completion
4689 * leaving NULL and VM flush fences in the fence driver. */
4690 amdgpu_fence_driver_clear_job_fences(ring);
4692 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4693 amdgpu_fence_driver_force_completion(ring);
4696 amdgpu_fence_driver_isr_toggle(adev, false);
4699 drm_sched_increase_karma(&job->base);
4701 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4702 /* If reset handler not implemented, continue; otherwise return */
4708 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4709 if (!amdgpu_sriov_vf(adev)) {
4711 if (!need_full_reset)
4712 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4714 if (!need_full_reset && amdgpu_gpu_recovery) {
4715 amdgpu_device_ip_pre_soft_reset(adev);
4716 r = amdgpu_device_ip_soft_reset(adev);
4717 amdgpu_device_ip_post_soft_reset(adev);
4718 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4719 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4720 need_full_reset = true;
4724 if (need_full_reset)
4725 r = amdgpu_device_ip_suspend(adev);
4726 if (need_full_reset)
4727 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4729 clear_bit(AMDGPU_NEED_FULL_RESET,
4730 &reset_context->flags);
4736 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4740 lockdep_assert_held(&adev->reset_domain->sem);
4742 for (i = 0; i < adev->num_regs; i++) {
4743 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4744 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4745 adev->reset_dump_reg_value[i]);
4751 #ifdef CONFIG_DEV_COREDUMP
4752 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4753 size_t count, void *data, size_t datalen)
4755 struct drm_printer p;
4756 struct amdgpu_device *adev = data;
4757 struct drm_print_iterator iter;
4762 iter.start = offset;
4763 iter.remain = count;
4765 p = drm_coredump_printer(&iter);
4767 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4768 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4769 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4770 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4771 if (adev->reset_task_info.pid)
4772 drm_printf(&p, "process_name: %s PID: %d\n",
4773 adev->reset_task_info.process_name,
4774 adev->reset_task_info.pid);
4776 if (adev->reset_vram_lost)
4777 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4778 if (adev->num_regs) {
4779 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4781 for (i = 0; i < adev->num_regs; i++)
4782 drm_printf(&p, "0x%08x: 0x%08x\n",
4783 adev->reset_dump_reg_list[i],
4784 adev->reset_dump_reg_value[i]);
4787 return count - iter.remain;
4790 static void amdgpu_devcoredump_free(void *data)
4794 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4796 struct drm_device *dev = adev_to_drm(adev);
4798 ktime_get_ts64(&adev->reset_time);
4799 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4800 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
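/*
 * Note: the captured dump is exposed through the devcoredump framework.
 * From userspace it can typically be read, and then released, through
 * sysfs (the instance number varies), e.g.:
 *
 *	cat /sys/class/devcoredump/devcd1/data
 *	echo 1 > /sys/class/devcoredump/devcd1/data
 */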
4804 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4805 struct amdgpu_reset_context *reset_context)
4807 struct amdgpu_device *tmp_adev = NULL;
4808 bool need_full_reset, skip_hw_reset, vram_lost = false;
4810 bool gpu_reset_for_dev_remove = false;
4812 /* Try reset handler method first */
4813 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4815 amdgpu_reset_reg_dumps(tmp_adev);
4817 reset_context->reset_device_list = device_list_handle;
4818 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4819 /* If reset handler not implemented, continue; otherwise return */
4825 /* Reset handler not implemented, use the default method */
4827 need_full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4828 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4830 gpu_reset_for_dev_remove =
4831 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4832 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4835 * ASIC reset has to be done on all XGMI hive nodes ASAP
4836 * to allow proper link negotiation in FW (within 1 sec).
4838 if (!skip_hw_reset && need_full_reset) {
4839 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4840 /* For XGMI run all resets in parallel to speed up the process */
4841 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4842 tmp_adev->gmc.xgmi.pending_reset = false;
4843 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4846 r = amdgpu_asic_reset(tmp_adev);
4849 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4850 r, adev_to_drm(tmp_adev)->unique);
4855 /* For XGMI wait for all resets to complete before proceed */
4857 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4858 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4859 flush_work(&tmp_adev->xgmi_reset_work);
4860 r = tmp_adev->asic_reset_res;
4868 if (!r && amdgpu_ras_intr_triggered()) {
4869 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4870 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4871 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4872 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4875 amdgpu_ras_intr_cleared();
4878 /* Since the mode1 reset affects base ip blocks, the
4879 * phase1 ip blocks need to be resumed. Otherwise there
4880 * will be a BIOS signature error and the psp bootloader
4881 * can't load kdb on the next amdgpu install.
4883 if (gpu_reset_for_dev_remove) {
4884 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4885 amdgpu_device_ip_resume_phase1(tmp_adev);
4890 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4891 if (need_full_reset) {
4893 r = amdgpu_device_asic_init(tmp_adev);
4895 dev_warn(tmp_adev->dev, "asic atom init failed!");
4897 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4898 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4902 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4906 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4907 #ifdef CONFIG_DEV_COREDUMP
4908 tmp_adev->reset_vram_lost = vram_lost;
4909 memset(&tmp_adev->reset_task_info, 0,
4910 sizeof(tmp_adev->reset_task_info));
4911 if (reset_context->job && reset_context->job->vm)
4912 tmp_adev->reset_task_info =
4913 reset_context->job->vm->task_info;
4914 amdgpu_reset_capture_coredumpm(tmp_adev);
4917 DRM_INFO("VRAM is lost due to GPU reset!\n");
4918 amdgpu_inc_vram_lost(tmp_adev);
4921 r = amdgpu_device_fw_loading(tmp_adev);
4925 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4930 amdgpu_device_fill_reset_magic(tmp_adev);
4933 * Add this ASIC as tracked as reset was already
4934 * complete successfully.
4936 amdgpu_register_gpu_instance(tmp_adev);
4938 if (!reset_context->hive &&
4939 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4940 amdgpu_xgmi_add_device(tmp_adev);
4942 r = amdgpu_device_ip_late_init(tmp_adev);
4946 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4949 * The GPU enters a bad state once the number of faulty pages
4950 * reported by ECC reaches the threshold, and RAS recovery is
4951 * scheduled next. So add a check here to break out of recovery
4952 * if the bad page threshold is indeed exceeded, and remind the
4953 * user to either retire this GPU or set a bigger
4954 * bad_page_threshold value so this can be fixed the next time
4955 * the driver is probed.
4958 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4960 amdgpu_ras_resume(tmp_adev);
4966 /* Update PSP FW topology after reset */
4967 if (reset_context->hive &&
4968 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4969 r = amdgpu_xgmi_update_topology(
4970 reset_context->hive, tmp_adev);
4976 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4977 r = amdgpu_ib_ring_tests(tmp_adev);
4979 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4980 need_full_reset = true;
4987 r = amdgpu_device_recover_vram(tmp_adev);
4989 tmp_adev->asic_reset_res = r;
4993 if (need_full_reset)
4994 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4996 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5000 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5003 switch (amdgpu_asic_reset_method(adev)) {
5004 case AMD_RESET_METHOD_MODE1:
5005 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5007 case AMD_RESET_METHOD_MODE2:
5008 adev->mp1_state = PP_MP1_STATE_RESET;
5011 adev->mp1_state = PP_MP1_STATE_NONE;
5016 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5018 amdgpu_vf_error_trans_all(adev);
5019 adev->mp1_state = PP_MP1_STATE_NONE;
5022 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5024 struct pci_dev *p = NULL;
5026 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5027 adev->pdev->bus->number, 1);
5029 pm_runtime_enable(&(p->dev));
5030 pm_runtime_resume(&(p->dev));
5034 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5036 enum amd_reset_method reset_method;
5037 struct pci_dev *p = NULL;
5041 * For now, only BACO and mode1 reset are confirmed
5042 * to suffer from the audio issue if the audio device is not properly suspended.
5044 reset_method = amdgpu_asic_reset_method(adev);
5045 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5046 (reset_method != AMD_RESET_METHOD_MODE1))
5049 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5050 adev->pdev->bus->number, 1);
5054 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5057 * If we cannot get the audio device autosuspend delay,
5058 * a fixed 4s interval is used. Since 3s is the audio
5059 * controller's default autosuspend delay setting, the 4s
5060 * used here is guaranteed to cover it.
5062 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5064 while (!pm_runtime_status_suspended(&(p->dev))) {
5065 if (!pm_runtime_suspend(&(p->dev)))
5068 if (expires < ktime_get_mono_fast_ns()) {
5069 dev_warn(adev->dev, "failed to suspend display audio\n");
5070 /* TODO: abort the succeeding gpu reset? */
5075 pm_runtime_disable(&(p->dev));
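/*
 * Illustrative sketch (not part of the driver): the two audio helpers
 * bracket the reset in amdgpu_device_gpu_recover() below:
 *
 *	if (!amdgpu_device_suspend_display_audio(tmp_adev))
 *		audio_suspended = true;
 *	... perform the reset ...
 *	if (audio_suspended)
 *		amdgpu_device_resume_display_audio(tmp_adev);
 */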
5080 static void amdgpu_device_recheck_guilty_jobs(
5081 struct amdgpu_device *adev, struct list_head *device_list_handle,
5082 struct amdgpu_reset_context *reset_context)
5086 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5087 struct amdgpu_ring *ring = adev->rings[i];
5089 struct drm_sched_job *s_job;
5091 if (!ring || !ring->sched.thread)
5094 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5095 struct drm_sched_job, list);
5099 /* Clear the job's guilty flag and rely on the following step to decide the real one */
5100 drm_sched_reset_karma(s_job);
5101 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5103 if (!s_job->s_fence->parent) {
5104 DRM_WARN("Failed to get a HW fence for job!");
5108 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5109 if (ret == 0) { /* timeout */
5110 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5111 ring->sched.name, s_job->id);
5114 amdgpu_fence_driver_isr_toggle(adev, true);
5116 /* Clear this failed job from fence array */
5117 amdgpu_fence_driver_clear_job_fences(ring);
5119 amdgpu_fence_driver_isr_toggle(adev, false);
5121 /* Since the job won't signal and we go for
5122 * another resubmit, drop this parent pointer
5124 dma_fence_put(s_job->s_fence->parent);
5125 s_job->s_fence->parent = NULL;
5128 drm_sched_increase_karma(s_job);
5129 amdgpu_reset_prepare_hwcontext(adev, reset_context);
5132 if (amdgpu_sriov_vf(adev)) {
5133 amdgpu_virt_fini_data_exchange(adev);
5134 r = amdgpu_device_reset_sriov(adev, false);
5136 adev->asic_reset_res = r;
5138 clear_bit(AMDGPU_SKIP_HW_RESET,
5139 &reset_context->flags);
5140 r = amdgpu_do_asic_reset(device_list_handle,
5142 if (r && r == -EAGAIN)
5147 * Increment the reset counter so that the following
5148 * resubmitted jobs can flush the VMID
5150 atomic_inc(&adev->gpu_reset_counter);
5154 /* got the hw fence, signal finished fence */
5155 atomic_dec(ring->sched.score);
5156 dma_fence_get(&s_job->s_fence->finished);
5157 dma_fence_signal(&s_job->s_fence->finished);
5158 dma_fence_put(&s_job->s_fence->finished);
5160 /* remove node from list and free the job */
5161 spin_lock(&ring->sched.job_list_lock);
5162 list_del_init(&s_job->list);
5163 spin_unlock(&ring->sched.job_list_lock);
5164 ring->sched.ops->free_job(s_job);
5168 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5170 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5172 #if defined(CONFIG_DEBUG_FS)
5173 if (!amdgpu_sriov_vf(adev))
5174 cancel_work(&adev->reset_work);
5178 cancel_work(&adev->kfd.reset_work);
5180 if (amdgpu_sriov_vf(adev))
5181 cancel_work(&adev->virt.flr_work);
5183 if (con && adev->ras_enabled)
5184 cancel_work(&con->recovery_work);
5190 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5192 * @adev: amdgpu_device pointer
5193 * @job: which job triggered the hang
5195 * Attempt to reset the GPU if it has hung (all ASICs):
5196 * attempt a soft reset or a full reset and reinitialize the ASIC.
5197 * Returns 0 for success or an error on failure.
5200 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5201 struct amdgpu_job *job,
5202 struct amdgpu_reset_context *reset_context)
5204 struct list_head device_list, *device_list_handle = NULL;
5205 bool job_signaled = false;
5206 struct amdgpu_hive_info *hive = NULL;
5207 struct amdgpu_device *tmp_adev = NULL;
5209 bool need_emergency_restart = false;
5210 bool audio_suspended = false;
5211 int tmp_vram_lost_counter;
5212 bool gpu_reset_for_dev_remove = false;
5214 gpu_reset_for_dev_remove =
5215 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5216 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5219 * Special case: RAS triggered and full reset isn't supported
5221 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5224 * Flush RAM to disk so that after reboot
5225 * the user can read the log and see why the system rebooted.
5227 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5228 DRM_WARN("Emergency reboot.");
5231 emergency_restart();
5234 dev_info(adev->dev, "GPU %s begin!\n",
5235 need_emergency_restart ? "jobs stop" : "reset");
5237 if (!amdgpu_sriov_vf(adev))
5238 hive = amdgpu_get_xgmi_hive(adev);
5240 mutex_lock(&hive->hive_lock);
5242 reset_context->job = job;
5243 reset_context->hive = hive;
5245 * Build list of devices to reset.
5246 * If we are in XGMI hive mode, reorder the device list
5247 * so that adev is in the first position.
5249 INIT_LIST_HEAD(&device_list);
5250 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5251 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5252 list_add_tail(&tmp_adev->reset_list, &device_list);
5253 if (gpu_reset_for_dev_remove && adev->shutdown)
5254 tmp_adev->shutdown = true;
5256 if (!list_is_first(&adev->reset_list, &device_list))
5257 list_rotate_to_front(&adev->reset_list, &device_list);
5258 device_list_handle = &device_list;
5260 list_add_tail(&adev->reset_list, &device_list);
5261 device_list_handle = &device_list;
5264 /* We need to lock reset domain only once both for XGMI and single device */
5265 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5267 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5269 /* block all schedulers and reset given job's ring */
5270 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5272 amdgpu_device_set_mp1_state(tmp_adev);
5275 * Try to put the audio codec into a suspended state
5276 * before the GPU reset starts.
5278 * The power domain of the graphics device is shared
5279 * with the AZ power domain; without this, we may
5280 * change the audio hardware behind the audio
5281 * driver's back and trigger audio codec errors.
5284 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5285 audio_suspended = true;
5287 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5289 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5291 if (!amdgpu_sriov_vf(tmp_adev))
5292 amdgpu_amdkfd_pre_reset(tmp_adev);
5295 * Mark these ASICs to be reset as untracked first,
5296 * and add them back after the reset completes.
5298 amdgpu_unregister_gpu_instance(tmp_adev);
5300 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5302 /* disable ras on ALL IPs */
5303 if (!need_emergency_restart &&
5304 amdgpu_device_ip_need_full_reset(tmp_adev))
5305 amdgpu_ras_suspend(tmp_adev);
5307 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5308 struct amdgpu_ring *ring = tmp_adev->rings[i];
5310 if (!ring || !ring->sched.thread)
5313 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5315 if (need_emergency_restart)
5316 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5318 atomic_inc(&tmp_adev->gpu_reset_counter);
5321 if (need_emergency_restart)
5322 goto skip_sched_resume;
5325 * Must check guilty signal here since after this point all old
5326 * HW fences are force signaled.
5328 * job->base holds a reference to parent fence
5330 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5331 job_signaled = true;
5332 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5336 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5337 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5338 if (gpu_reset_for_dev_remove) {
5339 /* Workaround for ASICs that need to disable SMC first */
5340 amdgpu_device_smu_fini_early(tmp_adev);
5342 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5343 /* TODO: Should we stop? */
5345 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
5346 r, adev_to_drm(tmp_adev)->unique);
5347 tmp_adev->asic_reset_res = r;
5351 * Drop all pending non-scheduler resets. Scheduler resets
5352 * were already dropped during drm_sched_stop.
5354 amdgpu_device_stop_pending_resets(tmp_adev);
5357 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5358 /* Actual ASIC resets if needed. */
5359 /* Host driver will handle XGMI hive reset for SRIOV */
5360 if (amdgpu_sriov_vf(adev)) {
5361 r = amdgpu_device_reset_sriov(adev, !job);
5363 adev->asic_reset_res = r;
5365 /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
5366 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5367 amdgpu_ras_resume(adev);
5369 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5370 if (r && r == -EAGAIN)
5373 if (!r && gpu_reset_for_dev_remove)
5379 /* Post ASIC reset for all devs .*/
5380 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5383 * Sometimes a later bad compute job can block a good gfx job because the
5384 * gfx and compute rings share internal GC hardware. We add an additional
5385 * guilty-job recheck step to find the real guilty job: it synchronously
5386 * resubmits and waits for the first job to be signaled. If that wait times
5387 * out, we identify it as the real guilty job.
5389 if (amdgpu_gpu_recovery == 2 &&
5390 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5391 amdgpu_device_recheck_guilty_jobs(
5392 tmp_adev, device_list_handle, reset_context);
5394 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5395 struct amdgpu_ring *ring = tmp_adev->rings[i];
5397 if (!ring || !ring->sched.thread)
5400 /* No point in resubmitting jobs if we didn't HW reset */
5401 if (!tmp_adev->asic_reset_res && !job_signaled)
5402 drm_sched_resubmit_jobs(&ring->sched);
5404 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5407 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5408 amdgpu_mes_self_test(tmp_adev);
5410 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5411 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5414 if (tmp_adev->asic_reset_res)
5415 r = tmp_adev->asic_reset_res;
5417 tmp_adev->asic_reset_res = 0;
5420 /* Bad news: how do we tell userspace? */
5421 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5422 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5424 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5425 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5426 DRM_WARN("smart shift update failed\n");
5431 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5432 /* unlock kfd: SRIOV would do it separately */
5433 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5434 amdgpu_amdkfd_post_reset(tmp_adev);
5436 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5437 * we need to bring up kfd here if it wasn't initialized before.
5439 if (!adev->kfd.init_complete)
5440 amdgpu_amdkfd_device_init(adev);
5442 if (audio_suspended)
5443 amdgpu_device_resume_display_audio(tmp_adev);
5445 amdgpu_device_unset_mp1_state(tmp_adev);
5449 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5451 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5454 mutex_unlock(&hive->hive_lock);
5455 amdgpu_put_xgmi_hive(hive);
5459 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5461 atomic_set(&adev->reset_domain->reset_res, r);
5466 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5468 * @adev: amdgpu_device pointer
5470 * Fetches and stores in the driver the PCIe capabilities (gen speed
5471 * and lanes) of the slot the device is in. Handles APUs and
5472 * virtualized environments where PCIe config space may not be available.
5474 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5476 struct pci_dev *pdev;
5477 enum pci_bus_speed speed_cap, platform_speed_cap;
5478 enum pcie_link_width platform_link_width;
5480 if (amdgpu_pcie_gen_cap)
5481 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5483 if (amdgpu_pcie_lane_cap)
5484 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5486 /* covers APUs as well */
5487 if (pci_is_root_bus(adev->pdev->bus)) {
5488 if (adev->pm.pcie_gen_mask == 0)
5489 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5490 if (adev->pm.pcie_mlw_mask == 0)
5491 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5495 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5498 pcie_bandwidth_available(adev->pdev, NULL,
5499 &platform_speed_cap, &platform_link_width);
5501 if (adev->pm.pcie_gen_mask == 0) {
5504 speed_cap = pcie_get_speed_cap(pdev);
5505 if (speed_cap == PCI_SPEED_UNKNOWN) {
5506 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5507 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5508 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5510 if (speed_cap == PCIE_SPEED_32_0GT)
5511 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5512 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5513 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5514 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5515 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5516 else if (speed_cap == PCIE_SPEED_16_0GT)
5517 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5518 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5519 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5520 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5521 else if (speed_cap == PCIE_SPEED_8_0GT)
5522 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5523 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5524 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5525 else if (speed_cap == PCIE_SPEED_5_0GT)
5526 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5527 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5529 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5532 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5533 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5534 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5536 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5537 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5538 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5539 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5540 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5541 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5542 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5543 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5544 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5545 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5546 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5547 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5548 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5549 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5550 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5551 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5552 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5553 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5555 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5559 if (adev->pm.pcie_mlw_mask == 0) {
5560 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5561 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5563 switch (platform_link_width) {
5565 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5566 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5567 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5568 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5569 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5570 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5571 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5574 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5575 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5576 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5577 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5578 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5579 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5582 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5583 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5584 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5585 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5586 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5589 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5590 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5591 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5592 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5595 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5596 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5597 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5600 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5601 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5604 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
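/*
 * Note: both masks can also be forced from the kernel command line via
 * the amdgpu.pcie_gen_cap and amdgpu.pcie_lane_cap module parameters
 * (CAIL_* bitmask values), which bypasses the probing above, e.g.:
 *
 *	modprobe amdgpu pcie_gen_cap=<CAIL gen mask> pcie_lane_cap=<CAIL width mask>
 */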
5614 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5616 * @adev: amdgpu_device pointer
5617 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5619 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5620 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of the peer device.
5623 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5624 struct amdgpu_device *peer_adev)
5626 #ifdef CONFIG_HSA_AMD_P2P
5627 uint64_t address_mask = peer_adev->dev->dma_mask ?
5628 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5629 resource_size_t aper_limit =
5630 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5632 bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
5633 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5635 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5636 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5637 !(adev->gmc.aper_base & address_mask ||
5638 aper_limit & address_mask));
5644 int amdgpu_device_baco_enter(struct drm_device *dev)
5646 struct amdgpu_device *adev = drm_to_adev(dev);
5647 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5649 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5652 if (ras && adev->ras_enabled &&
5653 adev->nbio.funcs->enable_doorbell_interrupt)
5654 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5656 return amdgpu_dpm_baco_enter(adev);
5659 int amdgpu_device_baco_exit(struct drm_device *dev)
5661 struct amdgpu_device *adev = drm_to_adev(dev);
5662 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5665 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5668 ret = amdgpu_dpm_baco_exit(adev);
5672 if (ras && adev->ras_enabled &&
5673 adev->nbio.funcs->enable_doorbell_interrupt)
5674 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5676 if (amdgpu_passthrough(adev) &&
5677 adev->nbio.funcs->clear_doorbell_interrupt)
5678 adev->nbio.funcs->clear_doorbell_interrupt(adev);
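/*
 * Illustrative sketch (not part of the driver): runtime-PM style
 * callers pair the two BACO helpers around the low-power window:
 *
 *	r = amdgpu_device_baco_enter(dev);
 *	if (!r) {
 *		... device sits in BACO ...
 *		r = amdgpu_device_baco_exit(dev);
 *	}
 */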
5684 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5685 * @pdev: PCI device struct
5686 * @state: PCI channel state
5688 * Description: Called when a PCI error is detected.
5690 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5692 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5694 struct drm_device *dev = pci_get_drvdata(pdev);
5695 struct amdgpu_device *adev = drm_to_adev(dev);
5698 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5700 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5701 DRM_WARN("No support for XGMI hive yet...");
5702 return PCI_ERS_RESULT_DISCONNECT;
5705 adev->pci_channel_state = state;
5708 case pci_channel_io_normal:
5709 return PCI_ERS_RESULT_CAN_RECOVER;
5710 /* Fatal error, prepare for slot reset */
5711 case pci_channel_io_frozen:
5713 * Locking adev->reset_domain->sem will prevent any external access
5714 * to GPU during PCI error recovery
5716 amdgpu_device_lock_reset_domain(adev->reset_domain);
5717 amdgpu_device_set_mp1_state(adev);
5720 * Block any work scheduling as we do for regular GPU reset
5721 * for the duration of the recovery
5723 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5724 struct amdgpu_ring *ring = adev->rings[i];
5726 if (!ring || !ring->sched.thread)
5729 drm_sched_stop(&ring->sched, NULL);
5731 atomic_inc(&adev->gpu_reset_counter);
5732 return PCI_ERS_RESULT_NEED_RESET;
5733 case pci_channel_io_perm_failure:
5734 /* Permanent error, prepare for device removal */
5735 return PCI_ERS_RESULT_DISCONNECT;
5738 return PCI_ERS_RESULT_NEED_RESET;
5742 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5743 * @pdev: pointer to PCI device
5745 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5748 DRM_INFO("PCI error: mmio enabled callback!!\n");
5750 /* TODO - dump whatever for debugging purposes */
5752 /* This is called only if amdgpu_pci_error_detected returns
5753 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5754 * work, so there is no need to reset the slot.
5757 return PCI_ERS_RESULT_RECOVERED;
5761 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5762 * @pdev: PCI device struct
5764 * Description: This routine is called by the pci error recovery
5765 * code after the PCI slot has been reset, just before we
5766 * should resume normal operations.
5768 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5770 struct drm_device *dev = pci_get_drvdata(pdev);
5771 struct amdgpu_device *adev = drm_to_adev(dev);
5773 struct amdgpu_reset_context reset_context;
5775 struct list_head device_list;
5777 DRM_INFO("PCI error: slot reset callback!!\n");
5779 memset(&reset_context, 0, sizeof(reset_context));
5781 INIT_LIST_HEAD(&device_list);
5782 list_add_tail(&adev->reset_list, &device_list);
5784 /* wait for asic to come out of reset */
5787 /* Restore PCI config space */
5788 amdgpu_device_load_pci_state(pdev);
5790 /* confirm ASIC came out of reset */
5791 for (i = 0; i < adev->usec_timeout; i++) {
5792 memsize = amdgpu_asic_get_config_memsize(adev);
5794 if (memsize != 0xffffffff)
5798 if (memsize == 0xffffffff) {
5803 reset_context.method = AMD_RESET_METHOD_NONE;
5804 reset_context.reset_req_dev = adev;
5805 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5806 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5808 adev->no_hw_access = true;
5809 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5810 adev->no_hw_access = false;
5814 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5818 if (amdgpu_device_cache_pci_state(adev->pdev))
5819 pci_restore_state(adev->pdev);
5821 DRM_INFO("PCIe error recovery succeeded\n");
5823 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5824 amdgpu_device_unset_mp1_state(adev);
5825 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5828 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5832 * amdgpu_pci_resume() - resume normal ops after PCI reset
5833 * @pdev: pointer to PCI device
5835 * Called when the error recovery driver tells us that it's
5836 * OK to resume normal operation.
5838 void amdgpu_pci_resume(struct pci_dev *pdev)
5840 struct drm_device *dev = pci_get_drvdata(pdev);
5841 struct amdgpu_device *adev = drm_to_adev(dev);
5845 DRM_INFO("PCI error: resume callback!!\n");
5847 /* Only continue execution for the case of pci_channel_io_frozen */
5848 if (adev->pci_channel_state != pci_channel_io_frozen)
5851 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5852 struct amdgpu_ring *ring = adev->rings[i];
5854 if (!ring || !ring->sched.thread)
5858 drm_sched_resubmit_jobs(&ring->sched);
5859 drm_sched_start(&ring->sched, true);
5862 amdgpu_device_unset_mp1_state(adev);
5863 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5866 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5868 struct drm_device *dev = pci_get_drvdata(pdev);
5869 struct amdgpu_device *adev = drm_to_adev(dev);
5872 r = pci_save_state(pdev);
5874 kfree(adev->pci_state);
5876 adev->pci_state = pci_store_saved_state(pdev);
5878 if (!adev->pci_state) {
5879 DRM_ERROR("Failed to store PCI saved state");
5883 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5890 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5892 struct drm_device *dev = pci_get_drvdata(pdev);
5893 struct amdgpu_device *adev = drm_to_adev(dev);
5896 if (!adev->pci_state)
5899 r = pci_load_saved_state(pdev, adev->pci_state);
5902 pci_restore_state(pdev);
5904 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5911 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5912 struct amdgpu_ring *ring)
5914 #ifdef CONFIG_X86_64
5915 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5918 if (adev->gmc.xgmi.connected_to_cpu)
5921 if (ring && ring->funcs->emit_hdp_flush)
5922 amdgpu_ring_emit_hdp_flush(ring);
5924 amdgpu_asic_flush_hdp(adev, ring);
5927 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5928 struct amdgpu_ring *ring)
5930 #ifdef CONFIG_X86_64
5931 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5934 if (adev->gmc.xgmi.connected_to_cpu)
5937 amdgpu_asic_invalidate_hdp(adev, ring);
5940 int amdgpu_in_reset(struct amdgpu_device *adev)
5942 return atomic_read(&adev->reset_domain->in_gpu_reset);
5946 * amdgpu_device_halt() - bring hardware to some kind of halt state
5948 * @adev: amdgpu_device pointer
5950 * Bring the hardware to some kind of halt state so that no one can touch it
5951 * any more. This helps preserve the error context when an error occurs.
5952 * Compared to a simple hang, the system stays stable at least for SSH
5953 * access, so it should be trivial to inspect the hardware state and
5954 * see what's going on. Implemented as follows:
5956 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
5957 * clears all CPU mappings to the device, and disallows remappings through page faults
5958 * 2. amdgpu_irq_disable_all() disables all interrupts
5959 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5960 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5961 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5962 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5963 * flush any in-flight DMA operations
5965 void amdgpu_device_halt(struct amdgpu_device *adev)
5967 struct pci_dev *pdev = adev->pdev;
5968 struct drm_device *ddev = adev_to_drm(adev);
5970 drm_dev_unplug(ddev);
5972 amdgpu_irq_disable_all(adev);
5974 amdgpu_fence_driver_hw_fini(adev);
5976 adev->no_hw_access = true;
5978 amdgpu_device_unmap_mmio(adev);
5980 pci_disable_device(pdev);
5981 pci_wait_for_pending_transaction(pdev);
5984 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5987 unsigned long flags, address, data;
5990 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5991 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5993 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5994 WREG32(address, reg * 4);
5995 (void)RREG32(address);
5997 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6001 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6004 unsigned long flags, address, data;
6006 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6007 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6009 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6010 WREG32(address, reg * 4);
6011 (void)RREG32(address);
6014 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
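/*
 * Illustrative sketch (hypothetical offset and bit, not part of the
 * driver): a read-modify-write through the PCIe port index/data pair:
 *
 *	u32 v = amdgpu_device_pcie_port_rreg(adev, reg);
 *	v |= SOME_FIELD_MASK;	(hypothetical bit)
 *	amdgpu_device_pcie_port_wreg(adev, reg, v);
 */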
6018 * amdgpu_device_switch_gang - switch to a new gang
6019 * @adev: amdgpu_device pointer
6020 * @gang: the gang to switch to
6022 * Try to switch to a new gang.
6023 * Returns: NULL if we switched to the new gang or a reference to the current gang leader.
6026 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6027 struct dma_fence *gang)
6029 struct dma_fence *old = NULL;
6034 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6040 if (!dma_fence_is_signaled(old))
6043 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,