Merge tag 'exynos-drm-next-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel...
author    Dave Airlie <airlied@redhat.com>    Thu, 26 Aug 2021 03:23:57 +0000 (13:23 +1000)
committer Dave Airlie <airlied@redhat.com>    Thu, 26 Aug 2021 03:24:10 +0000 (13:24 +1000)
Two fixups
- Fix missing unlock issue in exynos_drm_g2d.c
- Fix a build warning in exynos_drm_dma.c

One cleanup
- Replace atomic_t with refcount_t in exynos_drm_g2d.c

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Inki Dae <inki.dae@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210821172825.54720-1-inki.dae@samsung.com
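
For context, the missing-unlock fixup and the refcount_t cleanup both follow common kernel patterns. Below is a minimal, hypothetical sketch of those patterns only; the names are illustrative and this is not the actual exynos_drm_g2d.c code. It shows an error path that still releases a held mutex via a shared unlock label, and an atomic_t reference count replaced by refcount_t, which saturates and WARNs on overflow/underflow instead of silently wrapping.

    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct g2d_ctx {                        /* hypothetical stand-in, not the real g2d structure */
            refcount_t refcount;            /* was atomic_t in the pre-cleanup pattern */
            struct mutex lock;
            bool busy;
    };

    static int g2d_submit(struct g2d_ctx *ctx)
    {
            int ret = 0;

            mutex_lock(&ctx->lock);
            if (ctx->busy) {
                    ret = -EBUSY;
                    goto out_unlock;        /* the fixup pattern: error paths must still unlock */
            }
            ctx->busy = true;

    out_unlock:
            mutex_unlock(&ctx->lock);
            return ret;
    }

    static void g2d_ctx_put(struct g2d_ctx *ctx)
    {
            /* refcount_dec_and_test() replaces atomic_dec_and_test() */
            if (refcount_dec_and_test(&ctx->refcount))
                    kfree(ctx);
    }

A new object would pair refcount_set(&ctx->refcount, 1) at allocation with g2d_ctx_put() on release.
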
159 files changed:
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/hwmgr.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/Makefile
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/firewall.c [new file with mode: 0644]
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/gem.h
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/tegra/plane.h
drivers/gpu/drm/tegra/submit.c [new file with mode: 0644]
drivers/gpu/drm/tegra/submit.h [new file with mode: 0644]
drivers/gpu/drm/tegra/uapi.c [new file with mode: 0644]
drivers/gpu/drm/tegra/uapi.h [new file with mode: 0644]
drivers/gpu/drm/tegra/vic.c
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/fence.c [new file with mode: 0644]
drivers/gpu/host1x/fence.h [new file with mode: 0644]
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/debug_hw_1x01.c
drivers/gpu/host1x/hw/debug_hw_1x06.c
drivers/gpu/host1x/hw/hw_host1x02_uclass.h
drivers/gpu/host1x/hw/hw_host1x04_uclass.h
drivers/gpu/host1x/hw/hw_host1x05_uclass.h
drivers/gpu/host1x/hw/hw_host1x06_uclass.h
drivers/gpu/host1x/hw/hw_host1x07_uclass.h
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
drivers/gpu/host1x/job.c
drivers/gpu/host1x/job.h
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
include/linux/host1x.h
include/uapi/drm/tegra_drm.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0d37235..cea777a 100644
@@ -256,7 +256,6 @@ config DRM_AMDGPU
        select HWMON
        select BACKLIGHT_CLASS_DEVICE
        select INTERVAL_TREE
-       select CHASH
        help
          Choose this option if you have a recent AMD Radeon graphics card.
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 96e895d..0f278cc 100644
@@ -1271,6 +1271,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 
 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
 
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
 /* Common functions */
 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7b46ba5..3003ee1 100644
@@ -714,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
        ret = dma_fence_wait(f, false);
 
 err_ib_sched:
-       dma_fence_put(f);
        amdgpu_job_free(job);
 err:
        return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 491acdf..960acf6 100644
@@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
        adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
+static void program_trap_handler_settings(struct kgd_dev *kgd,
+               uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                       lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                       upper_32_bits(tba_addr >> 8) |
+                       (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                       upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
+       .program_trap_handler_settings = program_trap_handler_settings,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 1f5620c..dac0d75 100644
@@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
        adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
+static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+                       uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                       lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                       upper_32_bits(tba_addr >> 8) |
+                       (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                        upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 #if 0
 uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
                                uint32_t trap_debug_wave_launch_mode,
@@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
        .address_watch_get_offset = address_watch_get_offset_v10_3,
        .get_atc_vmid_pasid_mapping_info = NULL,
        .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
+       .program_trap_handler_settings = program_trap_handler_settings_v10_3,
 #if 0
        .enable_debug_trap = enable_debug_trap_v10_3,
        .disable_debug_trap = disable_debug_trap_v10_3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index ed3014f..1542449 100644
@@ -42,7 +42,8 @@
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
-       RESET_WAVES
+       RESET_WAVES,
+       SAVE_WAVES
 };
 
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
                                adev->gfx.cu_info.max_waves_per_simd;
 }
 
+static void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+                        uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                        lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                        upper_32_bits(tba_addr >> 8));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                       upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
        .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
                        kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
        .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+       .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 536005b..2771288 100644
@@ -1414,7 +1414,7 @@ no_preempt:
                        continue;
                }
                job = to_amdgpu_job(s_job);
-               if (preempted && job->fence == fence)
+               if (preempted && (&job->hw_fence) == fence)
                        /* mark the job as preempted */
                        job->preemption_status |= AMDGPU_IB_PREEMPTED;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d7cc45e..41c6b3a 100644
@@ -2829,12 +2829,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
-       }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
+       WARN_ON_ONCE(adev->gfx.gfx_off_state);
+       WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+               adev->gfx.gfx_off_state = true;
 }
 
 /**
@@ -3826,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
        dev_info(adev->dev, "amdgpu: finishing device.\n");
        flush_delayed_work(&adev->delayed_init_work);
-       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       if (adev->mman.initialized) {
+               flush_delayed_work(&adev->mman.bdev.wq);
+               ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       }
        adev->shutdown = true;
 
        /* make sure IB test finished before entering exclusive mode
@@ -4448,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                                 struct amdgpu_reset_context *reset_context)
 {
-       int i, r = 0;
+       int i, j, r = 0;
        struct amdgpu_job *job = NULL;
        bool need_full_reset =
                test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4472,6 +4474,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                if (!ring || !ring->sched.thread)
                        continue;
 
+               /* clear job fences from fence_drv to avoid force_completion;
+                * leave NULL and vm flush fences in fence_drv */
+               for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
+                       struct dma_fence *old, **ptr;
+
+                       ptr = &ring->fence_drv.fences[j];
+                       old = rcu_dereference_protected(*ptr, 1);
+                       if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
+                               RCU_INIT_POINTER(*ptr, NULL);
+                       }
+               }
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 43e7b61..ada7bc1 100644
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                  ip->major, ip->minor,
                                  ip->revision);
 
+                       if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                               adev->vcn.num_vcn_inst++;
+
                        for (k = 0; k < num_base_address; k++) {
                                /*
                                 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        struct harvest_table *harvest_info;
-       int i;
+       int i, vcn_harvest_count = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
                switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       vcn_harvest_count++;
                        break;
                case DMU_HWID:
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        break;
                }
        }
+       if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+       }
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 5a143ca..cd0acbe 100644
@@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
        return 0;
 
 out:
-       if (abo) {
-
-       }
        if (fb && ret) {
                drm_gem_object_put(gobj);
                drm_framebuffer_unregister_private(fb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 6ed5366..14499f0 100644
@@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  *
  * @ring: ring the fence is associated with
  * @f: resulting fence object
+ * @job: job the fence is embedded in
  * @flags: flags to pass into the subordinate .emit_fence() call
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
                      unsigned flags)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_fence *fence;
+       struct dma_fence *fence;
+       struct amdgpu_fence *am_fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;
 
-       fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
-       if (fence == NULL)
-               return -ENOMEM;
+       if (job == NULL) {
+               /* create a sperate hw fence */
+               /* create a separate hw fence */
+               if (am_fence == NULL)
+                       return -ENOMEM;
+               fence = &am_fence->base;
+               am_fence->ring = ring;
+       } else {
+               /* use the job-embedded fence */
+               fence = &job->hw_fence;
+       }
 
        seq = ++ring->fence_drv.sync_seq;
-       fence->ring = ring;
-       dma_fence_init(&fence->base, &amdgpu_fence_ops,
-                      &ring->fence_drv.lock,
-                      adev->fence_context + ring->idx,
-                      seq);
+       if (job != NULL && job->job_run_counter) {
+               /* reinit seq for resubmitted jobs */
+               fence->seqno = seq;
+       } else {
+               dma_fence_init(fence, &amdgpu_fence_ops,
+                               &ring->fence_drv.lock,
+                               adev->fence_context + ring->idx,
+                               seq);
+       }
+
+       if (job != NULL) {
+               /* mark that this fence has a parent job */
+               set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+       }
+
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
-       rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+       rcu_assign_pointer(*ptr, dma_fence_get(fence));
 
-       *f = &fence->base;
+       *f = fence;
 
        return 0;
 }
@@ -621,8 +641,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       return (const char *)fence->ring->name;
+       struct amdgpu_ring *ring;
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+               ring = to_amdgpu_ring(job->base.sched);
+       } else {
+               ring = to_amdgpu_fence(f)->ring;
+       }
+       return (const char *)ring->name;
 }
 
 /**
@@ -635,13 +663,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       struct amdgpu_ring *ring = fence->ring;
+       struct amdgpu_ring *ring;
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+               ring = to_amdgpu_ring(job->base.sched);
+       } else {
+               ring = to_amdgpu_fence(f)->ring;
+       }
 
        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);
 
-       DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+       DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
 
        return true;
 }
@@ -656,8 +691,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       kmem_cache_free(amdgpu_fence_slab, fence);
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+               /* free the job if the fence has a parent job */
+               struct amdgpu_job *job;
+
+               job = container_of(f, struct amdgpu_job, hw_fence);
+               kfree(job);
+       } else {
+               /* free back to fence_slab if it's a separate fence */
+               struct amdgpu_fence *fence;
+
+               fence = to_amdgpu_fence(f);
+               kmem_cache_free(amdgpu_fence_slab, fence);
+       }
 }
 
 /**
@@ -680,6 +727,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
        .release = amdgpu_fence_release,
 };
 
+
 /*
  * Fence debugfs
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a0be077..e7f06bd 100644
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       if (!enable)
-               adev->gfx.gfx_off_req_count++;
-       else if (adev->gfx.gfx_off_req_count > 0)
+       if (enable) {
+               /* If the count is already 0, it means there's an imbalance bug somewhere.
+                * Note that the bug may be in a different caller than the one which triggers the
+                * WARN_ON_ONCE.
+                */
+               if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+                       goto unlock;
+
                adev->gfx.gfx_off_req_count--;
 
-       if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-       } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-                       adev->gfx.gfx_off_state = false;
+               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+       } else {
+               if (adev->gfx.gfx_off_req_count == 0) {
+                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+                       if (adev->gfx.gfx_off_state &&
+                           !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                               adev->gfx.gfx_off_state = false;
 
-                       if (adev->gfx.funcs->init_spm_golden) {
-                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                               amdgpu_gfx_init_spm_golden(adev);
+                               if (adev->gfx.funcs->init_spm_golden) {
+                                       dev_dbg(adev->dev,
+                                               "GFXOFF is disabled, re-init SPM golden settings\n");
+                                       amdgpu_gfx_init_spm_golden(adev);
+                               }
                        }
                }
+
+               adev->gfx.gfx_off_req_count++;
        }
 
+unlock:
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
 
@@ -615,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
                adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
                adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gfx.ras_if->sub_block_index = 0;
-               strcpy(adev->gfx.ras_if->name, "gfx");
        }
        fs_info.head = ih_info.head = *adev->gfx.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index 1d50d53..a766e1a 100644
@@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
                adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
                adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->hdp.ras_if->sub_block_index = 0;
-               strcpy(adev->hdp.ras_if->name, "hdp");
        }
        ih_info.head = fs_info.head = *adev->hdp.ras_if;
        r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index bca4ddd..82608df 100644
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
 void
 amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
 {
-       u8 val;
+       u8 val = 0;
 
        if (!amdgpu_connector->router.ddc_valid)
                return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ec65ab0..c076a6b 100644
@@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }
 
-       r = amdgpu_fence_emit(ring, f, fence_flags);
+       r = amdgpu_fence_emit(ring, f, job, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d33e6d9..de29518 100644
@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
+       struct dma_fence *hw_fence;
        unsigned i;
 
-       /* use sched fence if available */
-       f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+       if (job->hw_fence.ops == NULL)
+               hw_fence = job->external_hw_fence;
+       else
+               hw_fence = &job->hw_fence;
 
+       /* use sched fence if available */
+       f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
        drm_sched_job_cleanup(s_job);
 
-       dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
-       kfree(job);
+
+       /* only put the hw fence if the job has an embedded fence */
+       if (job->hw_fence.ops != NULL)
+               dma_fence_put(&job->hw_fence);
+       else
+               kfree(job);
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
        amdgpu_job_free_resources(job);
-
-       dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
-       kfree(job);
+
+       /* only put the hw fence if the job has an embedded fence */
+       if (job->hw_fence.ops != NULL)
+               dma_fence_put(&job->hw_fence);
+       else
+               kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-       job->fence = dma_fence_get(*fence);
+       /* record external_hw_fence for direct submit */
+       job->external_hw_fence = dma_fence_get(*fence);
        if (r)
                return r;
 
        amdgpu_job_free(job);
+       dma_fence_put(*fence);
+
        return 0;
 }
 
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
-       /* if gpu reset, hw fence will be replaced here */
-       dma_fence_put(job->fence);
-       job->fence = dma_fence_get(fence);
 
+       if (!job->job_run_counter)
+               dma_fence_get(fence);
+       else if (finished->error < 0)
+               dma_fence_put(&job->hw_fence);
+       job->job_run_counter++;
        amdgpu_job_free_resources(job);
 
        fence = r ? ERR_PTR(r) : fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 81caac9..9e65730 100644
@@ -46,7 +46,8 @@ struct amdgpu_job {
        struct amdgpu_sync      sync;
        struct amdgpu_sync      sched_sync;
        struct amdgpu_ib        *ibs;
-       struct dma_fence        *fence; /* the hw fence */
+       struct dma_fence        hw_fence;
+       struct dma_fence        *external_hw_fence;
        uint32_t                preamble_status;
        uint32_t                preemption_status;
        uint32_t                num_ibs;
@@ -62,6 +63,9 @@ struct amdgpu_job {
        /* user fence handling */
        uint64_t                uf_addr;
        uint64_t                uf_sequence;
+
+       /* job_run_counter >= 1 means a resubmitted job */
+       uint32_t                job_run_counter;
 };
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 20b049a..7e45640 100644
@@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                switch (query_fw->index) {
                case TA_FW_TYPE_PSP_XGMI:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+                       fw_info->feature = adev->psp.xgmi.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAS:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_ras_ucode_version;
+                       fw_info->feature = adev->psp.ras.feature_version;
                        break;
                case TA_FW_TYPE_PSP_HDCP:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+                       fw_info->feature = adev->psp.hdcp.feature_version;
                        break;
                case TA_FW_TYPE_PSP_DTM:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_dtm_ucode_version;
+                       fw_info->feature = adev->psp.dtm.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAP:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_rap_ucode_version;
+                       fw_info->feature = adev->psp.rap.feature_version;
                        break;
                case TA_FW_TYPE_PSP_SECUREDISPLAY:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
+                       fw_info->feature = adev->psp.securedisplay.feature_version;
                        break;
                default:
                        return -EINVAL;
@@ -378,8 +378,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->feature = adev->psp.sos.feature_version;
                break;
        case AMDGPU_INFO_FW_ASD:
-               fw_info->ver = adev->psp.asd_fw_version;
-               fw_info->feature = adev->psp.asd_feature_version;
+               fw_info->ver = adev->psp.asd.fw_version;
+               fw_info->feature = adev->psp.asd.feature_version;
                break;
        case AMDGPU_INFO_FW_DMCU:
                fw_info->ver = adev->dm.dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index ead3dc5..24297dc 100644
@@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
                adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
                adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->mmhub.ras_if->sub_block_index = 0;
-               strcpy(adev->mmhub.ras_if->name, "mmhub");
        }
        ih_info.head = fs_info.head = *adev->mmhub.ras_if;
        r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 6201a5f..6afb02f 100644
@@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
                adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
                adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->nbio.ras_if->sub_block_index = 0;
-               strcpy(adev->nbio.ras_if->name, "pcie_bif");
        }
        ih_info.head = fs_info.head = *adev->nbio.ras_if;
        r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d15eee9..7734c10 100644
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
        }
 
-       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-        * See function amdgpu_display_supported_domains()
-        */
-       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return 0;
        }
 
+       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+        * See function amdgpu_display_supported_domains()
+        */
+       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9dc2d6d..a78a832 100644
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_xgmi.h"
 #include "soc15_common.h"
 #include "psp_v3_1.h"
 #include "psp_v10_0.h"
@@ -799,15 +800,15 @@ static int psp_asd_load(struct psp_context *psp)
         * add workaround to bypass it for sriov now.
         * TODO: add version check to make it common
         */
-       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
+       if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
                return 0;
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
+       psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
 
        psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-                                 psp->asd_ucode_size);
+                                 psp->asd.size_bytes);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
@@ -908,9 +909,9 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->xgmi_context.xgmi_shared_bo,
-                                     &psp->xgmi_context.xgmi_shared_mc_addr,
-                                     &psp->xgmi_context.xgmi_shared_buf);
+                                     &psp->xgmi_context.context.mem_context.shared_bo,
+                                     &psp->xgmi_context.context.mem_context.shared_mc_addr,
+                                     &psp->xgmi_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -952,20 +953,20 @@ static int psp_xgmi_load(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+       psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_xgmi_ucode_size,
-                                psp->xgmi_context.xgmi_shared_mc_addr,
+                                psp->xgmi.size_bytes,
+                                psp->xgmi_context.context.mem_context.shared_mc_addr,
                                 PSP_XGMI_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->xgmi_context.initialized = 1;
-               psp->xgmi_context.session_id = cmd->resp.session_id;
+               psp->xgmi_context.context.initialized = true;
+               psp->xgmi_context.context.session_id = cmd->resp.session_id;
        }
 
        release_psp_cmd_buf(psp);
@@ -990,7 +991,7 @@ static int psp_xgmi_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
@@ -1002,41 +1003,44 @@ static int psp_xgmi_unload(struct psp_context *psp)
 
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-       return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
 }
 
 int psp_xgmi_terminate(struct psp_context *psp)
 {
        int ret;
 
-       if (!psp->xgmi_context.initialized)
+       if (!psp->xgmi_context.context.initialized)
                return 0;
 
        ret = psp_xgmi_unload(psp);
        if (ret)
                return ret;
 
-       psp->xgmi_context.initialized = 0;
+       psp->xgmi_context.context.initialized = false;
 
        /* free xgmi shared memory */
-       amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
-                       &psp->xgmi_context.xgmi_shared_mc_addr,
-                       &psp->xgmi_context.xgmi_shared_buf);
+       amdgpu_bo_free_kernel(&psp->xgmi_context.context.mem_context.shared_bo,
+                       &psp->xgmi_context.context.mem_context.shared_mc_addr,
+                       &psp->xgmi_context.context.mem_context.shared_buf);
 
        return 0;
 }
 
-int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
 {
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       if (!psp->adev->psp.ta_fw ||
-           !psp->adev->psp.ta_xgmi_ucode_size ||
-           !psp->adev->psp.ta_xgmi_start_addr)
+       if (!psp->ta_fw ||
+           !psp->xgmi.size_bytes ||
+           !psp->xgmi.start_addr)
                return -ENOENT;
 
-       if (!psp->xgmi_context.initialized) {
+       if (!load_ta)
+               goto invoke;
+
+       if (!psp->xgmi_context.context.initialized) {
                ret = psp_xgmi_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1047,9 +1051,11 @@ int psp_xgmi_initialize(struct psp_context *psp)
        if (ret)
                return ret;
 
+invoke:
        /* Initialize XGMI session */
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+       xgmi_cmd->flag_extend_link_record = set_extended_data;
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
 
        ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
@@ -1062,7 +1068,7 @@ int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
@@ -1082,7 +1088,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
@@ -1100,12 +1106,59 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
 {
        return psp->adev->asic_type == CHIP_ALDEBARAN &&
-                               psp->ta_xgmi_ucode_version >= 0x2000000b;
+                               psp->xgmi.feature_version >= 0x2000000b;
+}
+
+/*
+ * Chips that support extended topology information require the driver to
+ * reflect topology information in the opposite direction.  This is
+ * because the TA has already exceeded its link record limit and if the
+ * TA holds bi-directional information, the driver would have to do
+ * multiple fetches instead of just two.
+ */
+static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+                                       struct psp_xgmi_node_info node_info)
+{
+       struct amdgpu_device *mirror_adev;
+       struct amdgpu_hive_info *hive;
+       uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
+       uint64_t dst_node_id = node_info.node_id;
+       uint8_t dst_num_hops = node_info.num_hops;
+       uint8_t dst_num_links = node_info.num_links;
+
+       hive = amdgpu_get_xgmi_hive(psp->adev);
+       list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+               struct psp_xgmi_topology_info *mirror_top_info;
+               int j;
+
+               if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
+                       continue;
+
+               mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
+               for (j = 0; j < mirror_top_info->num_nodes; j++) {
+                       if (mirror_top_info->nodes[j].node_id != src_node_id)
+                               continue;
+
+                       mirror_top_info->nodes[j].num_hops = dst_num_hops;
+                       /*
+                        * prevent 0 num_links value re-reflection since reflection
+                        * criteria is based on num_hops (direct or indirect).
+                        *
+                        */
+                       if (dst_num_links)
+                               mirror_top_info->nodes[j].num_links = dst_num_links;
+
+                       break;
+               }
+
+               break;
+       }
 }
 
 int psp_xgmi_get_topology_info(struct psp_context *psp,
                               int number_devices,
-                              struct psp_xgmi_topology_info *topology)
+                              struct psp_xgmi_topology_info *topology,
+                              bool get_extended_data)
 {
        struct ta_xgmi_shared_memory *xgmi_cmd;
        struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
@@ -1116,8 +1169,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
        if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                return -EINVAL;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+       xgmi_cmd->flag_extend_link_record = get_extended_data;
 
        /* Fill in the shared memory with topology information as input */
        topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1140,10 +1194,19 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
        topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
        topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
        for (i = 0; i < topology->num_nodes; i++) {
-               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
-               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
-               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
-               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+               /* extended data will either be 0 or equal to non-extended data */
+               if (topology_info_output->nodes[i].num_hops)
+                       topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+
+               /* non-extended data gets everything here so no need to update */
+               if (!get_extended_data) {
+                       topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+                       topology->nodes[i].is_sharing_enabled =
+                                       topology_info_output->nodes[i].is_sharing_enabled;
+                       topology->nodes[i].sdma_engine =
+                                       topology_info_output->nodes[i].sdma_engine;
+               }
+
        }
 
        /* Invoke xgmi ta again to get the link information */
@@ -1158,9 +1221,18 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
                        return ret;
 
                link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
-               for (i = 0; i < topology->num_nodes; i++)
-                       topology->nodes[i].num_links =
+               for (i = 0; i < topology->num_nodes; i++) {
+                       /* accumulate num_links on extended data */
+                       topology->nodes[i].num_links = get_extended_data ?
+                                       topology->nodes[i].num_links +
+                                                       link_info_output->nodes[i].num_links :
                                        link_info_output->nodes[i].num_links;
+
+                       /* reflect the topology information for bi-directionality */
+                       if (psp->xgmi_context.supports_extended_data &&
+                                       get_extended_data && topology->nodes[i].num_hops)
+                               psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
+               }
        }
 
        return 0;
@@ -1177,7 +1249,7 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
        if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                return -EINVAL;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1206,9 +1278,9 @@ static int psp_ras_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                       &psp->ras.ras_shared_bo,
-                       &psp->ras.ras_shared_mc_addr,
-                       &psp->ras.ras_shared_buf);
+                       &psp->ras_context.context.mem_context.shared_bo,
+                       &psp->ras_context.context.mem_context.shared_mc_addr,
+                       &psp->ras_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1225,9 +1297,9 @@ static int psp_ras_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+       psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
 
        if (psp->adev->gmc.xgmi.connected_to_cpu)
                ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
@@ -1238,18 +1310,18 @@ static int psp_ras_load(struct psp_context *psp)
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_ras_ucode_size,
-                                psp->ras.ras_shared_mc_addr,
+                                psp->ras.size_bytes,
+                                psp->ras_context.context.mem_context.shared_mc_addr,
                                 PSP_RAS_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->ras.session_id = cmd->resp.session_id;
+               psp->ras_context.context.session_id = cmd->resp.session_id;
 
                if (!ras_cmd->ras_status)
-                       psp->ras.ras_initialized = true;
+                       psp->ras_context.context.initialized = true;
                else
                        dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
        }
@@ -1275,7 +1347,7 @@ static int psp_ras_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
@@ -1290,7 +1362,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
 
        /*
         * TODO: bypass the loading in sriov for now
@@ -1298,7 +1370,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);
 
        if (amdgpu_ras_intr_triggered())
                return ret;
@@ -1354,10 +1426,10 @@ int psp_ras_enable_features(struct psp_context *psp,
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return -EINVAL;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
        memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
 
        if (enable)
@@ -1384,19 +1456,19 @@ static int psp_ras_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return 0;
 
        ret = psp_ras_unload(psp);
        if (ret)
                return ret;
 
-       psp->ras.ras_initialized = false;
+       psp->ras_context.context.initialized = false;
 
        /* free ras shared memory */
-       amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
-                       &psp->ras.ras_shared_mc_addr,
-                       &psp->ras.ras_shared_buf);
+       amdgpu_bo_free_kernel(&psp->ras_context.context.mem_context.shared_bo,
+                       &psp->ras_context.context.mem_context.shared_mc_addr,
+                       &psp->ras_context.context.mem_context.shared_buf);
 
        return 0;
 }
@@ -1413,8 +1485,8 @@ static int psp_ras_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       if (!adev->psp.ta_ras_ucode_size ||
-           !adev->psp.ta_ras_start_addr) {
+       if (!adev->psp.ras.size_bytes ||
+           !adev->psp.ras.start_addr) {
                dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
                return 0;
        }
@@ -1460,7 +1532,7 @@ static int psp_ras_initialize(struct psp_context *psp)
                }
        }
 
-       if (!psp->ras.ras_initialized) {
+       if (!psp->ras_context.context.initialized) {
                ret = psp_ras_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1479,10 +1551,10 @@ int psp_ras_trigger_error(struct psp_context *psp,
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return -EINVAL;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
        memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
 
        ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
@@ -1512,9 +1584,9 @@ static int psp_hdcp_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->hdcp_context.hdcp_shared_bo,
-                                     &psp->hdcp_context.hdcp_shared_mc_addr,
-                                     &psp->hdcp_context.hdcp_shared_buf);
+                                     &psp->hdcp_context.context.mem_context.shared_bo,
+                                     &psp->hdcp_context.context.mem_context.shared_mc_addr,
+                                     &psp->hdcp_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1530,22 +1602,22 @@ static int psp_hdcp_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_hdcp_start_addr,
-                   psp->ta_hdcp_ucode_size);
+       psp_copy_fw(psp, psp->hdcp.start_addr,
+                   psp->hdcp.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_hdcp_ucode_size,
-                                psp->hdcp_context.hdcp_shared_mc_addr,
+                                psp->hdcp.size_bytes,
+                                psp->hdcp_context.context.mem_context.shared_mc_addr,
                                 PSP_HDCP_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->hdcp_context.hdcp_initialized = true;
-               psp->hdcp_context.session_id = cmd->resp.session_id;
+               psp->hdcp_context.context.initialized = true;
+               psp->hdcp_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->hdcp_context.mutex);
        }
 
@@ -1563,13 +1635,13 @@ static int psp_hdcp_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_hdcp_ucode_size ||
-           !psp->adev->psp.ta_hdcp_start_addr) {
+       if (!psp->hdcp.size_bytes ||
+           !psp->hdcp.start_addr) {
                dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                ret = psp_hdcp_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1595,7 +1667,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1612,7 +1684,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
 }
 
 static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1625,8 +1697,8 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->hdcp_context.hdcp_initialized) {
-               if (psp->hdcp_context.hdcp_shared_buf)
+       if (!psp->hdcp_context.context.initialized) {
+               if (psp->hdcp_context.context.mem_context.shared_buf)
                        goto out;
                else
                        return 0;
@@ -1636,13 +1708,13 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (ret)
                return ret;
 
-       psp->hdcp_context.hdcp_initialized = false;
+       psp->hdcp_context.context.initialized = false;
 
 out:
        /* free hdcp shared memory */
-       amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
-                             &psp->hdcp_context.hdcp_shared_mc_addr,
-                             &psp->hdcp_context.hdcp_shared_buf);
+       amdgpu_bo_free_kernel(&psp->hdcp_context.context.mem_context.shared_bo,
+                             &psp->hdcp_context.context.mem_context.shared_mc_addr,
+                             &psp->hdcp_context.context.mem_context.shared_buf);
 
        return 0;
 }
@@ -1659,9 +1731,9 @@ static int psp_dtm_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->dtm_context.dtm_shared_bo,
-                                     &psp->dtm_context.dtm_shared_mc_addr,
-                                     &psp->dtm_context.dtm_shared_buf);
+                                     &psp->dtm_context.context.mem_context.shared_bo,
+                                     &psp->dtm_context.context.mem_context.shared_mc_addr,
+                                     &psp->dtm_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1677,21 +1749,21 @@ static int psp_dtm_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+       psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_dtm_ucode_size,
-                                psp->dtm_context.dtm_shared_mc_addr,
+                                psp->dtm.size_bytes,
+                                psp->dtm_context.context.mem_context.shared_mc_addr,
                                 PSP_DTM_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->dtm_context.dtm_initialized = true;
-               psp->dtm_context.session_id = cmd->resp.session_id;
+               psp->dtm_context.context.initialized = true;
+               psp->dtm_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->dtm_context.mutex);
        }
 
@@ -1710,13 +1782,13 @@ static int psp_dtm_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_dtm_ucode_size ||
-           !psp->adev->psp.ta_dtm_start_addr) {
+       if (!psp->dtm.size_bytes ||
+           !psp->dtm.start_addr) {
                dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.context.initialized) {
                ret = psp_dtm_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1742,7 +1814,7 @@ static int psp_dtm_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1759,7 +1831,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
 }
 
 static int psp_dtm_terminate(struct psp_context *psp)
@@ -1772,8 +1844,8 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->dtm_context.dtm_initialized) {
-               if (psp->dtm_context.dtm_shared_buf)
+       if (!psp->dtm_context.context.initialized) {
+               if (psp->dtm_context.context.mem_context.shared_buf)
                        goto out;
                else
                        return 0;
@@ -1783,13 +1855,13 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (ret)
                return ret;
 
-       psp->dtm_context.dtm_initialized = false;
+       psp->dtm_context.context.initialized = false;
 
 out:
        /* free dtm shared memory */
-       amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
-                             &psp->dtm_context.dtm_shared_mc_addr,
-                             &psp->dtm_context.dtm_shared_buf);
+       amdgpu_bo_free_kernel(&psp->dtm_context.context.mem_context.shared_bo,
+                             &psp->dtm_context.context.mem_context.shared_mc_addr,
+                             &psp->dtm_context.context.mem_context.shared_buf);
 
        return 0;
 }
@@ -1806,9 +1878,9 @@ static int psp_rap_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->rap_context.rap_shared_bo,
-                                     &psp->rap_context.rap_shared_mc_addr,
-                                     &psp->rap_context.rap_shared_buf);
+                                     &psp->rap_context.context.mem_context.shared_bo,
+                                     &psp->rap_context.context.mem_context.shared_mc_addr,
+                                     &psp->rap_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1818,21 +1890,21 @@ static int psp_rap_load(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd;
 
-       psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+       psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_rap_ucode_size,
-                                psp->rap_context.rap_shared_mc_addr,
+                                psp->rap.size_bytes,
+                                psp->rap_context.context.mem_context.shared_mc_addr,
                                 PSP_RAP_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->rap_context.rap_initialized = true;
-               psp->rap_context.session_id = cmd->resp.session_id;
+               psp->rap_context.context.initialized = true;
+               psp->rap_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->rap_context.mutex);
        }
 
@@ -1846,7 +1918,7 @@ static int psp_rap_unload(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1866,13 +1938,13 @@ static int psp_rap_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_rap_ucode_size ||
-           !psp->adev->psp.ta_rap_start_addr) {
+       if (!psp->rap.size_bytes ||
+           !psp->rap.start_addr) {
                dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->rap_context.rap_initialized) {
+       if (!psp->rap_context.context.initialized) {
                ret = psp_rap_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1886,11 +1958,11 @@ static int psp_rap_initialize(struct psp_context *psp)
        if (ret || status != TA_RAP_STATUS__SUCCESS) {
                psp_rap_unload(psp);
 
-               amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
-                             &psp->rap_context.rap_shared_mc_addr,
-                             &psp->rap_context.rap_shared_buf);
+               amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
+                             &psp->rap_context.context.mem_context.shared_mc_addr,
+                             &psp->rap_context.context.mem_context.shared_buf);
 
-               psp->rap_context.rap_initialized = false;
+               psp->rap_context.context.initialized = false;
 
                dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
                         ret, status);
@@ -1905,17 +1977,17 @@ static int psp_rap_terminate(struct psp_context *psp)
 {
        int ret;
 
-       if (!psp->rap_context.rap_initialized)
+       if (!psp->rap_context.context.initialized)
                return 0;
 
        ret = psp_rap_unload(psp);
 
-       psp->rap_context.rap_initialized = false;
+       psp->rap_context.context.initialized = false;
 
        /* free rap shared memory */
-       amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
-                             &psp->rap_context.rap_shared_mc_addr,
-                             &psp->rap_context.rap_shared_buf);
+       amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
+                             &psp->rap_context.context.mem_context.shared_mc_addr,
+                             &psp->rap_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1925,7 +1997,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
        struct ta_rap_shared_memory *rap_cmd;
        int ret = 0;
 
-       if (!psp->rap_context.rap_initialized)
+       if (!psp->rap_context.context.initialized)
                return 0;
 
        if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
@@ -1935,13 +2007,13 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
        mutex_lock(&psp->rap_context.mutex);
 
        rap_cmd = (struct ta_rap_shared_memory *)
-                 psp->rap_context.rap_shared_buf;
+                 psp->rap_context.context.mem_context.shared_buf;
        memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
 
        rap_cmd->cmd_id = ta_cmd_id;
        rap_cmd->validation_method_id = METHOD_A;
 
-       ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
+       ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
        if (ret)
                goto out_unlock;
 
@@ -1966,9 +2038,9 @@ static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->securedisplay_context.securedisplay_shared_bo,
-                                     &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                                     &psp->securedisplay_context.securedisplay_shared_buf);
+                                     &psp->securedisplay_context.context.mem_context.shared_bo,
+                                     &psp->securedisplay_context.context.mem_context.shared_mc_addr,
+                                     &psp->securedisplay_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -1979,19 +2051,19 @@ static int psp_securedisplay_load(struct psp_context *psp)
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-       memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+       memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_securedisplay_ucode_size,
-                                psp->securedisplay_context.securedisplay_shared_mc_addr,
+                                psp->securedisplay.size_bytes,
+                                psp->securedisplay_context.context.mem_context.shared_mc_addr,
                                 PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->securedisplay_context.securedisplay_initialized = true;
-               psp->securedisplay_context.session_id = cmd->resp.session_id;
+               psp->securedisplay_context.context.initialized = true;
+               psp->securedisplay_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->securedisplay_context.mutex);
        }
 
@@ -2005,7 +2077,7 @@ static int psp_securedisplay_unload(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -2025,13 +2097,13 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_securedisplay_ucode_size ||
-           !psp->adev->psp.ta_securedisplay_start_addr) {
+       if (!psp->securedisplay.size_bytes ||
+           !psp->securedisplay.start_addr) {
                dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->securedisplay_context.securedisplay_initialized) {
+       if (!psp->securedisplay_context.context.initialized) {
                ret = psp_securedisplay_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -2048,11 +2120,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
        if (ret) {
                psp_securedisplay_unload(psp);
 
-               amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
-                             &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                             &psp->securedisplay_context.securedisplay_shared_buf);
+               amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
+                             &psp->securedisplay_context.context.mem_context.shared_mc_addr,
+                             &psp->securedisplay_context.context.mem_context.shared_buf);
 
-               psp->securedisplay_context.securedisplay_initialized = false;
+               psp->securedisplay_context.context.initialized = false;
 
                dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
                return -EINVAL;
@@ -2077,19 +2149,19 @@ static int psp_securedisplay_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->securedisplay_context.securedisplay_initialized)
+       if (!psp->securedisplay_context.context.initialized)
                return 0;
 
        ret = psp_securedisplay_unload(psp);
        if (ret)
                return ret;
 
-       psp->securedisplay_context.securedisplay_initialized = false;
+       psp->securedisplay_context.context.initialized = false;
 
        /* free securedisplay shared memory */
-       amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
-                             &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                             &psp->securedisplay_context.securedisplay_shared_buf);
+       amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
+                             &psp->securedisplay_context.context.mem_context.shared_mc_addr,
+                             &psp->securedisplay_context.context.mem_context.shared_buf);
 
        return ret;
 }
@@ -2098,7 +2170,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
        int ret;
 
-       if (!psp->securedisplay_context.securedisplay_initialized)
+       if (!psp->securedisplay_context.context.initialized)
                return -EINVAL;
 
        if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
@@ -2107,7 +2179,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 
        mutex_lock(&psp->securedisplay_context.mutex);
 
-       ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);
 
        mutex_unlock(&psp->securedisplay_context.mutex);
 
@@ -2420,7 +2492,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
        struct amdgpu_firmware_info *ucode =
                        &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-       struct amdgpu_ras *ras = psp->ras.ras;
+       struct amdgpu_ras *ras = psp->ras_context.ras;
 
        if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
                return 0;
@@ -2625,7 +2697,7 @@ skip_memalloc:
                return ret;
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
@@ -2697,7 +2769,7 @@ static int psp_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                psp_ras_terminate(psp);
                psp_securedisplay_terminate(psp);
                psp_rap_terminate(psp);
@@ -2727,7 +2799,7 @@ static int psp_suspend(void *handle)
        struct psp_context *psp = &adev->psp;
 
        if (adev->gmc.xgmi.num_physical_nodes > 1 &&
-           psp->xgmi_context.initialized == 1) {
+           psp->xgmi_context.context.initialized) {
                ret = psp_xgmi_terminate(psp);
                if (ret) {
                        DRM_ERROR("Failed to terminate xgmi ta\n");
@@ -2735,7 +2807,7 @@ static int psp_suspend(void *handle)
                }
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_terminate(psp);
                if (ret) {
                        DRM_ERROR("Failed to terminate ras ta\n");
@@ -2817,7 +2889,7 @@ static int psp_resume(void *handle)
        }
 
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
-               ret = psp_xgmi_initialize(psp);
+               ret = psp_xgmi_initialize(psp, false, true);
                /* Warn on XGMI session initialization failure
                 * instead of stopping driver initialization
                 */
@@ -2826,7 +2898,7 @@ static int psp_resume(void *handle)
                                "XGMI: Failed to initialize XGMI session\n");
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
@@ -2978,10 +3050,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
                goto out;
 
        asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+       adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+       adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+       adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+       adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
                                le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
        return 0;
 out:
@@ -3123,6 +3195,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
                adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
                adev->psp.sos.start_addr = ucode_array_start_addr +
                                le32_to_cpu(sos_hdr->sos.offset_bytes);
+               adev->psp.xgmi_context.supports_extended_data = false;
        } else {
                /* Load alternate PSP SOS FW */
                sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
@@ -3137,6 +3210,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
                adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
                adev->psp.sos.start_addr = ucode_array_start_addr +
                        le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+               adev->psp.xgmi_context.supports_extended_data = true;
        }
 
        if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
@@ -3266,40 +3340,40 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 
        switch (desc->fw_type) {
        case TA_FW_TYPE_PSP_ASD:
-               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
-               psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
-               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
-               psp->asd_start_addr        = ucode_start_addr;
+               psp->asd.fw_version        = le32_to_cpu(desc->fw_version);
+               psp->asd.feature_version   = le32_to_cpu(desc->fw_version);
+               psp->asd.size_bytes        = le32_to_cpu(desc->size_bytes);
+               psp->asd.start_addr        = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_XGMI:
-               psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
-               psp->ta_xgmi_ucode_size    = le32_to_cpu(desc->size_bytes);
-               psp->ta_xgmi_start_addr    = ucode_start_addr;
+               psp->xgmi.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->xgmi.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->xgmi.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAS:
-               psp->ta_ras_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_ras_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_ras_start_addr     = ucode_start_addr;
+               psp->ras.feature_version   = le32_to_cpu(desc->fw_version);
+               psp->ras.size_bytes        = le32_to_cpu(desc->size_bytes);
+               psp->ras.start_addr        = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_HDCP:
-               psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
-               psp->ta_hdcp_ucode_size    = le32_to_cpu(desc->size_bytes);
-               psp->ta_hdcp_start_addr    = ucode_start_addr;
+               psp->hdcp.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->hdcp.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->hdcp.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_DTM:
-               psp->ta_dtm_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_dtm_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_dtm_start_addr     = ucode_start_addr;
+               psp->dtm.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->dtm.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->dtm.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAP:
-               psp->ta_rap_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_rap_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_rap_start_addr     = ucode_start_addr;
+               psp->rap.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->rap.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->rap.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_SECUREDISPLAY:
-               psp->ta_securedisplay_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_securedisplay_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_securedisplay_start_addr     = ucode_start_addr;
+               psp->securedisplay.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->securedisplay.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->securedisplay.start_addr       = ucode_start_addr;
                break;
        default:
                dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
index 6b16455..8ef2d28 100644
@@ -136,59 +136,32 @@ struct psp_asd_context {
        uint32_t                session_id;
 };
 
-struct psp_xgmi_context {
-       uint8_t                         initialized;
-       uint32_t                        session_id;
-       struct amdgpu_bo                *xgmi_shared_bo;
-       uint64_t                        xgmi_shared_mc_addr;
-       void                            *xgmi_shared_buf;
-       struct psp_xgmi_topology_info   top_info;
+struct ta_mem_context {
+       struct amdgpu_bo                *shared_bo;
+       uint64_t                shared_mc_addr;
+       void                    *shared_buf;
 };
 
-struct psp_ras_context {
-       /*ras fw*/
-       bool                    ras_initialized;
+struct ta_context {
+       bool                    initialized;
        uint32_t                session_id;
-       struct amdgpu_bo        *ras_shared_bo;
-       uint64_t                ras_shared_mc_addr;
-       void                    *ras_shared_buf;
-       struct amdgpu_ras       *ras;
+       struct ta_mem_context   mem_context;
 };
 
-struct psp_hdcp_context {
-       bool                    hdcp_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *hdcp_shared_bo;
-       uint64_t                hdcp_shared_mc_addr;
-       void                    *hdcp_shared_buf;
-       struct mutex            mutex;
-};
-
-struct psp_dtm_context {
-       bool                    dtm_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *dtm_shared_bo;
-       uint64_t                dtm_shared_mc_addr;
-       void                    *dtm_shared_buf;
-       struct mutex            mutex;
+struct ta_cp_context {
+       struct ta_context               context;
+       struct mutex                    mutex;
 };
 
-struct psp_rap_context {
-       bool                    rap_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *rap_shared_bo;
-       uint64_t                rap_shared_mc_addr;
-       void                    *rap_shared_buf;
-       struct mutex            mutex;
+struct psp_xgmi_context {
+       struct ta_context               context;
+       struct psp_xgmi_topology_info   top_info;
+       bool                            supports_extended_data;
 };
 
-struct psp_securedisplay_context {
-       bool                    securedisplay_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *securedisplay_shared_bo;
-       uint64_t                securedisplay_shared_mc_addr;
-       void                    *securedisplay_shared_buf;
-       struct mutex            mutex;
+struct psp_ras_context {
+       struct ta_context               context;
+       struct amdgpu_ras               *ras;
 };
 
 #define MEM_TRAIN_SYSTEM_SIGNATURE             0x54534942
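
The content-protection TAs (HDCP, DTM, RAP, securedisplay) also carried
identical per-TA mutex fields; ta_cp_context pairs the generic context with
that lock. Below is a minimal sketch of the serialized-invoke shape used by
psp_rap_invoke() and psp_securedisplay_invoke() earlier in this patch, with
pthread standing in for the kernel's struct mutex and ta_invoke() as a
hypothetical placeholder for psp_ta_invoke():

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct ta_context {
        bool     initialized;
        uint32_t session_id;
};

struct ta_cp_context {
        struct ta_context context;   /* generic TA state */
        pthread_mutex_t   mutex;     /* struct mutex in the driver,
                                      * set up in the *_load() path */
};

/* hypothetical placeholder for psp_ta_invoke() */
static int ta_invoke(uint32_t cmd_id, uint32_t session_id)
{
        (void)cmd_id;
        (void)session_id;
        return 0;
}

/* the lock/invoke/unlock shape of psp_rap_invoke() and
 * psp_securedisplay_invoke() */
static int cp_ta_invoke(struct ta_cp_context *ctx, uint32_t cmd_id)
{
        int ret;

        if (!ctx->context.initialized)
                return -1;            /* -EINVAL in the kernel */

        pthread_mutex_lock(&ctx->mutex);
        ret = ta_invoke(cmd_id, ctx->context.session_id);
        pthread_mutex_unlock(&ctx->mutex);
        return ret;
}
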
@@ -327,11 +300,8 @@ struct psp_context
        uint64_t                        tmr_mc_addr;
 
        /* asd firmware */
-       const struct firmware           *asd_fw;
-       uint32_t                        asd_fw_version;
-       uint32_t                        asd_feature_version;
-       uint32_t                        asd_ucode_size;
-       uint8_t                         *asd_start_addr;
+       const struct firmware   *asd_fw;
+       struct psp_bin_desc             asd;
 
        /* toc firmware */
        const struct firmware           *toc_fw;
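
struct psp_bin_desc is what replaces each hand-rolled version/size/address
field group here. Its definition is not part of this hunk; judging from the
fields referenced throughout the amdgpu_psp.c hunks above (fw_version,
feature_version, size_bytes, start_addr), it presumably reads:

#include <stdint.h>

struct psp_bin_desc {
        uint32_t fw_version;
        uint32_t feature_version;
        uint32_t size_bytes;
        uint8_t *start_addr;
};
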
@@ -356,36 +326,20 @@ struct psp_context
        /* xgmi ta firmware and buffer */
        const struct firmware           *ta_fw;
        uint32_t                        ta_fw_version;
-       uint32_t                        ta_xgmi_ucode_version;
-       uint32_t                        ta_xgmi_ucode_size;
-       uint8_t                         *ta_xgmi_start_addr;
-       uint32_t                        ta_ras_ucode_version;
-       uint32_t                        ta_ras_ucode_size;
-       uint8_t                         *ta_ras_start_addr;
-
-       uint32_t                        ta_hdcp_ucode_version;
-       uint32_t                        ta_hdcp_ucode_size;
-       uint8_t                         *ta_hdcp_start_addr;
-
-       uint32_t                        ta_dtm_ucode_version;
-       uint32_t                        ta_dtm_ucode_size;
-       uint8_t                         *ta_dtm_start_addr;
-
-       uint32_t                        ta_rap_ucode_version;
-       uint32_t                        ta_rap_ucode_size;
-       uint8_t                         *ta_rap_start_addr;
-
-       uint32_t                        ta_securedisplay_ucode_version;
-       uint32_t                        ta_securedisplay_ucode_size;
-       uint8_t                         *ta_securedisplay_start_addr;
+       struct psp_bin_desc             xgmi;
+       struct psp_bin_desc             ras;
+       struct psp_bin_desc             hdcp;
+       struct psp_bin_desc             dtm;
+       struct psp_bin_desc             rap;
+       struct psp_bin_desc             securedisplay;
 
        struct psp_asd_context          asd_context;
        struct psp_xgmi_context         xgmi_context;
-       struct psp_ras_context          ras;
-       struct psp_hdcp_context         hdcp_context;
-       struct psp_dtm_context          dtm_context;
-       struct psp_rap_context          rap_context;
-       struct psp_securedisplay_context        securedisplay_context;
+       struct psp_ras_context          ras_context;
+       struct ta_cp_context            hdcp_context;
+       struct ta_cp_context            dtm_context;
+       struct ta_cp_context            rap_context;
+       struct ta_cp_context            securedisplay_context;
        struct mutex                    mutex;
        struct psp_memory_training_context mem_train_ctx;
 
@@ -452,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
                        uint64_t cmd_gpu_addr, int cmd_size);
 
-int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
 int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
 int psp_xgmi_get_topology_info(struct psp_context *psp,
                               int number_devices,
-                              struct psp_xgmi_topology_info *topology);
+                              struct psp_xgmi_topology_info *topology,
+                              bool get_extended_data);
 int psp_xgmi_set_topology_info(struct psp_context *psp,
                               int number_devices,
                               struct psp_xgmi_topology_info *topology);
index 51909bf..12010c9 100644
@@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
                        dev_info(adev->dev, "RAP L0 validate test success.\n");
                } else {
                        rap_shared_mem = (struct ta_rap_shared_memory *)
-                                        adev->psp.rap_context.rap_shared_buf;
+                                        adev->psp.rap_context.context.mem_context.shared_buf;
                        rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
 
                        dev_info(adev->dev, "RAP test failed, the output is:\n");
@@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
 
-       if (!adev->psp.rap_context.rap_initialized)
+       if (!adev->psp.rap_context.context.initialized)
                return;
 
        debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
index 194f7cc..96a8fd0 100644
@@ -64,7 +64,6 @@ const char *ras_block_string[] = {
 };
 
 #define ras_err_str(i) (ras_error_string[ffs(i)])
-#define ras_block_str(i) (ras_block_string[i])
 
 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
 
@@ -530,7 +529,7 @@ static inline void put_obj(struct ras_manager *obj)
        if (obj && (--obj->use == 0))
                list_del(&obj->node);
        if (obj && (obj->use < 0))
-               DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+               DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
 }
 
 /* make one obj and return it. */
@@ -793,7 +792,6 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };
-               strcpy(head.name, ras_block_str(i));
                if (bypass) {
                        /*
                         * bypass psp. vbios enable ras for us.
@@ -1866,7 +1864,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 {
        struct amdgpu_ras_eeprom_control *control =
-               &adev->psp.ras.ras->eeprom_control;
+               &adev->psp.ras_context.ras->eeprom_control;
        struct eeprom_table_record *bps;
        int ret;
 
index 4d9c63f..abc5710 100644
@@ -53,6 +53,9 @@ enum amdgpu_ras_block {
        AMDGPU_RAS_BLOCK__LAST
 };
 
+extern const char *ras_block_string[];
+
+#define ras_block_str(i) (ras_block_string[i])
 #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
 #define AMDGPU_RAS_BLOCK_MASK  ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
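
With the name string about to be dropped from ras_common_if (next hunk), block
names are always derived from the block enum through this now-exported table.
A small runnable sketch of the same lookup pattern, using an abbreviated
stand-in enum and table:

#include <stdio.h>

/* abbreviated stand-ins; the real enum and table cover every RAS block */
enum ras_block { BLOCK__UMC, BLOCK__SDMA, BLOCK__GFX, BLOCK__LAST };

static const char *block_string[] = { "umc", "sdma", "gfx" };
#define block_str(i) (block_string[(i)])

int main(void)
{
        /* one table indexed by the block enum replaces a name string
         * copied into every ras_common_if instance */
        printf("%s\n", block_str(BLOCK__SDMA));  /* prints "sdma" */
        return 0;
}
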
 
@@ -306,8 +309,6 @@ struct ras_common_if {
        enum amdgpu_ras_block block;
        enum amdgpu_ras_error_type type;
        uint32_t sub_block_index;
-       /* block name */
-       char name[32];
 };
 
 struct amdgpu_ras {
@@ -470,8 +471,8 @@ struct ras_debug_if {
  * 8: feature disable
  */
 
-#define amdgpu_ras_get_context(adev)           ((adev)->psp.ras.ras)
-#define amdgpu_ras_set_context(adev, ras_con)  ((adev)->psp.ras.ras = (ras_con))
+#define amdgpu_ras_get_context(adev)           ((adev)->psp.ras_context.ras)
+#define amdgpu_ras_set_context(adev, ras_con)  ((adev)->psp.ras_context.ras = (ras_con))
 
 /* check if ras is supported on block, say, sdma, gfx */
 static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
index 9c11ced..e713d31 100644
@@ -48,6 +48,9 @@
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
+/* fence flag bit to indicate the fence is embedded in a job */
+#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT             (DMA_FENCE_FLAG_USER_BITS + 1)
+
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
@@ -118,7 +121,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
                      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout);
index de91d29..65debb6 100644
@@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
                adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
                adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->sdma.ras_if->sub_block_index = 0;
-               strcpy(adev->sdma.ras_if->name, "sdma");
        }
        fs_info.head = ih_info->head = *adev->sdma.ras_if;
 
index 1234539..cc7597a 100644
@@ -80,7 +80,7 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
 void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
        enum ta_securedisplay_command command_id)
 {
-       *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+       *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
        memset(*cmd, 0, sizeof(struct securedisplay_cmd));
        (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
        (*cmd)->cmd_id = command_id;
@@ -170,7 +170,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 
-       if (!adev->psp.securedisplay_context.securedisplay_initialized)
+       if (!adev->psp.securedisplay_context.context.initialized)
                return;
 
        debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
index 5fdecea..abd8469 100644
@@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
 FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
-FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
 FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
index e2e2624..7c2538d 100644
@@ -136,21 +136,11 @@ struct psp_firmware_header_v2_0 {
 /* version_major=1, version_minor=0 */
 struct ta_firmware_header_v1_0 {
        struct common_firmware_header header;
-       uint32_t ta_xgmi_ucode_version;
-       uint32_t ta_xgmi_offset_bytes;
-       uint32_t ta_xgmi_size_bytes;
-       uint32_t ta_ras_ucode_version;
-       uint32_t ta_ras_offset_bytes;
-       uint32_t ta_ras_size_bytes;
-       uint32_t ta_hdcp_ucode_version;
-       uint32_t ta_hdcp_offset_bytes;
-       uint32_t ta_hdcp_size_bytes;
-       uint32_t ta_dtm_ucode_version;
-       uint32_t ta_dtm_offset_bytes;
-       uint32_t ta_dtm_size_bytes;
-       uint32_t ta_securedisplay_ucode_version;
-       uint32_t ta_securedisplay_offset_bytes;
-       uint32_t ta_securedisplay_size_bytes;
+       struct psp_fw_legacy_bin_desc xgmi;
+       struct psp_fw_legacy_bin_desc ras;
+       struct psp_fw_legacy_bin_desc hdcp;
+       struct psp_fw_legacy_bin_desc dtm;
+       struct psp_fw_legacy_bin_desc securedisplay;
 };
 
 enum ta_fw_type {
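
Here the five per-TA ucode_version/offset_bytes/size_bytes triplets collapse
into one descriptor each. struct psp_fw_legacy_bin_desc is defined elsewhere
in the series; judging from the accesses in the psp_v10_0/psp_v11_0 hunks
below (fw_version, offset_bytes, size_bytes), it presumably looks like:

#include <stdint.h>

struct psp_fw_legacy_bin_desc {
        uint32_t fw_version;
        uint32_t offset_bytes;
        uint32_t size_bytes;
};
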
index 0c7c56a..a90029e 100644
@@ -41,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
                adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
                adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->umc.ras_if->sub_block_index = 0;
-               strcpy(adev->umc.ras_if->name, "umc");
        }
        ih_info.head = fs_info.head = *adev->umc.ras_if;
 
index 12a7cc2..ca058fb 100644
@@ -532,9 +532,9 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd_fw_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ta_ras_ucode_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.ta_xgmi_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ras.feature_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.xgmi.feature_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
index 2a88ed5..2af8860 100644
@@ -1218,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
        if (vm_flush_needed || pasid_mapping_needed) {
-               r = amdgpu_fence_emit(ring, &fence, 0);
+               r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
                        return r;
        }
index 258cf86..dda4f0c 100644
@@ -498,6 +498,32 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
        return  -EINVAL;
 }
 
+/*
+ * Devices that support extended data require the entire hive to be initialized
+ * with the shared memory buffer flag set.
+ *
+ * Hive locks and conditions apply - see amdgpu_xgmi_add_device
+ */
+static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
+                                                       bool set_extended_data)
+{
+       struct amdgpu_device *tmp_adev;
+       int ret;
+
+       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+               ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
+               if (ret) {
+                       dev_err(tmp_adev->dev,
+                               "XGMI: Failed to initialize xgmi session for data partition %i\n",
+                               set_extended_data);
+                       return ret;
+               }
+
+       }
+
+       return 0;
+}
+
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
        struct psp_xgmi_topology_info *top_info;
@@ -512,7 +538,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
        if (!adev->gmc.xgmi.pending_reset &&
            amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
-               ret = psp_xgmi_initialize(&adev->psp);
+               ret = psp_xgmi_initialize(&adev->psp, false, true);
                if (ret) {
                        dev_err(adev->dev,
                                "XGMI: Failed to initialize xgmi session\n");
@@ -575,7 +601,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                /* get latest topology info for each device from psp */
                list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                        ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
-                                       &tmp_adev->psp.xgmi_context.top_info);
+                                       &tmp_adev->psp.xgmi_context.top_info, false);
                        if (ret) {
                                dev_err(tmp_adev->dev,
                                        "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -585,6 +611,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                                goto exit_unlock;
                        }
                }
+
+               /* get topology again for hives that support extended data */
+               if (adev->psp.xgmi_context.supports_extended_data) {
+
+                       /* initialize the hive to get extended data.  */
+                       ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
+                       if (ret)
+                               goto exit_unlock;
+
+                       /* get the extended data. */
+                       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+                               ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+                                               &tmp_adev->psp.xgmi_context.top_info, true);
+                               if (ret) {
+                                       dev_err(tmp_adev->dev,
+                                               "XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
+                                               tmp_adev->gmc.xgmi.node_id,
+                                               tmp_adev->gmc.xgmi.hive_id, ret);
+                                       goto exit_unlock;
+                               }
+                       }
+
+                       /* initialize the hive to get non-extended data for the next round. */
+                       ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
+                       if (ret)
+                               goto exit_unlock;
+
+               }
        }
 
        if (!ret && !adev->gmc.xgmi.pending_reset)
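
Condensed, the extended-data flow added above is: query the base topology for
every device, and if the SOS firmware advertises extended data, reload each
XGMI TA in the hive with the extended flag set, query again, then reload once
more with the flag cleared so later rounds see the default view. A
non-compilable sketch of that per-device sequence, reusing identifiers from
the hunk and with error handling trimmed:

/* per remote device, under the hive lock */
psp_xgmi_get_topology_info(&tmp_adev->psp, count,
                           &tmp_adev->psp.xgmi_context.top_info, false);

if (adev->psp.xgmi_context.supports_extended_data) {
        amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
        psp_xgmi_get_topology_info(&tmp_adev->psp, count,
                                   &tmp_adev->psp.xgmi_context.top_info, true);
        amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
}
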
@@ -663,7 +717,6 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
                adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
                adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gmc.xgmi.ras_if->sub_block_index = 0;
-               strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
        }
        ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
index 1769c4c..00a2b36 100644
@@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
 };
 
-/**
+/*
  * This shader is used to clear VGPRS and LDS, and also write the input
  * pattern into the write back buffer, which will be used by the driver to
  * check whether all SIMDs have been covered.
@@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
        { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * The below shaders are used to clear SGPRS, and also write the input
  * pattern into the write back buffer. The first two dispatches should be
  * scheduled simultaneously, which makes sure that all SGPRS could be
@@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
        { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * This shader is used to clear the uninitialized sgprs after the above
  * two dispatches; because of a hardware feature, dispatch 0 couldn't clear
  * top hole sgprs. Therefore 4 waves per SIMD are needed to cover these sgprs
index 8fca72e..497b86c 100644
@@ -75,9 +75,8 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
                max_physical_node_id     = 7;
                break;
        case CHIP_ALDEBARAN:
-               /* just using duplicates for Aldebaran support, revisit later */
-               max_num_physical_nodes   = 8;
-               max_physical_node_id     = 7;
+               max_num_physical_nodes   = 16;
+               max_physical_node_id     = 15;
                break;
        default:
                return -EINVAL;
index 2095863..2cdab80 100644
@@ -24,9 +24,7 @@
 #ifndef __MMSCH_V1_0_H__
 #define __MMSCH_V1_0_H__
 
-#define MMSCH_VERSION_MAJOR    1
-#define MMSCH_VERSION_MINOR    0
-#define MMSCH_VERSION  (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+#define MMSCH_VERSION  0x1
 
 enum mmsch_v1_0_command_type {
        MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
index 9f7aac4..a35e6d8 100644
@@ -96,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
 
 static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 {
-       int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+       int r;
+       uint64_t timeout, now;
+
+       now = (uint64_t)ktime_to_ms(ktime_get());
+       timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
 
        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
@@ -104,8 +108,8 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
                        return 0;
 
                msleep(10);
-               timeout -= 10;
-       } while (timeout > 1);
+               now = (uint64_t)ktime_to_ms(ktime_get());
+       } while (timeout > now);
 
 
        return -ETIME;
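
The old loop subtracted a fixed 10 ms per iteration, so any oversleep inside
msleep() silently stretched the real timeout; the rework computes an absolute
deadline once and compares the clock against it on each pass. A small
userspace analogue of the same pattern, with clock_gettime() standing in for
ktime_get():

#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* poll ready() every 10 ms until an absolute deadline passes */
static int poll_with_deadline(int (*ready)(void), uint64_t timeout_ms)
{
        uint64_t deadline = now_ms() + timeout_ms;
        struct timespec nap = { .tv_sec = 0, .tv_nsec = 10 * 1000000L };

        do {
                if (ready())
                        return 0;
                nanosleep(&nap, NULL);  /* msleep(10) in the kernel */
        } while (now_ms() < deadline);

        return -1;                      /* -ETIME in the kernel */
}
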
@@ -149,9 +153,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
 static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
 {
-       int r;
+       int r, retry = 1;
        enum idh_event event = -1;
 
+send_request:
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
        switch (req) {
@@ -170,6 +175,9 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
+                       if (retry++ < 2)
+                               goto send_request;
+
                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
                                return r;
@@ -279,6 +287,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
        amdgpu_virt_fini_data_exchange(adev);
        atomic_set(&adev->in_gpu_reset, 1);
 
+       xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;
index 9f58086..73887b0 100644
@@ -37,7 +37,8 @@ enum idh_request {
        IDH_REQ_GPU_RESET_ACCESS,
        IDH_REQ_GPU_INIT_DATA,
 
-       IDH_LOG_VF_ERROR       = 200,
+       IDH_LOG_VF_ERROR        = 200,
+       IDH_READY_TO_RESET      = 201,
 };
 
 enum idh_event {
index cef9297..1c94a14 100644
@@ -372,13 +372,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
                                                "errors detected in %s block, "
                                                "no user action is needed.\n",
                                                obj->err_data.ce_count,
-                                               adev->nbio.ras_if->name);
+                                               ras_block_str(adev->nbio.ras_if->block));
 
                        if (err_data.ue_count)
                                dev_info(adev->dev, "%ld uncorrectable hardware "
                                                "errors detected in %s block\n",
                                                obj->err_data.ue_count,
-                                               adev->nbio.ras_if->name);
+                                               ras_block_str(adev->nbio.ras_if->block));
                }
 
                dev_info(adev->dev, "RAS controller interrupt triggered "
index 4b1cc5e..5872d68 100644
@@ -84,29 +84,29 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
-               adev->psp.ta_hdcp_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-               adev->psp.ta_hdcp_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-               adev->psp.ta_hdcp_start_addr =
+               adev->psp.hdcp.feature_version =
+                       le32_to_cpu(ta_hdr->hdcp.fw_version);
+               adev->psp.hdcp.size_bytes =
+                       le32_to_cpu(ta_hdr->hdcp.size_bytes);
+               adev->psp.hdcp.start_addr =
                        (uint8_t *)ta_hdr +
                        le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
-               adev->psp.ta_dtm_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-               adev->psp.ta_dtm_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-               adev->psp.ta_dtm_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
-
-               adev->psp.ta_securedisplay_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
-               adev->psp.ta_securedisplay_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
-               adev->psp.ta_securedisplay_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+               adev->psp.dtm.feature_version =
+                       le32_to_cpu(ta_hdr->dtm.fw_version);
+               adev->psp.dtm.size_bytes =
+                       le32_to_cpu(ta_hdr->dtm.size_bytes);
+               adev->psp.dtm.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+               adev->psp.securedisplay.feature_version =
+                       le32_to_cpu(ta_hdr->securedisplay.fw_version);
+               adev->psp.securedisplay.size_bytes =
+                       le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+               adev->psp.securedisplay.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
 
                adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
        }
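
Folding the flat ta_<name>_ucode_version/size/start triples into one sub-structure per TA lets a single helper replace three copies of the same open-coded assignments. A sketch of the idea, assuming a descriptor shaped roughly like the fields used in the diff (illustrative, not the exact amdgpu definition):

#include <stdint.h>

struct ta_bin_desc {
        uint32_t feature_version;
        uint32_t size_bytes;
        uint8_t *start_addr;
};

/* One filler serves hdcp, dtm, and securedisplay alike. */
static void fill_ta_desc(struct ta_bin_desc *desc, uint32_t fw_version,
                         uint32_t size_bytes, uint8_t *base, uint32_t offset)
{
        desc->feature_version = fw_version;
        desc->size_bytes = size_bytes;
        desc->start_addr = base + offset;
}
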
index 8862684..29bf9f0 100644
@@ -151,15 +151,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                                goto out2;
 
                        ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-                       adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
-                       adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
-                       adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+                       adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
+                       adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
+                       adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
                                le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
                        adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-                       adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
-                       adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
-                       adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
-                               le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
+                       adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
+                       adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
+                       adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+                               le32_to_cpu(ta_hdr->ras.offset_bytes);
                }
                break;
        case CHIP_NAVI10:
@@ -186,17 +186,17 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                                goto out2;
 
                        ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-                       adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-                       adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-                       adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+                       adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
+                       adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
+                       adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
                                le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
                        adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 
-                       adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-                       adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-                       adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                               le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+                       adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
+                       adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
+                       adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+                               le32_to_cpu(ta_hdr->dtm.offset_bytes);
                }
                break;
        case CHIP_SIENNA_CICHLID:
index 0c908d4..cc64940 100644
@@ -84,23 +84,23 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
-               adev->psp.ta_hdcp_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-               adev->psp.ta_hdcp_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-               adev->psp.ta_hdcp_start_addr =
+               adev->psp.hdcp.feature_version =
+                       le32_to_cpu(ta_hdr->hdcp.fw_version);
+               adev->psp.hdcp.size_bytes =
+                       le32_to_cpu(ta_hdr->hdcp.size_bytes);
+               adev->psp.hdcp.start_addr =
                        (uint8_t *)ta_hdr +
                        le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
                adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 
-               adev->psp.ta_dtm_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-               adev->psp.ta_dtm_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-               adev->psp.ta_dtm_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+               adev->psp.dtm.feature_version =
+                       le32_to_cpu(ta_hdr->dtm.fw_version);
+               adev->psp.dtm.size_bytes =
+                       le32_to_cpu(ta_hdr->dtm.size_bytes);
+               adev->psp.dtm.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->dtm.offset_bytes);
        }
 
        return 0;
index cce7127..da815a9 100644
@@ -134,7 +134,8 @@ struct ta_xgmi_shared_memory {
        uint32_t                        cmd_id;
        uint32_t                        resp_id;
        enum ta_xgmi_status             xgmi_status;
-       uint32_t                        reserved;
+       uint8_t                         flag_extend_link_record;
+       uint8_t                         reserved0[3];
        union ta_xgmi_cmd_input         xgmi_in_message;
        union ta_xgmi_cmd_output        xgmi_out_message;
 };
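
ta_xgmi_shared_memory is an ABI shared with the PSP firmware, so the new flag plus three reserved bytes must occupy exactly the four bytes the old reserved word did. A standalone check of that invariant:

#include <stdint.h>

struct old_pad { uint32_t reserved; };
struct new_pad { uint8_t flag_extend_link_record; uint8_t reserved0[3]; };

/* Layout must be unchanged for the firmware interface to stay compatible. */
_Static_assert(sizeof(struct old_pad) == sizeof(struct new_pad),
               "flag + 3 reserved bytes must still be 4 bytes");
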
index fe9a7cc..42a35d9 100644
@@ -904,7 +904,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_TOPAZ:
-               return amdgpu_dpm_is_baco_supported(adev);
+               /* Temporarily disable BACO for this specific Polaris12 SKU */
+               if ((adev->pdev->device == 0x699F) &&
+                    (adev->pdev->revision == 0xC7) &&
+                    (adev->pdev->subsystem_vendor == 0x1028) &&
+                    (adev->pdev->subsystem_device == 0x0039))
+                       return false;
+               else
+                       return amdgpu_dpm_is_baco_supported(adev);
        default:
                return false;
        }
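
The exclusion above is open-coded for a single SKU; should more SKUs ever need it, a small deny-list table keeps the switch readable. A hedged sketch (the table is illustrative, not existing amdgpu infrastructure):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct baco_quirk {
        uint16_t device;
        uint8_t  revision;
        uint16_t subsystem_vendor;
        uint16_t subsystem_device;
};

/* The one Polaris12 SKU from the diff; further entries can be appended. */
static const struct baco_quirk baco_denylist[] = {
        { 0x699F, 0xC7, 0x1028, 0x0039 },
};

static bool baco_disallowed(uint16_t dev, uint8_t rev,
                            uint16_t sub_ven, uint16_t sub_dev)
{
        size_t i;

        for (i = 0; i < sizeof(baco_denylist) / sizeof(baco_denylist[0]); i++) {
                const struct baco_quirk *q = &baco_denylist[i];

                if (q->device == dev && q->revision == rev &&
                    q->subsystem_vendor == sub_ven &&
                    q->subsystem_device == sub_dev)
                        return true;
        }
        return false;
}
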
index a972ef5..f8fce9d 100644
@@ -211,6 +211,15 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
        WARN_ON(!old);
 }
 
+static void program_trap_handler_settings(struct device_queue_manager *dqm,
+                               struct qcm_process_device *qpd)
+{
+       if (dqm->dev->kfd2kgd->program_trap_handler_settings)
+               dqm->dev->kfd2kgd->program_trap_handler_settings(
+                                               dqm->dev->kgd, qpd->vmid,
+                                               qpd->tba_addr, qpd->tma_addr);
+}
+
 static int allocate_vmid(struct device_queue_manager *dqm,
                        struct qcm_process_device *qpd,
                        struct queue *q)
@@ -241,6 +250,10 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 
        program_sh_mem_settings(dqm, qpd);
 
+       if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
+           dqm->dev->cwsr_enabled)
+               program_trap_handler_settings(dqm, qpd);
+
        /* qpd->page_table_base is set earlier when register_process()
         * is called, i.e. when the first queue is created.
         */
@@ -582,7 +595,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                }
 
                retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
-                               KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+                               (dqm->dev->cwsr_enabled ?
+                                KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
+                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval) {
                        pr_err("destroy mqd failed\n");
@@ -675,7 +690,9 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                        continue;
 
                retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
-                               KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+                               (dqm->dev->cwsr_enabled ?
+                                KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
+                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval && !ret)
                        /* Return the first error, but keep going to
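
program_trap_handler_settings() above only reaches into the kfd2kgd interface when the backend implements the hook, the usual optional-ops guard. In miniature:

struct backend_ops {
        /* NULL when the backend does not implement the hook */
        void (*program_trap_handler)(unsigned int vmid,
                                     unsigned long long tba,
                                     unsigned long long tma);
};

static void maybe_program_trap_handler(const struct backend_ops *ops,
                                       unsigned int vmid,
                                       unsigned long long tba,
                                       unsigned long long tma)
{
        if (ops->program_trap_handler)
                ops->program_trap_handler(vmid, tba, tma);
        /* else: skip silently, like the helper in the diff */
}
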
index 7df69b7..ecc390c 100644
@@ -2675,22 +2675,26 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
        return 0;
 }
 
-/* svm_range_best_prefetch_location - decide the best prefetch location
+/**
+ * svm_range_best_prefetch_location - decide the best prefetch location
  * @prange: svm range structure
  *
  * For xnack off:
- * If range map to single GPU, the best acutal location is prefetch loc, which
+ * If the range maps to a single GPU, the best prefetch location is prefetch_loc, which
  * can be CPU or GPU.
  *
- * If range map to multiple GPUs, only if mGPU connection on xgmi same hive,
- * the best actual location could be prefetch_loc GPU. If mGPU connection on
- * PCIe, the best actual location is always CPU, because GPU cannot access vram
- * of other GPUs, assuming PCIe small bar (large bar support is not upstream).
+ * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
+ * location is the prefetch_loc GPU only if all mGPUs are on the same XGMI
+ * hive; otherwise it is always the CPU, because a GPU cannot have a coherent
+ * mapping of another GPU's VRAM, even over a large-BAR PCIe connection.
  *
  * For xnack on:
- * The best actual location is prefetch location. If mGPU connection on xgmi
- * same hive, range map to multiple GPUs. Otherwise, the range only map to
- * actual location GPU. Other GPU access vm fault will trigger migration.
+ * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
+ * prefetch_loc; accesses from other GPUs raise vm faults and trigger migration.
+ *
+ * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
+ * prefetch_loc GPU only if all mGPUs are on the same XGMI hive; otherwise it
+ * is always the CPU.
  *
  * Context: Process context
  *
@@ -2710,11 +2714,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 
        p = container_of(prange->svms, struct kfd_process, svms);
 
-       /* xnack on */
-       if (p->xnack_enabled)
-               goto out;
-
-       /* xnack off */
        if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
                goto out;
 
@@ -2724,8 +2723,12 @@ svm_range_best_prefetch_location(struct svm_range *prange)
                best_loc = 0;
                goto out;
        }
-       bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
-                 MAX_GPU_INSTANCE);
+
+       if (p->xnack_enabled)
+               bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
+       else
+               bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
+                         MAX_GPU_INSTANCE);
 
        for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
                pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -3027,6 +3030,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
                 start + size - 1, nattr);
 
+       /* Flush pending deferred work to avoid racing with deferred actions from
+        * previous memory map changes (e.g. munmap). Concurrent memory map changes
+        * can still race with get_attr because we don't hold the mmap lock. But that
+        * would be a race condition in the application anyway, and undefined
+        * behaviour is acceptable in that case.
+        */
+       flush_work(&p->svms.deferred_list_work);
+
        mmap_read_lock(mm);
        if (!svm_range_is_valid(mm, start, size)) {
                pr_debug("invalid range\n");
index 3e28f17..8167236 100644
@@ -1044,10 +1044,10 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-static void event_mall_stutter(struct work_struct *work)
+static void vblank_control_worker(struct work_struct *work)
 {
-
-       struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+       struct vblank_control_work *vblank_work =
+               container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;
 
        mutex_lock(&dm->dc_lock);
@@ -1061,23 +1061,25 @@ static void event_mall_stutter(struct work_struct *work)
 
        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
 
-       mutex_unlock(&dm->dc_lock);
-}
-
-static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
-{
-       struct vblank_workqueue *vblank_work;
-
-       vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
-       if (ZERO_OR_NULL_PTR(vblank_work)) {
-               kfree(vblank_work);
-               return NULL;
+       /* Control PSR based on vblank requirements from OS */
+       if (vblank_work->stream && vblank_work->stream->link) {
+               if (vblank_work->enable) {
+                       if (vblank_work->stream->link->psr_settings.psr_allow_active)
+                               amdgpu_dm_psr_disable(vblank_work->stream);
+               } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+                          !vblank_work->stream->link->psr_settings.psr_allow_active &&
+                          vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+                       amdgpu_dm_psr_enable(vblank_work->stream);
+               }
        }
 
-       INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
+       mutex_unlock(&dm->dc_lock);
+
+       dc_stream_release(vblank_work->stream);
 
-       return vblank_work;
+       kfree(vblank_work);
 }
+
 #endif
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
@@ -1220,12 +1222,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
-               adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
-
-               if (!adev->dm.vblank_workqueue)
+               adev->dm.vblank_control_workqueue =
+                       create_singlethread_workqueue("dm_vblank_control_workqueue");
+               if (!adev->dm.vblank_control_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
-               else
-                       DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
 #endif
 
@@ -1298,6 +1298,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 {
        int i;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       if (adev->dm.vblank_control_workqueue) {
+               destroy_workqueue(adev->dm.vblank_control_workqueue);
+               adev->dm.vblank_control_workqueue = NULL;
+       }
+#endif
+
        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }
@@ -1321,14 +1328,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
                dc_deinit_callbacks(adev->dm.dc);
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (adev->dm.vblank_workqueue) {
-               adev->dm.vblank_workqueue->dm = NULL;
-               kfree(adev->dm.vblank_workqueue);
-               adev->dm.vblank_workqueue = NULL;
-       }
-#endif
-
        dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
@@ -6000,7 +5999,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct amdgpu_display_manager *dm = &adev->dm;
-       unsigned long flags;
+       struct vblank_control_work *work;
 #endif
        int rc = 0;
 
@@ -6025,12 +6024,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
                return 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       spin_lock_irqsave(&dm->vblank_lock, flags);
-       dm->vblank_workqueue->dm = dm;
-       dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
-       dm->vblank_workqueue->enable = enable;
-       spin_unlock_irqrestore(&dm->vblank_lock, flags);
-       schedule_work(&dm->vblank_workqueue->mall_work);
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+               return -ENOMEM;
+
+       INIT_WORK(&work->work, vblank_control_worker);
+       work->dm = dm;
+       work->acrtc = acrtc;
+       work->enable = enable;
+
+       if (acrtc_state->stream) {
+               dc_stream_retain(acrtc_state->stream);
+               work->stream = acrtc_state->stream;
+       }
+
+       queue_work(dm->vblank_control_workqueue, &work->work);
 #endif
 
        return 0;
@@ -8635,6 +8643,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               /*
+                * If PSR or idle optimizations are enabled then flush out
+                * any pending work before hardware programming.
+                */
+               flush_workqueue(dm->vblank_control_workqueue);
+#endif
+
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
@@ -8703,16 +8719,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
                                !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
-               else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
-                               !acrtc_state->stream->link->psr_settings.psr_allow_active) {
-                       struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-                                       acrtc_state->stream->dm_stream_context;
+
+               /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+               if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+                   acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+                       struct amdgpu_dm_connector *aconn =
+                               (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
 
                        if (aconn->psr_skip_count > 0)
                                aconn->psr_skip_count--;
-                       else
-                               amdgpu_dm_psr_enable(acrtc_state->stream);
+
+                       /* Allow PSR when skip count is 0. */
+                       acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+               } else {
+                       acrtc_attach->dm_irq_params.allow_psr_entry = false;
                }
 
                mutex_unlock(&dm->dc_lock);
@@ -8961,8 +8981,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
        if (dc_state) {
                /* if there mode set or reset, disable eDP PSR */
-               if (mode_set_reset_required)
+               if (mode_set_reset_required) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                       flush_workqueue(dm->vblank_control_workqueue);
+#endif
                        amdgpu_dm_psr_disable_all(dm);
+               }
 
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
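
The rework replaces one long-lived, spinlock-guarded work structure with a work item allocated per vblank event; each item carries a retained stream reference and is freed by the worker, so a second event can no longer overwrite in-flight state. A rough pthread analogue of that ownership flow (one thread per job for brevity; a real queue would reuse a worker):

#include <pthread.h>
#include <stdlib.h>

struct vblank_job {
        int enable;
        /* the kernel version also carries dm, acrtc, and a retained stream */
};

static void *vblank_worker(void *arg)
{
        struct vblank_job *job = arg;

        /* ... act on job->enable, toggle PSR under the lock ... */
        free(job);              /* the worker owns and frees the job */
        return NULL;
}

static int queue_vblank_event(int enable)
{
        struct vblank_job *job = calloc(1, sizeof(*job));
        pthread_t tid;

        if (!job)
                return -1;      /* -ENOMEM in the kernel version */
        job->enable = enable;

        /* stand-in for queue_work() on the single-threaded workqueue */
        if (pthread_create(&tid, NULL, vblank_worker, job)) {
                free(job);
                return -1;
        }
        return pthread_detach(tid);
}
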
index ab1670b..d1d353a 100644
@@ -60,6 +60,7 @@ enum aux_return_code_type;
 
 /* Forward declarations */
 struct amdgpu_device;
+struct amdgpu_crtc;
 struct drm_device;
 struct dc;
 struct amdgpu_bo;
@@ -86,16 +87,18 @@ struct dm_compressor_info {
 };
 
 /**
- * struct vblank_workqueue - Works to be executed in a separate thread during vblank
- * @mall_work: work for mall stutter
+ * struct vblank_control_work - Work data for vblank control
+ * @work: Kernel work data for the work event
  * @dm: amdgpu display manager device
- * @otg_inst: otg instance of which vblank is being set
- * @enable: true if enable vblank
+ * @acrtc: amdgpu CRTC instance for which the event has occurred
+ * @stream: DC stream for which the event has occurred
+ * @enable: true if enabling vblank
  */
-struct vblank_workqueue {
-       struct work_struct mall_work;
+struct vblank_control_work {
+       struct work_struct work;
        struct amdgpu_display_manager *dm;
-       int otg_inst;
+       struct amdgpu_crtc *acrtc;
+       struct dc_stream_state *stream;
        bool enable;
 };
 
@@ -380,11 +383,11 @@ struct amdgpu_display_manager {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        /**
-        * @vblank_workqueue:
+        * @vblank_control_workqueue:
         *
-        * amdgpu workqueue during vblank
+        * Deferred work for vblank control events.
         */
-       struct vblank_workqueue *vblank_workqueue;
+       struct workqueue_struct *vblank_control_workqueue;
 #endif
 
        struct drm_atomic_state *cached_state;
index 8e39e92..c5f1dc3 100644
@@ -79,12 +79,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
 
        struct ta_hdcp_shared_memory *hdcp_cmd;
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
                return NULL;
        }
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
@@ -105,12 +105,12 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
 
        struct ta_hdcp_shared_memory *hdcp_cmd;
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
                return -EINVAL;
        }
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
@@ -414,12 +414,12 @@ static bool enable_assr(void *handle, struct dc_link *link)
        struct ta_dtm_shared_memory *dtm_cmd;
        bool res = true;
 
-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.context.initialized) {
                DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
                return false;
        }
 
-       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
        mutex_lock(&psp->dtm_context.mutex);
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
index 40f617b..4aba0e8 100644
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
                /*allocate a new amdgpu_dm_irq_handler_data*/
-               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+               handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
                if (!handler_data_add) {
                        DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                        return;
index f3b93ba..79b5f99 100644
@@ -33,6 +33,7 @@ struct dm_irq_params {
        struct mod_vrr_params vrr_params;
        struct dc_stream_state *stream;
        int active_planes;
+       bool allow_psr_entry;
        struct mod_freesync_config freesync_config;
 
 #ifdef CONFIG_DEBUG_FS
index 5568d4e..1bcba69 100644
@@ -213,6 +213,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                        drm_connector_update_edid_property(
                                &aconnector->base,
                                NULL);
+
+                       DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
+                       if (!aconnector->dc_sink) {
+                               struct dc_sink *dc_sink;
+                               struct dc_sink_init_data init_params = {
+                                       .link = aconnector->dc_link,
+                                       .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+                               dc_sink = dc_link_add_remote_sink(
+                                       aconnector->dc_link,
+                                       NULL,
+                                       0,
+                                       &init_params);
+
+                               if (!dc_sink) {
+                                       DRM_ERROR("Unable to add a remote sink\n");
+                                       return 0;
+                               }
+
+                               dc_sink->priv = aconnector;
+                               aconnector->dc_sink = dc_sink;
+                       }
+
                        return ret;
                }
 
index 605e297..c798c65 100644
@@ -1481,6 +1481,22 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
        return true;
 }
 
+static inline bool should_update_pipe_for_stream(
+               struct dc_state *context,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_stream_state *stream)
+{
+       return (pipe_ctx->stream && pipe_ctx->stream == stream);
+}
+
+static inline bool should_update_pipe_for_plane(
+               struct dc_state *context,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_plane_state *plane_state)
+{
+       return (pipe_ctx->plane_state == plane_state);
+}
+
 void dc_enable_stereo(
        struct dc *dc,
        struct dc_state *context,
@@ -1491,12 +1507,15 @@ void dc_enable_stereo(
        struct pipe_ctx *pipe;
 
        for (i = 0; i < MAX_PIPES; i++) {
-               if (context != NULL)
+               if (context != NULL) {
                        pipe = &context->res_ctx.pipe_ctx[i];
-               else
+               } else {
+                       context = dc->current_state;
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-               for (j = 0 ; pipe && j < stream_count; j++)  {
-                       if (streams[j] && streams[j] == pipe->stream &&
+               }
+
+               for (j = 0; pipe && j < stream_count; j++)  {
+                       if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
                                dc->hwss.setup_stereo)
                                dc->hwss.setup_stereo(pipe, dc);
                }
@@ -1530,6 +1549,12 @@ void dc_z10_restore(struct dc *dc)
        if (dc->hwss.z10_restore)
                dc->hwss.z10_restore(dc);
 }
+
+void dc_z10_save_init(struct dc *dc)
+{
+       if (dc->hwss.z10_save_init)
+               dc->hwss.z10_save_init(dc);
+}
 #endif
 /*
  * Applies given context to HW and copy it into current context.
@@ -2623,6 +2648,7 @@ static void commit_planes_for_stream(struct dc *dc,
 {
        int i, j;
        struct pipe_ctx *top_pipe_to_program = NULL;
+       bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        dc_z10_restore(dc);
@@ -2694,7 +2720,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                                top_pipe_to_program->stream_res.tg);
                }
 
-       if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, true);
        else
                /* Lock the top pipe while updating plane addrs, since freesync requires
@@ -2717,7 +2743,7 @@ static void commit_planes_for_stream(struct dc *dc,
                if (dc->hwss.program_front_end_for_ctx)
                        dc->hwss.program_front_end_for_ctx(dc, context);
 
-               if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+               if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
                        dc->hwss.interdependent_update_lock(dc, context, false);
                else
                        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2733,14 +2759,14 @@ static void commit_planes_for_stream(struct dc *dc,
                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
                                if (!pipe_ctx->plane_state)
                                        continue;
-                               if (pipe_ctx->plane_state != plane_state)
+                               if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
                                        continue;
-                               plane_state->triplebuffer_flips = false;
+                               pipe_ctx->plane_state->triplebuffer_flips = false;
                                if (update_type == UPDATE_TYPE_FAST &&
                                        dc->hwss.program_triplebuffer != NULL &&
-                                       !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+                                       !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
                                                /*triple buffer for VUpdate  only*/
-                                               plane_state->triplebuffer_flips = true;
+                                               pipe_ctx->plane_state->triplebuffer_flips = true;
                                }
                        }
                        if (update_type == UPDATE_TYPE_FULL) {
@@ -2756,8 +2782,7 @@ static void commit_planes_for_stream(struct dc *dc,
 
                if (!pipe_ctx->top_pipe &&
                        !pipe_ctx->prev_odm_pipe &&
-                       pipe_ctx->stream &&
-                       pipe_ctx->stream == stream) {
+                       should_update_pipe_for_stream(context, pipe_ctx, stream)) {
                        struct dc_stream_status *stream_status = NULL;
 
                        if (!pipe_ctx->plane_state)
@@ -2810,15 +2835,15 @@ static void commit_planes_for_stream(struct dc *dc,
                                for (j = 0; j < dc->res_pool->pipe_count; j++) {
                                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
-                                       if (pipe_ctx->stream != stream)
+                                       if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
                                                continue;
 
-                                       if (pipe_ctx->plane_state != plane_state)
+                                       if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
                                                continue;
 
                                        // GSL has to be used for flip immediate
                                        dc->hwss.set_flip_control_gsl(pipe_ctx,
-                                                       plane_state->flip_immediate);
+                                                       pipe_ctx->plane_state->flip_immediate);
                                }
                        }
 
@@ -2829,25 +2854,26 @@ static void commit_planes_for_stream(struct dc *dc,
                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
-                               if (pipe_ctx->stream != stream)
+                               if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
                                        continue;
 
-                               if (pipe_ctx->plane_state != plane_state)
+                               if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
                                        continue;
+
                                /*program triple buffer after lock based on flip type*/
                                if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                                        /*only enable triplebuffer for  fast_update*/
                                        dc->hwss.program_triplebuffer(
-                                               dc, pipe_ctx, plane_state->triplebuffer_flips);
+                                               dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
                                }
-                               if (srf_updates[i].flip_addr)
+                               if (pipe_ctx->plane_state->update_flags.bits.addr_update)
                                        dc->hwss.update_plane_addr(dc, pipe_ctx);
                        }
                }
 
        }
 
-       if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, false);
        else
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2891,7 +2917,7 @@ static void commit_planes_for_stream(struct dc *dc,
                        continue;
 
                if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
-                               !pipe_ctx->stream || pipe_ctx->stream != stream ||
+                               !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
                                !pipe_ctx->plane_state->update_flags.bits.addr_update ||
                                pipe_ctx->plane_state->skip_manual_trigger)
                        continue;
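
A note on the new predicates: turning a "pipe_ctx->plane_state != plane_state" guard into a named should_update_pipe_for_plane() helper flips the polarity, so every skip condition takes a negation. In miniature:

#include <stdbool.h>

struct pipe { const void *stream; const void *plane; };

static bool pipe_matches_plane(const struct pipe *p, const void *plane)
{
        return p->plane == plane;
}

/* "if (p->plane != plane) continue;" becomes the negated predicate. */
static int count_matching(const struct pipe *pipes, int n, const void *plane)
{
        int i, count = 0;

        for (i = 0; i < n; i++) {
                if (!pipe_matches_plane(&pipes[i], plane))
                        continue;
                count++;
        }
        return count;
}
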
index 327fd19..f0f54f4 100644
@@ -246,6 +246,40 @@ struct dc_stream_status *dc_stream_get_status(
        return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
+static void program_cursor_attributes(
+       struct dc *dc,
+       struct dc_stream_state *stream,
+       const struct dc_cursor_attributes *attributes)
+{
+       int i;
+       struct resource_context *res_ctx;
+       struct pipe_ctx *pipe_to_program = NULL;
+
+       if (!stream)
+               return;
+
+       res_ctx = &dc->current_state->res_ctx;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+               if (pipe_ctx->stream != stream)
+                       continue;
+
+               if (!pipe_to_program) {
+                       pipe_to_program = pipe_ctx;
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
+               }
+
+               dc->hwss.set_cursor_attribute(pipe_ctx);
+               if (dc->hwss.set_cursor_sdr_white_level)
+                       dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+       }
+
+       if (pipe_to_program)
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
+}
+
 #ifndef TRIM_FSFT
 /*
  * dc_optimize_timing_for_fsft() - dc to optimize timing
@@ -270,10 +304,7 @@ bool dc_stream_set_cursor_attributes(
        struct dc_stream_state *stream,
        const struct dc_cursor_attributes *attributes)
 {
-       int i;
        struct dc  *dc;
-       struct resource_context *res_ctx;
-       struct pipe_ctx *pipe_to_program = NULL;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool reset_idle_optimizations = false;
 #endif
@@ -293,7 +324,6 @@ bool dc_stream_set_cursor_attributes(
        }
 
        dc = stream->ctx->dc;
-       res_ctx = &dc->current_state->res_ctx;
        stream->cursor_attributes = *attributes;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -305,11 +335,39 @@ bool dc_stream_set_cursor_attributes(
        }
 
 #endif
+       program_cursor_attributes(dc, stream, attributes);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /* re-enable idle optimizations if necessary */
+       if (reset_idle_optimizations)
+               dc_allow_idle_optimizations(dc, true);
+
+#endif
+       return true;
+}
+
+static void program_cursor_position(
+       struct dc *dc,
+       struct dc_stream_state *stream,
+       const struct dc_cursor_position *position)
+{
+       int i;
+       struct resource_context *res_ctx;
+       struct pipe_ctx *pipe_to_program = NULL;
+
+       if (!stream)
+               return;
+
+       res_ctx = &dc->current_state->res_ctx;
 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-               if (pipe_ctx->stream != stream)
+               if (pipe_ctx->stream != stream ||
+                               (!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
+                               !pipe_ctx->plane_state ||
+                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+                               (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
                        continue;
 
                if (!pipe_to_program) {
@@ -317,31 +375,18 @@ bool dc_stream_set_cursor_attributes(
                        dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
-               dc->hwss.set_cursor_attribute(pipe_ctx);
-               if (dc->hwss.set_cursor_sdr_white_level)
-                       dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+               dc->hwss.set_cursor_position(pipe_ctx);
        }
 
        if (pipe_to_program)
                dc->hwss.cursor_lock(dc, pipe_to_program, false);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       /* re-enable idle optimizations if necessary */
-       if (reset_idle_optimizations)
-               dc_allow_idle_optimizations(dc, true);
-
-#endif
-       return true;
 }
 
 bool dc_stream_set_cursor_position(
        struct dc_stream_state *stream,
        const struct dc_cursor_position *position)
 {
-       int i;
        struct dc  *dc;
-       struct resource_context *res_ctx;
-       struct pipe_ctx *pipe_to_program = NULL;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool reset_idle_optimizations = false;
 #endif
@@ -357,7 +402,6 @@ bool dc_stream_set_cursor_position(
        }
 
        dc = stream->ctx->dc;
-       res_ctx = &dc->current_state->res_ctx;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        dc_z10_restore(dc);
 
@@ -370,27 +414,7 @@ bool dc_stream_set_cursor_position(
 #endif
        stream->cursor_position = *position;
 
-       for (i = 0; i < MAX_PIPES; i++) {
-               struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
-               if (pipe_ctx->stream != stream ||
-                               (!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
-                               !pipe_ctx->plane_state ||
-                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
-                               (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
-                       continue;
-
-               if (!pipe_to_program) {
-                       pipe_to_program = pipe_ctx;
-                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
-               }
-
-               dc->hwss.set_cursor_position(pipe_ctx);
-       }
-
-       if (pipe_to_program)
-               dc->hwss.cursor_lock(dc, pipe_to_program, false);
-
+       program_cursor_position(dc, stream, position);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        /* re-enable idle optimizations if necessary */
        if (reset_idle_optimizations)
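
Both cursor paths now share one shape: walk every pipe, take the cursor lock when the first matching pipe is found, program all matches, then unlock once. A compact sketch with a pthread mutex standing in for hwss.cursor_lock:

#include <pthread.h>
#include <stdbool.h>

struct pipe { const void *stream; };

static pthread_mutex_t cursor_lock = PTHREAD_MUTEX_INITIALIZER;

static void program_matching_pipes(struct pipe *pipes, int n,
                                   const void *stream,
                                   void (*program)(struct pipe *))
{
        bool locked = false;
        int i;

        for (i = 0; i < n; i++) {
                if (pipes[i].stream != stream)
                        continue;
                if (!locked) {
                        /* lock once, on the first matching pipe */
                        pthread_mutex_lock(&cursor_lock);
                        locked = true;
                }
                program(&pipes[i]);
        }

        if (locked)
                pthread_mutex_unlock(&cursor_lock);
}
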
index f2b39ec..cde8ed2 100644
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
                 */
                memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
                dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               dc_z10_save_init(dc);
+#endif
        }
 
        return num_vmids;
index 62c222d..3ab52d9 100644
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.147"
+#define DC_VER "3.2.149"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
 #endif
 
 bool dc_enable_dmub_notifications(struct dc *dc);
index 058a935..e14f99b 100644
 #define DC_LOGGER \
        engine->ctx->logger
 
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
+#define LOG_FLAG_Error_I2cAux LOG_ERROR
+#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
+
 #include "reg_helper.h"
 
 #undef FN
@@ -623,6 +628,58 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
 #define AUX_MAX_INVALID_REPLY_RETRIES 2
 #define AUX_MAX_TIMEOUT_RETRIES 3
 
+static void dce_aux_log_payload(const char *payload_name,
+       unsigned char *payload, uint32_t length, uint32_t max_length_to_log)
+{
+       if (!IS_DC_I2CAUX_LOGGING_ENABLED())
+               return;
+
+       if (payload && length) {
+               char hex_str[128] = {0};
+               char *hex_str_ptr = &hex_str[0];
+               uint32_t hex_str_remaining = sizeof(hex_str);
+               unsigned char *payload_ptr = payload;
+               unsigned char *payload_max_to_log_ptr = payload_ptr + min(max_length_to_log, length);
+               unsigned int count;
+               char *padding = "";
+
+               while (payload_ptr < payload_max_to_log_ptr) {
+                       count = snprintf_count(hex_str_ptr, hex_str_remaining, "%s%02X", padding, *payload_ptr);
+                       padding = " ";
+                       hex_str_remaining -= count;
+                       hex_str_ptr += count;
+                       payload_ptr++;
+               }
+
+               count = snprintf_count(hex_str_ptr, hex_str_remaining, "   ");
+               hex_str_remaining -= count;
+               hex_str_ptr += count;
+
+               payload_ptr = payload;
+               while (payload_ptr < payload_max_to_log_ptr) {
+                       count = snprintf_count(hex_str_ptr, hex_str_remaining, "%c",
+                               *payload_ptr >= ' ' ? *payload_ptr : '.');
+                       hex_str_remaining -= count;
+                       hex_str_ptr += count;
+                       payload_ptr++;
+               }
+
+               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
+                                       LOG_FLAG_I2cAux_DceAux,
+                                       "dce_aux_log_payload: %s: length=%u: data: %s%s",
+                                       payload_name,
+                                       length,
+                                       hex_str,
+                                       (length > max_length_to_log ? " (...)" : " "));
+       } else {
+               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
+                                       LOG_FLAG_I2cAux_DceAux,
+                                       "dce_aux_log_payload: %s: length=%u: data: <empty payload>",
+                                       payload_name,
+                                       length);
+       }
+}
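
dce_aux_log_payload() builds one log line by appending into a fixed buffer while tracking the remaining space. The same pattern with plain snprintf (whose return value is the would-be length, so this sketch also stops cleanly on truncation):

#include <stdint.h>
#include <stdio.h>

static const char *hex_dump_line(const uint8_t *payload, uint32_t len,
                                 char *buf, size_t bufsz)
{
        size_t used = 0;
        uint32_t i;

        buf[0] = '\0';
        for (i = 0; i < len; i++) {
                int n = snprintf(buf + used, bufsz - used, "%s%02X",
                                 i ? " " : "", payload[i]);

                if (n < 0 || (size_t)n >= bufsz - used)
                        break;          /* truncated: stop appending */
                used += n;
        }
        return buf;
}
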
+
 bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                struct aux_payload *payload)
 {
@@ -648,7 +705,34 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
        }
 
        for (i = 0; i < AUX_MAX_RETRIES; i++) {
+               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                       LOG_FLAG_I2cAux_DceAux,
+                                       "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+                                       ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
+                                       i + 1,
+                                       (int)AUX_MAX_RETRIES,
+                                       payload->address,
+                                       payload->length,
+                                       (unsigned int) payload->write,
+                                       (unsigned int) payload->mot);
+               if (payload->write)
+                       dce_aux_log_payload("  write", payload->data, payload->length, 16);
                ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                       LOG_FLAG_I2cAux_DceAux,
+                                       "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+                                       ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
+                                       i + 1,
+                                       (int)AUX_MAX_RETRIES,
+                                       payload->address,
+                                       payload->length,
+                                       (unsigned int) payload->write,
+                                       (unsigned int) payload->mot,
+                                       ret,
+                                       (int)operation_result,
+                                       (unsigned int) *payload->reply);
+               if (!payload->write)
+                       dce_aux_log_payload("  read", payload->data, ret > 0 ? ret : 0, 16);
 
                switch (operation_result) {
                case AUX_RET_SUCCESS:
@@ -657,30 +741,64 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 
                        switch (*payload->reply) {
                        case AUX_TRANSACTION_REPLY_AUX_ACK:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                       LOG_FLAG_I2cAux_DceAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_ACK");
                                if (!payload->write && payload->length != ret) {
-                                       if (++aux_ack_retries >= AUX_MAX_RETRIES)
+                                       if (++aux_ack_retries >= AUX_MAX_RETRIES) {
+                                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                                       LOG_FLAG_Error_I2cAux,
+                                                                       "dce_aux_transfer_with_retries: FAILURE: aux_ack_retries=%d >= AUX_MAX_RETRIES=%d",
+                                                                       aux_ack_retries,
+                                                                       AUX_MAX_RETRIES);
                                                goto fail;
-                                       else
+                                       } else {
                                                udelay(300);
+                                       }
                                } else
                                        return true;
                        break;
 
                        case AUX_TRANSACTION_REPLY_AUX_DEFER:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                       LOG_FLAG_I2cAux_DceAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");
+
                                /* polling_timeout_period is in us */
                                defer_time_in_ms += aux110->polling_timeout_period / 1000;
                                ++aux_defer_retries;
                                fallthrough;
                        case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+                               if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                               LOG_FLAG_I2cAux_DceAux,
+                                                               "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");
+
                                retry_on_defer = true;
                                fallthrough;
                        case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+                               if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                               LOG_FLAG_I2cAux_DceAux,
+                                                               "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
+
                                if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
                                                && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                               LOG_FLAG_Error_I2cAux,
+                                                               "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d && defer_time_in_ms=%d >= AUX_MAX_DEFER_TIMEOUT_MS=%d",
+                                                               aux_defer_retries,
+                                                               AUX_MIN_DEFER_RETRIES,
+                                                               defer_time_in_ms,
+                                                               AUX_MAX_DEFER_TIMEOUT_MS);
                                        goto fail;
                                } else {
                                        if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
                                                (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+                                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                                       LOG_FLAG_I2cAux_DceAux,
+                                                                       "dce_aux_transfer_with_retries: payload->defer_delay=%u",
+                                                                       payload->defer_delay);
                                                if (payload->defer_delay > 1) {
                                                        msleep(payload->defer_delay);
                                                        defer_time_in_ms += payload->defer_delay;
@@ -693,37 +811,86 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                                break;
 
                        case AUX_TRANSACTION_REPLY_I2C_DEFER:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                       LOG_FLAG_I2cAux_DceAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_DEFER");
+
                                aux_defer_retries = 0;
-                               if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+                               if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) {
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                               LOG_FLAG_Error_I2cAux,
+                                                               "dce_aux_transfer_with_retries: FAILURE: aux_i2c_defer_retries=%d >= AUX_MAX_I2C_DEFER_RETRIES=%d",
+                                                               aux_i2c_defer_retries,
+                                                               AUX_MAX_I2C_DEFER_RETRIES);
                                        goto fail;
+                               }
                                break;
 
                        case AUX_TRANSACTION_REPLY_AUX_NACK:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                       LOG_FLAG_I2cAux_DceAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_NACK");
+                               goto fail;
+
                        case AUX_TRANSACTION_REPLY_HPD_DISCON:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                       LOG_FLAG_I2cAux_DceAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_HPD_DISCON");
+                               goto fail;
+
                        default:
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                       LOG_FLAG_Error_I2cAux,
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case.");
                                goto fail;
                        }
                        break;
 
                case AUX_RET_ERROR_INVALID_REPLY:
-                       if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                               LOG_FLAG_I2cAux_DceAux,
+                                               "dce_aux_transfer_with_retries: AUX_RET_ERROR_INVALID_REPLY");
+                       if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) {
+                               DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                       LOG_FLAG_Error_I2cAux,
+                                                       "dce_aux_transfer_with_retries: FAILURE: aux_invalid_reply_retries=%d >= AUX_MAX_INVALID_REPLY_RETRIES=%d",
+                                                       aux_invalid_reply_retries,
+                                                       AUX_MAX_INVALID_REPLY_RETRIES);
                                goto fail;
-                       else
+                       } else
                                udelay(400);
                        break;
 
                case AUX_RET_ERROR_TIMEOUT:
+                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                               LOG_FLAG_I2cAux_DceAux,
+                                               "dce_aux_transfer_with_retries: AUX_RET_ERROR_TIMEOUT");
                        // Check whether a DEFER had occurred before the timeout.
                        // If so, treat timeout as a DEFER.
                        if (retry_on_defer) {
-                               if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES)
+                               if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) {
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                               LOG_FLAG_Error_I2cAux,
+                                                               "dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d",
+                                                               aux_defer_retries,
+                                                               AUX_MIN_DEFER_RETRIES);
                                        goto fail;
-                               else if (payload->defer_delay > 0)
+                               } else if (payload->defer_delay > 0) {
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                                               LOG_FLAG_I2cAux_DceAux,
+                                                               "dce_aux_transfer_with_retries: payload->defer_delay=%u",
+                                                               payload->defer_delay);
                                        msleep(payload->defer_delay);
+                               }
                        } else {
-                               if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+                               if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) {
+                                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                                                               LOG_FLAG_Error_I2cAux,
+                                                               "dce_aux_transfer_with_retries: FAILURE: aux_timeout_retries=%d >= AUX_MAX_TIMEOUT_RETRIES=%d",
+                                                               aux_timeout_retries,
+                                                               AUX_MAX_TIMEOUT_RETRIES);
                                        goto fail;
-                               else {
+                               } else {
                                        /*
                                         * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts
                                         * According to the DP spec there should be 3 retries total
@@ -738,11 +905,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                case AUX_RET_ERROR_ENGINE_ACQUIRE:
                case AUX_RET_ERROR_UNKNOWN:
                default:
+                       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+                                               LOG_FLAG_I2cAux_DceAux,
+                                               "dce_aux_transfer_with_retries: Failure: operation_result=%d",
+                                               (int)operation_result);
                        goto fail;
                }
        }
 
 fail:
+       DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+                               LOG_FLAG_Error_I2cAux,
+                               "dce_aux_transfer_with_retries: FAILURE");
        if (!payload_reply)
                payload->reply = NULL;
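
A quick way to see the retry policy above in isolation: each failure class keeps its own counter, and DEFER replies are additionally capped by total accumulated defer time, so a sink cannot stall the transfer indefinitely by deferring quickly. A minimal userspace sketch of that shape; the names below are illustrative stand-ins, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_DEFER_RETRIES    7   /* stand-in for AUX_MIN_DEFER_RETRIES */
    #define MAX_DEFER_TIMEOUT_MS 50  /* stand-in for AUX_MAX_DEFER_TIMEOUT_MS */

    enum reply { REPLY_ACK, REPLY_DEFER, REPLY_NACK };

    static bool transfer_with_retries(enum reply (*xfer)(void), int defer_delay_ms)
    {
            int defer_retries = 0, defer_time_ms = 0;

            for (;;) {
                    switch (xfer()) {
                    case REPLY_ACK:
                            return true;
                    case REPLY_DEFER:
                            /* fail only when both the count and the time cap trip */
                            if (++defer_retries >= MIN_DEFER_RETRIES &&
                                defer_time_ms >= MAX_DEFER_TIMEOUT_MS)
                                    return false;
                            defer_time_ms += defer_delay_ms;
                            break;
                    case REPLY_NACK:
                            return false;
                    }
            }
    }

    static int calls;
    static enum reply fake_xfer(void)
    {
            return ++calls < 3 ? REPLY_DEFER : REPLY_ACK;  /* ACK on the third try */
    }

    int main(void)
    {
            printf("ok=%d\n", transfer_with_retries(fake_xfer, 10));
            return 0;
    }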
 
index 1ca8b1d..aa8403b 100644 (file)
@@ -29,7 +29,7 @@
 #include "dmub/dmub_srv.h"
 #include "core_types.h"
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...)    do {} while (0) /* do nothing */
 
 #define MAX_PIPES 6
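
The switch from an empty expansion to do {} while (0) keeps the stubbed macro a proper statement. With the empty form, a guarded call such as "if (verbose) DC_TRACE_LEVEL_MESSAGE(...);" collapses to "if (verbose) ;", which gcc flags under -Wempty-body; presumably that is the warning this stub avoids. A self-contained illustration:

    #include <stdio.h>

    #define LOG_EMPTY(...)                  /* expands to nothing */
    #define LOG_STMT(...)  do {} while (0)  /* expands to one empty statement */

    int main(void)
    {
            int verbose = 0;

            if (verbose)
                    LOG_STMT("noisy path\n");  /* LOG_EMPTY here would leave
                                                * "if (verbose) ;" behind */
            printf("done\n");
            return 0;
    }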
 
index 35af040..df8a771 100644 (file)
@@ -3641,13 +3641,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
        struct dc_clock_config clock_cfg = {0};
        struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
 
-       if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
-                               dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
-                                               context, clock_type, &clock_cfg);
-
-       if (!dc->clk_mgr->funcs->get_clock)
+       if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
                return DC_FAIL_UNSUPPORTED_1;
 
+       dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
+               context, clock_type, &clock_cfg);
+
        if (clk_khz > clock_cfg.max_clock_khz)
                return DC_FAIL_CLK_EXCEED_MAX;
 
@@ -3665,7 +3664,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
        else
                return DC_ERROR_UNEXPECTED;
 
-       if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
+       if (dc->clk_mgr->funcs->update_clocks)
                                dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
                                context, true);
        return DC_OK;
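
The reorder above is the usual validate-then-use shape: test the whole pointer chain once, up front, rather than calling through get_clock under one check and then dereferencing dc->clk_mgr unconditionally in a follow-up check, as the old sequence did. A standalone analogue with invented types:

    #include <stdio.h>

    struct clk_funcs { int (*get_clock)(void); };
    struct clk_mgr   { struct clk_funcs *funcs; };

    static int query_clock(struct clk_mgr *mgr)
    {
            /* validate everything before the first call through */
            if (!mgr || !mgr->funcs || !mgr->funcs->get_clock)
                    return -1;  /* analogue of DC_FAIL_UNSUPPORTED_1 */
            return mgr->funcs->get_clock();
    }

    int main(void)
    {
            struct clk_mgr none = { 0 };

            printf("%d\n", query_clock(&none));  /* -1, no crash */
            return 0;
    }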
index 5c28536..a47ba1d 100644 (file)
@@ -1723,13 +1723,15 @@ void dcn20_program_front_end_for_ctx(
 
                                pipe = pipe->bottom_pipe;
                        }
-                       /* Program secondary blending tree and writeback pipes */
-                       pipe = &context->res_ctx.pipe_ctx[i];
-                       if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
-                                       && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
-                                       && hws->funcs.program_all_writeback_pipes_in_tree)
-                               hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
                }
+               /* Program secondary blending tree and writeback pipes */
+               pipe = &context->res_ctx.pipe_ctx[i];
+               if (!pipe->top_pipe && !pipe->prev_odm_pipe
+                               && pipe->stream && pipe->stream->num_wb_info > 0
+                               && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+                                       || pipe->stream->update_flags.raw)
+                               && hws->funcs.program_all_writeback_pipes_in_tree)
+                       hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
        }
 }
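
Two fixes ride together above: the writeback programming moves out of the bottom-pipe walk so it runs once per top-level pipe, and the condition now tests pipe->stream and pipe->plane_state before dereferencing them. The guard pattern on its own, with invented types, leaning on && short-circuit evaluation:

    #include <stdbool.h>
    #include <stdio.h>

    struct stream { int num_wb_info; unsigned int update_flags; };
    struct plane  { unsigned int update_flags; };
    struct pipe {
            struct pipe *top_pipe;
            struct stream *stream;  /* may be NULL on an idle pipe */
            struct plane *plane;    /* may be NULL on a blanked pipe */
            unsigned int update_flags;
    };

    static bool needs_writeback_program(const struct pipe *p)
    {
            return !p->top_pipe &&
                   p->stream && p->stream->num_wb_info > 0 &&
                   (p->update_flags ||
                    (p->plane && p->plane->update_flags) ||
                    p->stream->update_flags);
    }

    int main(void)
    {
            struct pipe idle = { 0 };  /* stream == NULL: safely skipped */

            printf("%d\n", needs_writeback_program(&idle));
            return 0;
    }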
 
index 3fe9e41..6a3d3a0 100644 (file)
 static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
        struct dcn3_xfer_func_reg *reg)
 {
+       reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
+       reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
+       reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
+       reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
+
        reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
        reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
        reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
@@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
        reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
        reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
        reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
-       reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
-       reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
        reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
        reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
        reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
@@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
        uint32_t state_mode;
        uint32_t ram_select;
 
-       REG_GET(DWB_OGAM_CONTROL,
-               DWB_OGAM_MODE, &state_mode);
-       REG_GET(DWB_OGAM_CONTROL,
-               DWB_OGAM_SELECT, &ram_select);
+       REG_GET_2(DWB_OGAM_CONTROL,
+               DWB_OGAM_MODE_CURRENT, &state_mode,
+               DWB_OGAM_SELECT_CURRENT, &ram_select);
 
        if (state_mode == 0) {
                mode = LUT_BYPASS;
        } else if (state_mode == 2) {
                if (ram_select == 0)
                        mode = LUT_RAM_A;
-               else
+               else if (ram_select == 1)
                        mode = LUT_RAM_B;
+               else
+                       mode = LUT_BYPASS;
        } else {
                // Reserved value
                mode = LUT_BYPASS;
@@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
        struct dcn30_dwbc *dwbc30,
        bool is_ram_a)
 {
-       REG_UPDATE(DWB_OGAM_LUT_CONTROL,
-               DWB_OGAM_LUT_READ_COLOR_SEL, 7);
-       REG_UPDATE(DWB_OGAM_CONTROL,
-               DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
+       REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
+               DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
+               DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);
+
        REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
 }
 
@@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
 {
        uint32_t i;
 
-    // triple base implementation
-       for (i = 0; i < num/2; i++) {
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
-               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
+       uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+       uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+       uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
+       if (is_rgb_equal(rgb, num)) {
+               for (i = 0; i < num; i++)
+                       REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
+
+               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
+
+       } else {
+
+               REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+                               DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
+
+               for (i = 0; i < num; i++)
+                       REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
+
+               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
+
+               REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+
+               REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+                               DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
+
+               for (i = 0; i < num; i++)
+                       REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
+
+               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
+
+               REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+
+               REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+                               DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
+
+               for (i = 0; i < num; i++)
+                       REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
+
+               REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
        }
 }
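
The rewritten loop loads the LUT in a single pass when the red, green and blue curves are identical, and otherwise rewinds DWB_OGAM_LUT_INDEX and programs one channel per pass through the write color mask (4 = red, 2 = green, 1 = blue; 7 = all, as set in dwb3_configure_ogam_lut). A userspace model of a masked, auto-incrementing data port, assuming the register behaves as the hunk implies:

    #include <stdint.h>
    #include <stdio.h>

    enum { LUT_SIZE = 4 };
    static uint32_t lut[3][LUT_SIZE];  /* [0]=red, [1]=green, [2]=blue */
    static unsigned int idx, mask = 7;

    static void set_index(unsigned int i) { idx = i; }
    static void set_mask(unsigned int m)  { mask = m; }

    static void write_data(uint32_t v)
    {
            /* the mask picks which channel rams latch the write */
            for (int c = 0; c < 3; c++)
                    if (mask & (4u >> c))
                            lut[c][idx] = v;
            idx++;  /* data writes auto-increment the index */
    }

    int main(void)
    {
            static const uint32_t r[LUT_SIZE] = {0, 10, 20, 30};
            static const uint32_t g[LUT_SIZE] = {0, 11, 22, 33};
            static const uint32_t b[LUT_SIZE] = {0, 12, 24, 36};
            const uint32_t *chan[3] = {r, g, b};

            for (int c = 0; c < 3; c++) {
                    set_index(0);       /* rewind between passes */
                    set_mask(4u >> c);  /* 4=red, 2=green, 1=blue */
                    for (int i = 0; i < LUT_SIZE; i++)
                            write_data(chan[c][i]);
            }
            printf("g[2]=%u b[3]=%u\n", lut[1][2], lut[2][3]);
            return 0;
    }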
 
@@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
                return false;
        }
 
+       REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
+
        current_mode = dwb3_get_ogam_current(dwbc30);
        if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
                next_mode = LUT_RAM_B;
@@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
        dwb3_program_ogam_pwl(
                dwbc30, params->rgb_resulted, params->hw_points_num);
 
-       REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
-       REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+       REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
 
        return true;
 }
@@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(
 
        struct color_matrices_reg gam_regs;
 
-       REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
-
        if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
                REG_SET(DWB_GAMUT_REMAP_MODE, 0,
                                DWB_GAMUT_REMAP_MODE, 0);
                return;
        }
 
+       REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
+
+       gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
+       gam_regs.masks.csc_c11  = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
+       gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
+       gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
+
        switch (select) {
        case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
                gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
index 2e8ab97..fafed1e 100644 (file)
@@ -398,12 +398,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
                        for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
 
+                               if (!pipe_ctx->plane_state)
+                                       continue;
+
                                if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
                                        wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
                                        break;
                                }
                        }
-                       ASSERT(wb_info.mpcc_inst != -1);
+
+                       if (wb_info.mpcc_inst == -1) {
+                               /* Disable writeback pipe and disconnect from MPCC
+                                * if source plane has been removed
+                                */
+                               dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
+                               continue;
+                       }
 
                        ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
                        dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
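
Replacing the ASSERT turns "source plane not found" from a programmer error into a legitimate runtime state: the plane feeding the writeback may simply have been removed, in which case the stale DWB pipe must be disabled rather than programmed. The search-with-sentinel shape, reduced to standalone C:

    #include <stdio.h>

    #define NPIPES 4

    static int find_mpcc(const int *pipe_plane, int wanted)
    {
            for (int i = 0; i < NPIPES; i++)
                    if (pipe_plane[i] == wanted)
                            return i;
            return -1;  /* source plane is gone */
    }

    int main(void)
    {
            int planes[NPIPES] = {3, 7, -1, 5};
            int inst = find_mpcc(planes, 9);

            if (inst == -1)
                    puts("disable writeback: source removed");
            else
                    printf("program mpcc %d\n", inst);
            return 0;
    }
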
index 253654d..28e15eb 100644 (file)
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
                }
                pri_pipe->next_odm_pipe = sec_pipe;
                sec_pipe->prev_odm_pipe = pri_pipe;
-               ASSERT(sec_pipe->top_pipe == NULL);
 
                if (!sec_pipe->top_pipe)
                        sec_pipe->stream_res.opp = pool->opps[pipe_idx];
index 9776d17..912285f 100644 (file)
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
-static void calculate_wm_set_for_vlevel(
-               int vlevel,
-               struct wm_range_table_entry *table_entry,
-               struct dcn_watermarks *wm_set,
-               struct display_mode_lib *dml,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt)
-{
-       double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
-       ASSERT(vlevel < dml->soc.num_states);
-       /* only pipe 0 is read for voltage and dcf/soc clocks */
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
-       pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
-       dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
-       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
-       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
-       wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-       wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-       dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel_req)
-{
-       int i, pipe_idx;
-       int vlevel, vlevel_max;
-       struct wm_range_table_entry *table_entry;
-       struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
-       ASSERT(bw_params);
-
-       vlevel_max = bw_params->clk_table.num_entries - 1;
-
-       /* WM Set D */
-       table_entry = &bw_params->wm_table.entries[WM_D];
-       if (table_entry->wm_type == WM_TYPE_RETRAINING)
-               vlevel = 0;
-       else
-               vlevel = vlevel_max;
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set C */
-       table_entry = &bw_params->wm_table.entries[WM_C];
-       vlevel = min(max(vlevel_req, 2), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set B */
-       table_entry = &bw_params->wm_table.entries[WM_B];
-       vlevel = min(max(vlevel_req, 1), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       /* WM Set A */
-       table_entry = &bw_params->wm_table.entries[WM_A];
-       vlevel = min(vlevel_req, vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
 static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
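
The deleted helpers duplicated the common dcn30 watermark path, so the table now points at dcn30_calculate_wm_and_dlg directly. For reference, what the removed code did with the requested voltage level amounts to per-set clamping; a sketch of just that arithmetic (assuming the shared dcn30 path computes the sets equivalently):

    #include <stdio.h>

    static int clamp(int v, int lo, int hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
            int vlevel_req = 0, vlevel_max = 3;

            /* sets B and C are floored at progressively higher levels;
             * set D was pinned to 0 for retraining tables, else to the max */
            printf("A=%d B=%d C=%d D=%d\n",
                   clamp(vlevel_req, 0, vlevel_max),
                   clamp(vlevel_req, 1, vlevel_max),
                   clamp(vlevel_req, 2, vlevel_max),
                   vlevel_max);
            return 0;
    }
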
index 83f7904..8189606 100644 (file)
@@ -407,6 +407,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
                        &pipe_ctx->stream_res.encoder_info_frame);
        }
 }
+void dcn31_z10_save_init(struct dc *dc)
+{
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+       cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
 
 void dcn31_z10_restore(struct dc *dc)
 {
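
dcn31_z10_save_init follows the usual fire-and-wait DMUB sequence: zero the command union, fill in type and sub_type, then queue, kick the inbox, and wait for the firmware to go idle (presumably split three ways so callers can queue more than one command before executing). A compile-anywhere sketch of that split; the stubs below are placeholders, not the dc_dmub_srv API:

    #include <stdio.h>
    #include <string.h>

    struct cmd { int type, sub_type; };

    static void cmd_queue(const struct cmd *c)
    {
            printf("queued type=%d sub=%d\n", c->type, c->sub_type);
    }

    static void cmd_execute(void) { puts("inbox kicked"); }
    static void wait_idle(void)   { puts("fw idle"); }

    int main(void)
    {
            struct cmd c;

            memset(&c, 0, sizeof(c));  /* unset fields must read as zero */
            c.type = 1;                /* placeholder for DMUB_CMD__IDLE_OPT */
            c.sub_type = 1;            /* placeholder for ..._DCN_SAVE_INIT */

            cmd_queue(&c);
            cmd_execute();
            wait_idle();
            return 0;
    }
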
index 40dfebe..140435e 100644 (file)
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
index aaf2dbd..b30d923 100644 (file)
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .z10_restore = dcn31_z10_restore,
+       .z10_save_init = dcn31_z10_save_init,
        .is_abm_supported = dcn31_is_abm_supported,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
index 5ab008e..ad5f2ad 100644 (file)
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
                        int width, int height, int offset);
 
        void (*z10_restore)(struct dc *dc);
+       void (*z10_save_init)(struct dc *dc);
 
        void (*update_visual_confirm_color)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx,
index aa2707e..7b684e7 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x6d13d5e2c
+#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 77
+#define DMUB_FW_VERSION_REVISION 79
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -322,6 +322,10 @@ union dmub_fw_boot_status {
                uint32_t mailbox_rdy : 1; /**< 1 if mailbox ready */
                uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
                uint32_t restore_required : 1; /**< 1 if driver should call restore */
+               uint32_t defer_load : 1; /**< 1 if VBIOS data programming is deferred */
+               uint32_t reserved : 1;
+               uint32_t detection_required : 1; /**< 1 if detection needs to be triggered by the driver */
+
        } bits; /**< status bits */
        uint32_t all; /**< 32-bit access to status bits */
 };
@@ -335,6 +339,7 @@ enum dmub_fw_boot_status_bit {
        DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */
        DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
        DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
+       DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection needs to be triggered by the driver */
 };
 
 /* Register bit definition for SCRATCH5 */
@@ -489,6 +494,11 @@ enum dmub_gpint_command {
         * RETURN: PSR residency in milli-percent.
         */
        DMUB_GPINT__PSR_RESIDENCY = 9,
+
+       /**
+        * DESC: Notifies DMCUB that detection is done so the detection-required flag can be cleared.
+        */
+       DMUB_GPINT__NOTIFY_DETECTION_DONE = 12,
 };
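
The new boot-status bit and GPINT command pair up: firmware raises detection_required at boot, the driver performs the detection work, then sends DMUB_GPINT__NOTIFY_DETECTION_DONE so the flag can be cleared. A hypothetical driver-side flow in plain C; reading the status out of a scratch register is assumed, not shown in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define BOOT_STATUS_DETECTION_REQUIRED (1u << 6)  /* matches the enum above */

    int main(void)
    {
            /* in the driver this would come from a scratch register */
            uint32_t boot_status = BOOT_STATUS_DETECTION_REQUIRED;

            if (boot_status & BOOT_STATUS_DETECTION_REQUIRED) {
                    puts("run link detection");
                    puts("send DMUB_GPINT__NOTIFY_DETECTION_DONE");
            }
            return 0;
    }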
 
 /**
@@ -860,6 +870,11 @@ enum dmub_cmd_idle_opt_type {
         * DCN hardware restore.
         */
        DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+       /**
+        * DCN hardware save.
+        */
+       DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
 };
 
 /**
@@ -1438,7 +1453,7 @@ struct dmub_cmd_psr_set_level_data {
          * 16-bit value dictated by the driver that will enable/disable different functionality.
         */
        uint16_t psr_level;
-               /**
+       /**
         * PSR control version.
         */
        uint8_t cmd_version;
index 6820012..fc667cb 100644 (file)
@@ -83,7 +83,7 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
 void dmub_dcn31_reset(struct dmub_srv *dmub)
 {
        union dmub_gpint_data_register cmd;
-       const uint32_t timeout = 30;
+       const uint32_t timeout = 100;
        uint32_t in_reset, scratch, i;
 
        REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -98,26 +98,22 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
                /**
                 * Timeout covers both the ACK and the wait
                 * for remaining work to finish.
-                *
-                * This is mostly bound by the PHY disable sequence.
-                * Each register check will be greater than 1us, so
-                * don't bother using udelay.
                 */
 
                for (i = 0; i < timeout; ++i) {
                        if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
                                break;
+
+                       udelay(1);
                }
 
                for (i = 0; i < timeout; ++i) {
                        scratch = dmub->hw_funcs.get_gpint_response(dmub);
                        if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
                                break;
-               }
 
-               /* Clear the GPINT command manually so we don't reset again. */
-               cmd.all = 0;
-               dmub->hw_funcs.set_gpint(dmub, cmd);
+                       udelay(1);
+               }
 
                /* Force reset in case we timed out, DMCUB is likely hung. */
        }
@@ -130,6 +126,10 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
        REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
        REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
        REG_WRITE(DMCUB_SCRATCH0, 0);
+
+       /* Clear the GPINT command manually so we don't send anything during boot. */
+       cmd.all = 0;
+       dmub->hw_funcs.set_gpint(dmub, cmd);
 }
 
 void dmub_dcn31_reset_release(struct dmub_srv *dmub)
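
The reset rework pairs every poll with udelay(1), so the raised timeout of 100 bounds elapsed time rather than just loop iterations, and it postpones clearing the GPINT register until after the reset writes so nothing is sent during boot. The bounded-poll shape in userspace, with POSIX nanosleep standing in for udelay:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static void udelay_us(long us)
    {
            struct timespec ts = { 0, us * 1000 };
            nanosleep(&ts, NULL);
    }

    static bool poll_acked(int *fake_hw)
    {
            return ++*fake_hw >= 5;  /* stand-in for a register read */
    }

    int main(void)
    {
            const int timeout = 100;
            int hw = 0, i;

            for (i = 0; i < timeout; i++) {
                    if (poll_acked(&hw))
                            break;
                    udelay_us(1);  /* bound wall time, not just trips */
            }
            if (i < timeout)
                    printf("acked after %d polls\n", i + 1);
            else
                    puts("timed out; force reset");
            return 0;
    }
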
index 06d60f0..3e81850 100644 (file)
@@ -145,6 +145,7 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
                        } else {
                                callback_in_ms(0, output);
                                set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+                               set_auth_complete(hdcp, output);
                        }
                else if (is_hdmi_dvi_sl_hdcp(hdcp))
                        if (is_cp_desired_hdcp2(hdcp)) {
@@ -156,10 +157,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
                        } else {
                                callback_in_ms(0, output);
                                set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+                               set_auth_complete(hdcp, output);
                        }
                else {
                        callback_in_ms(0, output);
                        set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+                       set_auth_complete(hdcp, output);
                }
        } else if (is_in_cp_not_desired_state(hdcp)) {
                increment_stay_counter(hdcp);
@@ -520,7 +523,7 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
 
        /* reset authentication if needed */
        if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
-               HDCP_FULL_DDC_TRACE(hdcp);
+               mod_hdcp_log_ddc_trace(hdcp);
                reset_status = reset_authentication(hdcp, output);
                if (reset_status != MOD_HDCP_STATUS_SUCCESS)
                        push_error_status(hdcp, reset_status);
index 7123f09..399fbca 100644 (file)
@@ -324,6 +324,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
 /* log functions */
 void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
                uint8_t *buf, uint32_t buf_size);
+void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp);
 /* TODO: add adjustment log */
 
 /* psp functions */
@@ -494,6 +495,13 @@ static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
        output->watchdog_timer_delay = time;
 }
 
+static inline void set_auth_complete(struct mod_hdcp *hdcp,
+               struct mod_hdcp_output *output)
+{
+       output->auth_complete = 1;
+       mod_hdcp_log_ddc_trace(hdcp);
+}
+
 /* connection topology helpers */
 static inline uint8_t is_display_active(struct mod_hdcp_display *display)
 {
index 3dda8c1..7f01119 100644 (file)
@@ -89,7 +89,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
                } else {
                        callback_in_ms(0, output);
                        set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
-                       HDCP_FULL_DDC_TRACE(hdcp);
+                       set_auth_complete(hdcp, output);
                }
                break;
        case H1_A45_AUTHENTICATED:
@@ -137,7 +137,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
                }
                callback_in_ms(0, output);
                set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
-               HDCP_FULL_DDC_TRACE(hdcp);
+               set_auth_complete(hdcp, output);
                break;
        default:
                status = MOD_HDCP_STATUS_INVALID_STATE;
@@ -239,7 +239,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
                        set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
                } else {
                        set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
-                       HDCP_FULL_DDC_TRACE(hdcp);
+                       set_auth_complete(hdcp, output);
                }
                break;
        case D1_A4_AUTHENTICATED:
@@ -311,7 +311,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
                        break;
                }
                set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
-               HDCP_FULL_DDC_TRACE(hdcp);
+               set_auth_complete(hdcp, output);
                break;
        default:
                fail_and_restart_in_ms(0, &status, output);
index 70cb230..1f4095b 100644 (file)
@@ -242,7 +242,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
                }
                callback_in_ms(0, output);
                set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
-               HDCP_FULL_DDC_TRACE(hdcp);
+               set_auth_complete(hdcp, output);
                break;
        case H2_A5_AUTHENTICATED:
                if (input->rxstatus_read == FAIL ||
@@ -559,7 +559,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
                        break;
                }
                set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
-               HDCP_FULL_DDC_TRACE(hdcp);
+               set_auth_complete(hdcp, output);
                break;
        case D2_A5_AUTHENTICATED:
                if (input->rxstatus_read == FAIL ||
index 1a0f7c3..6b3b5f6 100644 (file)
@@ -51,6 +51,80 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
        }
 }
 
+void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
+{
+       if (is_hdcp1(hdcp)) {
+               HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv,
+                               sizeof(hdcp->auth.msg.hdcp1.bksv));
+               HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps,
+                               sizeof(hdcp->auth.msg.hdcp1.bcaps));
+               HDCP_DDC_READ_TRACE(hdcp, "BSTATUS",
+                               (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+                               sizeof(hdcp->auth.msg.hdcp1.bstatus));
+               HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an,
+                               sizeof(hdcp->auth.msg.hdcp1.an));
+               HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv,
+                               sizeof(hdcp->auth.msg.hdcp1.aksv));
+               HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo,
+                               sizeof(hdcp->auth.msg.hdcp1.ainfo));
+               HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'",
+                               (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+                               sizeof(hdcp->auth.msg.hdcp1.r0p));
+               HDCP_DDC_READ_TRACE(hdcp, "BINFO",
+                               (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+                               sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+               HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist,
+                               hdcp->auth.msg.hdcp1.ksvlist_size);
+               HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp,
+                               sizeof(hdcp->auth.msg.hdcp1.vp));
+       } else if (is_hdcp2(hdcp)) {
+               HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version",
+                               &hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
+                               sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
+               HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp,
+                               sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
+               HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_init));
+               HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+               HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM",
+                               hdcp->auth.msg.hdcp2.ake_stored_km,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+               HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM",
+                               hdcp->auth.msg.hdcp2.ake_no_stored_km,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+               HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+               HDCP_DDC_READ_TRACE(hdcp, "Pairing Info",
+                               hdcp->auth.msg.hdcp2.ake_pairing_info,
+                               sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+               HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init,
+                               sizeof(hdcp->auth.msg.hdcp2.lc_init));
+               HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime,
+                               sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+               HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks,
+                               sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+               HDCP_DDC_READ_TRACE(hdcp, "Rx Status",
+                               (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
+                               sizeof(hdcp->auth.msg.hdcp2.rxstatus));
+               HDCP_DDC_READ_TRACE(hdcp, "Rx Id List",
+                               hdcp->auth.msg.hdcp2.rx_id_list,
+                               hdcp->auth.msg.hdcp2.rx_id_list_size);
+               HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack",
+                               hdcp->auth.msg.hdcp2.repeater_auth_ack,
+                               sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+               HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management",
+                               hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+                               hdcp->auth.msg.hdcp2.stream_manage_size);
+               HDCP_DDC_READ_TRACE(hdcp, "Stream Ready",
+                               hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+                               sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+               HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type",
+                               hdcp->auth.msg.hdcp2.content_stream_type_dp,
+                               sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+       }
+}
+
 char *mod_hdcp_status_to_str(int32_t status)
 {
        switch (status) {
index 47f8ee2..eb6f9b9 100644 (file)
                                hdcp->config.index, msg_name,\
                                hdcp->buf); \
 } while (0)
-#define HDCP_FULL_DDC_TRACE(hdcp) do { \
-       if (is_hdcp1(hdcp)) { \
-               HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
-                               sizeof(hdcp->auth.msg.hdcp1.bksv)); \
-               HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
-                               sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
-               HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
-                               (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
-                               sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
-                               sizeof(hdcp->auth.msg.hdcp1.an)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
-                               sizeof(hdcp->auth.msg.hdcp1.aksv)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
-                               sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
-               HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
-                               (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
-                               sizeof(hdcp->auth.msg.hdcp1.r0p)); \
-               HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
-                               (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
-                               sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
-               HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
-                               hdcp->auth.msg.hdcp1.ksvlist_size); \
-               HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
-                               sizeof(hdcp->auth.msg.hdcp1.vp)); \
-       } else { \
-               HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
-                               &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
-                               sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
-               HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
-                               sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
-               HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
-                               hdcp->auth.msg.hdcp2.ake_stored_km, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
-                               hdcp->auth.msg.hdcp2.ake_no_stored_km, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
-               HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
-               HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
-                               hdcp->auth.msg.hdcp2.ake_pairing_info, \
-                               sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
-                               sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
-               HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
-                               sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
-                               sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
-               HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
-                               (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
-                               sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
-               HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
-                               hdcp->auth.msg.hdcp2.rx_id_list, \
-                               hdcp->auth.msg.hdcp2.rx_id_list_size); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
-                               hdcp->auth.msg.hdcp2.repeater_auth_ack, \
-                               sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
-                               hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
-                               hdcp->auth.msg.hdcp2.stream_manage_size); \
-               HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
-                               hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
-                               sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
-               HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
-                               hdcp->auth.msg.hdcp2.content_stream_type_dp, \
-                               sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
-       } \
-} while (0)
 #define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
                HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
                                hdcp->config.index, i)
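
Folding HDCP_FULL_DDC_TRACE into mod_hdcp_log_ddc_trace() means the roughly 70-line body is emitted once instead of being expanded at every call site (hdcp.c plus the four transition files above), and it becomes type-checked and breakpointable; the new is_hdcp2() arm also replaces the macro's bare else, so unknown versions now log nothing. The trade-off in miniature:

    #include <stdio.h>

    #define DUMP_MACRO(x) do { printf("v=%d\n", (x)); } while (0)

    static void dump_func(int x)  /* one copy, type-checked, breakpointable */
    {
            printf("v=%d\n", x);
    }

    int main(void)
    {
            DUMP_MACRO(1);  /* body duplicated at every expansion */
            dump_func(2);   /* body emitted once */
            return 0;
    }
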
index ade86a0..e9bd84e 100644 (file)
@@ -54,7 +54,7 @@ static enum mod_hdcp_status remove_display_from_topology_v2(
                        get_active_display_at_index(hdcp, index);
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
-       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
        if (!display || !is_display_active(display))
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
@@ -90,7 +90,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
                get_active_display_at_index(hdcp, index);
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
-       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
        if (!display || !is_display_active(display))
                return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
@@ -128,13 +128,13 @@ static enum mod_hdcp_status add_display_to_topology_v2(
        struct mod_hdcp_link *link = &hdcp->connection.link;
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.context.initialized) {
                DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
                display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
        mutex_lock(&psp->dtm_context.mutex);
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -175,13 +175,13 @@ static enum mod_hdcp_status add_display_to_topology_v3(
        struct mod_hdcp_link *link = &hdcp->connection.link;
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.context.initialized) {
                DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
                display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+       dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
 
        mutex_lock(&psp->dtm_context.mutex);
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -253,12 +253,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
        struct ta_hdcp_shared_memory *hdcp_cmd;
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
                return MOD_HDCP_STATUS_FAILURE;
        }
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 
        mutex_lock(&psp->hdcp_context.mutex);
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -293,7 +293,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
@@ -325,7 +325,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
@@ -367,7 +367,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
@@ -393,7 +393,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
@@ -436,7 +436,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 
@@ -471,7 +471,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
@@ -498,7 +498,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
                return MOD_HDCP_STATUS_FAILURE;
        }
@@ -508,7 +508,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 
        mutex_lock(&psp->hdcp_context.mutex);
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
@@ -545,7 +545,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
@@ -579,7 +579,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -611,7 +611,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -671,7 +671,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -717,7 +717,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -750,7 +750,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -785,7 +785,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -833,7 +833,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
 
        mutex_lock(&psp->hdcp_context.mutex);
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
@@ -862,7 +862,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
 
        mutex_lock(&psp->hdcp_context.mutex);
 
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -914,7 +914,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -958,7 +958,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
@@ -994,7 +994,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        mutex_lock(&psp->hdcp_context.mutex);
-       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+       hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
        memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
        msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
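
Every hunk above makes the same mechanical substitution: the HDCP TA command buffer is now reached through the generic context.mem_context.shared_buf nesting instead of the dedicated hdcp_shared_buf field, while each call keeps its lock/cast/memset prologue. Below is a minimal userspace model of that prologue; the struct layouts are hypothetical stand-ins for the real psp headers, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ta_mem_context { void *shared_buf; };
struct ta_context { struct ta_mem_context mem_context; };
struct psp_hdcp_context { struct ta_context context; };

struct ta_hdcp_shared_memory { uint32_t cmd_id; uint32_t session_handle; };

/* The prologue every mod_hdcp_* call repeats: reach the TA command buffer
 * through the new nesting, then zero it before filling in a command. */
static struct ta_hdcp_shared_memory *hdcp_get_cmd(struct psp_hdcp_context *ctx)
{
        struct ta_hdcp_shared_memory *cmd =
                (struct ta_hdcp_shared_memory *)ctx->context.mem_context.shared_buf;

        memset(cmd, 0, sizeof(*cmd));
        return cmd;
}

int main(void)
{
        static struct ta_hdcp_shared_memory buf = { .cmd_id = 0xdead };
        struct psp_hdcp_context ctx = {
                .context.mem_context.shared_buf = &buf,
        };
        struct ta_hdcp_shared_memory *cmd = hdcp_get_cmd(&ctx);

        printf("cmd_id after prologue: %u\n", cmd->cmd_id); /* prints 0 */
        return 0;
}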
index a6eb86d..f37101f 100644 (file)
@@ -225,6 +225,7 @@ struct mod_hdcp_output {
        uint8_t watchdog_timer_stop;
        uint16_t callback_delay;
        uint16_t watchdog_timer_delay;
+       uint8_t auth_complete;
 };
 
 /* used to represent per display info */
index a485526..8474f41 100644 (file)
@@ -38,6 +38,9 @@
 #define mmCG_TACH_CTRL                                                                                 0x006a
 #define mmCG_TACH_CTRL_BASE_IDX                                                                        0
 
+#define mmCG_TACH_STATUS                                                                               0x006b
+#define mmCG_TACH_STATUS_BASE_IDX                                                                      0
+
 #define mmTHM_THERMAL_INT_ENA                                                                          0x000a
 #define mmTHM_THERMAL_INT_ENA_BASE_IDX                                                                 0
 #define mmTHM_THERMAL_INT_CTRL                                                                         0x000b
@@ -49,4 +52,7 @@
 #define mmTHM_BACO_CNTL                                                                                0x0081
 #define mmTHM_BACO_CNTL_BASE_IDX                                                                       0
 
+#define mmCG_THERMAL_STATUS                                                                            0x006C
+#define mmCG_THERMAL_STATUS_BASE_IDX                                                                   0
+
 #endif
index d130d92..f2f9eae 100644 (file)
@@ -92,5 +92,8 @@
 #define THM_TCON_THERM_TRIP__RSVD3_MASK                                                                       0x7FFFC000L
 #define THM_TCON_THERM_TRIP__SW_THERM_TP_MASK                                                                 0x80000000L
 
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT                                                                0x9
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK                                                                  0x0001FE00L
+
 #endif
 
index 95c656d..c84bd7b 100644 (file)
@@ -44,6 +44,7 @@ struct kgd_mem;
 enum kfd_preempt_type {
        KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
        KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+       KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
 };
 
 struct kfd_vm_fault_info {
@@ -298,6 +299,8 @@ struct kfd2kgd_calls {
 
        void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
                        int *max_waves_per_cu);
+       void (*program_trap_handler_settings)(struct kgd_dev *kgd,
+                       uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
 };
 
 #endif /* KGD_KFD_INTERFACE_H_INCLUDED */
index e38b191..bac15c4 100644 (file)
@@ -306,8 +306,8 @@ struct amd_pm_funcs {
 /* export for sysfs */
        void (*set_fan_control_mode)(void *handle, u32 mode);
        u32 (*get_fan_control_mode)(void *handle);
-       int (*set_fan_speed_percent)(void *handle, u32 speed);
-       int (*get_fan_speed_percent)(void *handle, u32 *speed);
+       int (*set_fan_speed_pwm)(void *handle, u32 speed);
+       int (*get_fan_speed_pwm)(void *handle, u32 *speed);
        int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
        int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
index 769f58d..2d55627 100644 (file)
@@ -2094,14 +2094,19 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                        *states = ATTR_STATE_UNSUPPORTED;
        }
 
-       if (asic_type == CHIP_ARCTURUS) {
-               /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+       switch (asic_type) {
+       case CHIP_ARCTURUS:
+       case CHIP_ALDEBARAN:
+               /* MI series cards do not support standalone mclk/socclk/fclk level setting */
                if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
                    DEVICE_ATTR_IS(pp_dpm_socclk) ||
                    DEVICE_ATTR_IS(pp_dpm_fclk)) {
                        dev_attr->attr.mode &= ~S_IWUGO;
                        dev_attr->store = NULL;
                }
+               break;
+       default:
+               break;
        }
 
        if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
@@ -2379,7 +2384,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
-       return sprintf(buf, "%u\n", pwm_mode);
+       return sysfs_emit(buf, "%u\n", pwm_mode);
 }
 
 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -2424,14 +2429,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       return sprintf(buf, "%i\n", 0);
+       return sysfs_emit(buf, "%i\n", 0);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       return sprintf(buf, "%i\n", 255);
+       return sysfs_emit(buf, "%i\n", 255);
 }
 
 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
@@ -2469,10 +2474,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
                return err;
        }
 
-       value = (value * 100) / 255;
-
-       if (adev->powerplay.pp_funcs->set_fan_speed_percent)
-               err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+       if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
+               err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
        else
                err = -EINVAL;
 
@@ -2504,8 +2507,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
                return err;
        }
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_percent)
-               err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+       if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
+               err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
        else
                err = -EINVAL;
 
@@ -2515,9 +2518,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
        if (err)
                return err;
 
-       speed = (speed * 255) / 100;
-
-       return sprintf(buf, "%i\n", speed);
+       return sysfs_emit(buf, "%i\n", speed);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
@@ -2550,7 +2551,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
        if (err)
                return err;
 
-       return sprintf(buf, "%i\n", speed);
+       return sysfs_emit(buf, "%i\n", speed);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
@@ -2647,7 +2648,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
        if (err)
                return err;
 
-       return sprintf(buf, "%i\n", rpm);
+       return sysfs_emit(buf, "%i\n", rpm);
 }
 
 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
@@ -2729,7 +2730,7 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
-       return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+       return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
 
 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
@@ -2899,7 +2900,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       return sprintf(buf, "%i\n", 0);
+       return sysfs_emit(buf, "%i\n", 0);
 }
 
 
@@ -3174,6 +3175,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
  *
 * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
  *
+ * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
+ *       The later write overrides the earlier one.
+ *
  * hwmon interfaces for GPU clocks:
  *
  * - freq1_input: the gfx/compute clock in hertz
@@ -3349,13 +3353,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 
        if (!is_support_sw_smu(adev)) {
                /* mask fan attributes if we have no bindings for this asic to expose */
-               if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+               if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
                     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
                    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
                     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
                        effective_mode &= ~S_IRUGO;
 
-               if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+               if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
                     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
                    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
                     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
@@ -3379,8 +3383,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 
        if (!is_support_sw_smu(adev)) {
                /* hide max/min values if we can't both query and manage the fan */
-               if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
-                    !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+               if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
+                    !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
                     (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
                     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
                    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
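
The sprintf()-to-sysfs_emit()/sysfs_emit_at() conversions in this file (and in the hwmgr files further down) matter because sysfs show() callbacks write into a single page buffer: sysfs_emit clamps output to PAGE_SIZE instead of trusting the caller the way open-coded sprintf(buf + size, ...) does. A userspace sketch of that contract follows; model_sysfs_emit_at() is a stand-in for illustration, not the kernel implementation.

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for the kernel's sysfs_emit_at(). */
static int model_sysfs_emit_at(char *buf, int at, const char *fmt, ...)
{
        va_list args;
        int len;

        if (at < 0 || at >= PAGE_SIZE)
                return 0;               /* never write past the page */

        va_start(args, fmt);
        len = vsnprintf(buf + at, PAGE_SIZE - at, fmt, args);
        va_end(args);
        return len < PAGE_SIZE - at ? len : PAGE_SIZE - at - 1;
}

int main(void)
{
        char page[PAGE_SIZE];
        int size = 0;

        /* Same accumulation pattern as the converted print_clock_levels(). */
        for (int i = 0; i < 3; i++)
                size += model_sysfs_emit_at(page, size, "%d: %uMhz %s\n",
                                            i, 300u * (i + 1),
                                            i == 1 ? "*" : "");
        fputs(page, stdout);
        return 0;
}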
index d03e6fa..98f1b3d 100644 (file)
@@ -280,11 +280,11 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_get_fan_control_mode(adev) \
                ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
 
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-               ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
+#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
+               ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
 
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-               ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
+#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
+               ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
 
 #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
                ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
index c2c201b..715b422 100644 (file)
@@ -34,6 +34,8 @@
 #define SMU_FW_NAME_LEN                        0x24
 
 #define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+#define SMU_CUSTOM_FAN_SPEED_RPM     (1 << 1)
+#define SMU_CUSTOM_FAN_SPEED_PWM     (1 << 2)
 
 // Power Throttlers
 #define SMU_THROTTLER_PPT0_BIT                 0
@@ -229,7 +231,8 @@ enum smu_memory_pool_size
 struct smu_user_dpm_profile {
        uint32_t fan_mode;
        uint32_t power_limit;
-       uint32_t fan_speed_percent;
+       uint32_t fan_speed_pwm;
+       uint32_t fan_speed_rpm;
        uint32_t flags;
        uint32_t user_od;
 
@@ -540,7 +543,7 @@ struct smu_context
        struct work_struct interrupt_work;
 
        unsigned fan_max_rpm;
-       unsigned manual_fan_speed_percent;
+       unsigned manual_fan_speed_pwm;
 
        uint32_t gfx_default_hard_min_freq;
        uint32_t gfx_default_soft_max_freq;
@@ -722,9 +725,14 @@ struct pptable_funcs {
        bool (*is_dpm_running)(struct smu_context *smu);
 
        /**
-        * @get_fan_speed_percent: Get the current fan speed in percent.
+        * @get_fan_speed_pwm: Get the current fan speed in PWM.
         */
-       int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+       int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);
+
+       /**
+        * @get_fan_speed_rpm: Get the current fan speed in rpm.
+        */
+       int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
 
        /**
         * @set_watermarks_table: Configure and upload the watermarks tables to
@@ -1043,9 +1051,14 @@ struct pptable_funcs {
        int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
 
        /**
-        * @set_fan_speed_percent: Set a static fan speed in percent.
+        * @set_fan_speed_pwm: Set a static fan speed in PWM.
+        */
+       int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);
+
+       /**
+        * @set_fan_speed_rpm: Set a static fan speed in rpm.
         */
-       int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+       int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
 
        /**
         * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
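
SMU_CUSTOM_FAN_SPEED_RPM/PWM join SMU_DPM_USER_PROFILE_RESTORE as bits in smu_user_dpm_profile.flags, and the profile now remembers both a PWM and an RPM value. The sketch below shows the assumed restore semantics (hypothetical helper, not the driver's code): only the kind of manual setting the user last made is replayed, consistent with the hwmon note above that pwm1 and fan[1-*]_target must not be mixed.

#include <stdint.h>
#include <stdio.h>

#define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1)
#define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2)

struct user_dpm_profile {
        uint32_t fan_speed_pwm;
        uint32_t fan_speed_rpm;
        uint32_t flags;
};

/* Hypothetical restore helper: replay whichever manual fan setting the
 * user made last, never both. */
static void restore_user_fan(const struct user_dpm_profile *p)
{
        if (p->flags & SMU_CUSTOM_FAN_SPEED_PWM)
                printf("replay PWM duty %u/255\n", p->fan_speed_pwm);
        else if (p->flags & SMU_CUSTOM_FAN_SPEED_RPM)
                printf("replay %u RPM\n", p->fan_speed_rpm);
        else
                printf("no manual fan setting to restore\n");
}

int main(void)
{
        struct user_dpm_profile p = {
                .fan_speed_pwm = 128,
                .flags = SMU_CUSTOM_FAN_SPEED_PWM,
        };
        restore_user_fan(&p);
        return 0;
}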
index 490371b..8ed0107 100644 (file)
@@ -278,9 +278,9 @@ struct pp_hwmgr_func {
        int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
        void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
        uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
-       int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
-       int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
-       int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
+       int (*set_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t speed);
+       int (*get_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
+       int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t speed);
        int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
        int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr);
        int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr);
index 6239c30..6f1b1b5 100644 (file)
@@ -298,7 +298,6 @@ enum smu_clk_type {
        __SMU_DUMMY_MAP(DS_FCLK),                               \
        __SMU_DUMMY_MAP(DS_MP1CLK),                             \
        __SMU_DUMMY_MAP(DS_MP0CLK),                             \
-       __SMU_DUMMY_MAP(XGMI),                                  \
        __SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN),          \
        __SMU_DUMMY_MAP(DPM_GFX_PACE),                          \
        __SMU_DUMMY_MAP(MEM_VDDCI_SCALING),                     \
index 403bc1b..cbdae8a 100644 (file)
@@ -221,9 +221,18 @@ int
 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
                               uint32_t mode);
 
-int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_pwm(struct smu_context *smu,
                                    uint32_t speed);
 
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+                               uint32_t speed);
+
+int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
+                                   uint32_t *speed);
+
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+                               uint32_t *speed);
+
 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
                                     uint32_t pstate);
 
index d2a3824..3212150 100644 (file)
@@ -533,7 +533,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
        return mode;
 }
 
-static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
+static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
@@ -541,17 +541,17 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
+       if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
+       ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
 }
 
-static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
+static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
@@ -559,13 +559,13 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
+       if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
 
        mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
+       ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
 }
@@ -1691,8 +1691,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .dispatch_tasks = pp_dpm_dispatch_tasks,
        .set_fan_control_mode = pp_dpm_set_fan_control_mode,
        .get_fan_control_mode = pp_dpm_get_fan_control_mode,
-       .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
-       .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+       .set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
+       .get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
        .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
        .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
        .get_pp_num_states = pp_dpm_get_pp_num_states,
index 7dd92bd..1de3ae7 100644 (file)
@@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                else
                        i = 1;
 
-               size += sprintf(buf + size, "0: %uMhz %s\n",
+               size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
                                        data->gfx_min_freq_limit/100,
                                        i == 0 ? "*" : "");
-               size += sprintf(buf + size, "1: %uMhz %s\n",
+               size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
                                        i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
                                        i == 1 ? "*" : "");
-               size += sprintf(buf + size, "2: %uMhz %s\n",
+               size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
                                        data->gfx_max_freq_limit/100,
                                        i == 2 ? "*" : "");
                break;
@@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i,
                                        mclk_table->entries[i].clk / 100,
                                        ((mclk_table->entries[i].clk / 100)
@@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        if (ret)
                                return ret;
 
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                        (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
                }
                break;
@@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        if (ret)
                                return ret;
 
-                       size = sprintf(buf, "%s:\n", "OD_RANGE");
-                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
                                min_freq, max_freq);
                }
                break;
@@ -1456,11 +1456,11 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+       size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n", title[0],
                        title[1], title[2], title[3], title[4], title[5]);
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
-               size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+               size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
                        i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
                        profile_mode_setting[i][0], profile_mode_setting[i][1],
                        profile_mode_setting[i][2], profile_mode_setting[i][3]);
index 0541bfc..465ff8d 100644 (file)
@@ -3212,7 +3212,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 
        if (!ret) {
                if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-                       smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+                       smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
                else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
                        smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
        }
@@ -4896,8 +4896,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
        struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
-       int i, now, size = 0;
-       uint32_t clock, pcie_speed;
+       int size = 0;
+       uint32_t i, now, clock, pcie_speed;
 
        switch (type) {
        case PP_SCLK:
@@ -4911,7 +4911,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < sclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4940,7 +4940,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < pcie_table->count; i++)
-                       size += sprintf(buf + size, "%d: %s %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
                                        (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
                                        (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
                                        (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4948,32 +4948,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
                        for (i = 0; i < odn_sclk_table->num_of_pl; i++)
-                               size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+                               size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
                                        i, odn_sclk_table->entries[i].clock/100,
                                        odn_sclk_table->entries[i].vddc);
                }
                break;
        case OD_MCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_MCLK");
+                       size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
                        for (i = 0; i < odn_mclk_table->num_of_pl; i++)
-                               size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+                               size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
                                        i, odn_mclk_table->entries[i].clock/100,
                                        odn_mclk_table->entries[i].vddc);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_RANGE");
-                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-                       size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+                       size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-                       size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+                       size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
                                data->odn_dpm_table.min_vddc,
                                data->odn_dpm_table.max_vddc);
                }
@@ -4988,7 +4988,7 @@ static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
        switch (mode) {
        case AMD_FAN_CTRL_NONE:
-               smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+               smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
                break;
        case AMD_FAN_CTRL_MANUAL:
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -5503,7 +5503,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
+       size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
                        title[0], title[1], title[2], title[3],
                        title[4], title[5], title[6], title[7]);
 
@@ -5511,7 +5511,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 
        for (i = 0; i < len; i++) {
                if (i == hwmgr->power_profile_mode) {
-                       size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+                       size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
                        i, profile_name[i], "*",
                        data->current_profile_setting.sclk_up_hyst,
                        data->current_profile_setting.sclk_down_hyst,
@@ -5522,21 +5522,21 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                        continue;
                }
                if (smu7_profiling[i].bupdate_sclk)
-                       size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
+                       size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
                        i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
                        smu7_profiling[i].sclk_down_hyst,
                        smu7_profiling[i].sclk_activity);
                else
-                       size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
+                       size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
                        i, profile_name[i], "-", "-", "-");
 
                if (smu7_profiling[i].bupdate_mclk)
-                       size += sprintf(buf + size, "%16d %16d %16d\n",
+                       size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
                        smu7_profiling[i].mclk_up_hyst,
                        smu7_profiling[i].mclk_down_hyst,
                        smu7_profiling[i].mclk_activity);
                else
-                       size += sprintf(buf + size, "%16s %16s %16s\n",
+                       size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
                        "-", "-", "-");
        }
 
@@ -5692,8 +5692,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
        .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
        .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
        .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+       .get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
+       .set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
        .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
        .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
        .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
index 6cfe148..a6c3610 100644 (file)
@@ -51,7 +51,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed)
 {
        uint32_t duty100;
@@ -70,12 +70,9 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
                return -EINVAL;
 
 
-       tmp64 = (uint64_t)duty * 100;
+       tmp64 = (uint64_t)duty * 255;
        do_div(tmp64, duty100);
-       *speed = (uint32_t)tmp64;
-
-       if (*speed > 100)
-               *speed = 100;
+       *speed = MIN((uint32_t)tmp64, 255);
 
        return 0;
 }
@@ -199,12 +196,11 @@ int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * smu7_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
+ * smu7_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
  * @hwmgr: the address of the powerplay hardware manager.
- * @speed: is the percentage value (0% - 100%) to be set.
- * Exception: Fails is the 100% setting appears to be 0.
+ * @speed: is the pwm value (0 - 255) to be set.
  */
-int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t speed)
 {
        uint32_t duty100;
@@ -214,8 +210,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
        if (hwmgr->thermal_controller.fanInfo.bNoFan)
                return 0;
 
-       if (speed > 100)
-               speed = 100;
+       speed = MIN(speed, 255);
 
        if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
                smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -227,7 +222,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
                return -EINVAL;
 
        tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
+       do_div(tmp64, 255);
        duty = (uint32_t)tmp64;
 
        PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
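
The smu7 hunks rescale the fan duty arithmetic from a 0-100 percent range to the 0-255 hwmon pwm1 range: the set path widens to 64 bits, multiplies by the 100%-duty register reading and divides by 255 (do_div), and the get path inverts that, clamping with MIN(). A self-contained model of both directions (the duty100 value is made up for the demo):

#include <stdint.h>
#include <stdio.h>

#define PWM_MAX 255u

/* speed (0-255) -> duty register value, given the 100%-duty reading. */
static uint32_t pwm_to_duty(uint32_t speed, uint32_t duty100)
{
        uint64_t tmp64;

        if (speed > PWM_MAX)
                speed = PWM_MAX;                /* MIN(speed, 255) in the diff */
        tmp64 = (uint64_t)speed * duty100;      /* widen before dividing */
        return (uint32_t)(tmp64 / PWM_MAX);     /* do_div(tmp64, 255) */
}

/* duty register value -> speed (0-255). */
static uint32_t duty_to_pwm(uint32_t duty, uint32_t duty100)
{
        uint64_t tmp64 = (uint64_t)duty * PWM_MAX;

        tmp64 /= duty100;
        return tmp64 > PWM_MAX ? PWM_MAX : (uint32_t)tmp64;
}

int main(void)
{
        uint32_t duty100 = 200;                 /* example full-scale duty */
        uint32_t duty = pwm_to_duty(128, duty100);

        /* Integer truncation makes the round trip lossy by at most 1. */
        printf("pwm 128 -> duty %u -> pwm %u\n",
               duty, duty_to_pwm(duty, duty100));
        return 0;
}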
index 42c1ba0..a386a43 100644 (file)
 extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
 extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
 extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed);
 extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
 extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t speed);
 extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
 extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
 extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
index d425b02..b94a77e 100644 (file)
@@ -1547,7 +1547,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
        struct smu8_hwmgr *data = hwmgr->backend;
        struct phm_clock_voltage_dependency_table *sclk_table =
                        hwmgr->dyn_state.vddc_dependency_on_sclk;
-       int i, now, size = 0;
+       uint32_t i, now;
+       int size = 0;
 
        switch (type) {
        case PP_SCLK:
@@ -1558,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
                                CURR_SCLK_INDEX);
 
                for (i = 0; i < sclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, sclk_table->entries[i].clk / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -1570,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
                                CURR_MCLK_INDEX);
 
                for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
                                        (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
                break;
index 2597910..c152a61 100644 (file)
@@ -4199,7 +4199,7 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 
        switch (mode) {
        case AMD_FAN_CTRL_NONE:
-               vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+               vega10_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
                break;
        case AMD_FAN_CTRL_MANUAL:
                if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
@@ -4553,13 +4553,13 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                        "[EnableAllSmuFeatures] Failed to get enabled smc features!",
                        return ret);
 
-       size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
-       size += sprintf(buf + size, "%-19s %-22s %s\n",
+       size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+       size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
                                output_title[0],
                                output_title[1],
                                output_title[2]);
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
-               size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+               size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
                                        ppfeature_name[i],
                                        1ULL << i,
                                        (features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -4650,7 +4650,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                else
                        count = sclk_table->count;
                for (i = 0; i < count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4661,7 +4661,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4672,7 +4672,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
 
                for (i = 0; i < soc_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, soc_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4684,7 +4684,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
 
                for (i = 0; i < dcef_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, dcef_table->dpm_levels[i].value / 100,
                                        (dcef_table->dpm_levels[i].value / 100 == now) ?
                                        "*" : "");
@@ -4698,7 +4698,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        gen_speed = pptable->PcieGenSpeed[i];
                        lane_width = pptable->PcieLaneCount[i];
 
-                       size += sprintf(buf + size, "%d: %s %s %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
                                        (gen_speed == 0) ? "2.5GT/s," :
                                        (gen_speed == 1) ? "5.0GT/s," :
                                        (gen_speed == 2) ? "8.0GT/s," :
@@ -4717,34 +4717,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
                        podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
                        for (i = 0; i < podn_vdd_dep->count; i++)
-                               size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+                               size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
                                        i, podn_vdd_dep->entries[i].clk / 100,
                                                podn_vdd_dep->entries[i].vddc);
                }
                break;
        case OD_MCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_MCLK");
+                       size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
                        podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
                        for (i = 0; i < podn_vdd_dep->count; i++)
-                               size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+                               size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
                                        i, podn_vdd_dep->entries[i].clk/100,
                                                podn_vdd_dep->entries[i].vddc);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_RANGE");
-                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-                       size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+                       size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-                       size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+                       size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
                                data->odn_dpm_table.min_vddc,
                                data->odn_dpm_table.max_vddc);
                }
@@ -5112,21 +5112,28 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+       size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
                        title[1], title[2], title[3], title[4], title[5]);
 
        for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
-               size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+               size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
                        i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
                        profile_mode_setting[i][0], profile_mode_setting[i][1],
                        profile_mode_setting[i][2], profile_mode_setting[i][3]);
-       size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
+       size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
                        profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
                        data->custom_profile_mode[0], data->custom_profile_mode[1],
                        data->custom_profile_mode[2], data->custom_profile_mode[3]);
        return size;
 }
 
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
        }
 
 out:
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+       if (vega10_get_power_profile_mode_quirks(hwmgr))
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                                               1 << power_profile_mode,
+                                               NULL);
+       else
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
                                                (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
                                                NULL);
+
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
@@ -5523,8 +5536,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
        .force_dpm_level = vega10_dpm_force_dpm_level,
        .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
        .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
-       .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
+       .get_fan_speed_pwm = vega10_fan_ctrl_get_fan_speed_pwm,
+       .set_fan_speed_pwm = vega10_fan_ctrl_set_fan_speed_pwm,
        .reset_fan_speed_to_default =
                        vega10_fan_ctrl_reset_fan_speed_to_default,
        .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
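
The new vega10_get_power_profile_mode_quirks() check above changes the workload-mask encoding for one device id (0x6860): most Vega10 parts send bit N-1 in PPSMC_MSG_SetWorkloadMask for profile N (and an empty mask for profile 0), while the quirked part expects bit N directly. A tiny model of the two encodings:

#include <stdint.h>
#include <stdio.h>

static uint32_t workload_mask(uint32_t mode, int quirk_0x6860)
{
        if (quirk_0x6860)
                return 1u << mode;                  /* bit N           */
        return mode ? 1u << (mode - 1) : 0;         /* bit N-1, 0 -> 0 */
}

int main(void)
{
        for (uint32_t mode = 0; mode < 3; mode++)
                printf("mode %u: default 0x%x, quirk 0x%x\n", mode,
                       workload_mask(mode, 0), workload_mask(mode, 1));
        return 0;
}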
index 9b46b27..dad3e37 100644 (file)
@@ -64,7 +64,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed)
 {
        uint32_t current_rpm;
@@ -78,11 +78,11 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 
        if (hwmgr->thermal_controller.
                        advanceFanControlParameters.usMaxFanRPM != 0)
-               percent = current_rpm * 100 /
+               percent = current_rpm * 255 /
                        hwmgr->thermal_controller.
                        advanceFanControlParameters.usMaxFanRPM;
 
-       *speed = percent > 100 ? 100 : percent;
+       *speed = MIN(percent, 255);
 
        return 0;
 }
@@ -241,12 +241,11 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * vega10_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
+ * vega10_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
  * @hwmgr:  the address of the powerplay hardware manager.
- * @speed: is the percentage value (0% - 100%) to be set.
- * Exception: Fails is the 100% setting appears to be 0.
+ * @speed: is the pwm value (0 - 255) to be set.
  */
-int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t speed)
 {
        struct amdgpu_device *adev = hwmgr->adev;
@@ -257,8 +256,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
        if (hwmgr->thermal_controller.fanInfo.bNoFan)
                return 0;
 
-       if (speed > 100)
-               speed = 100;
+       speed = MIN(speed, 255);
 
        if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
                vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -270,7 +268,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
                return -EINVAL;
 
        tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
+       do_div(tmp64, 255);
        duty = (uint32_t)tmp64;
 
        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
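
vega10's pwm read path differs from smu7's: instead of reading the duty register back, it estimates the value from fan speed as current_rpm * 255 / usMaxFanRPM, clamped to 255 (the set path reuses the same duty100 multiply/divide as smu7). A one-function model of the estimate:

#include <stdint.h>
#include <stdio.h>

static uint32_t rpm_to_pwm(uint32_t current_rpm, uint32_t max_rpm)
{
        uint32_t pwm;

        if (!max_rpm)
                return 0;                /* mirror the usMaxFanRPM != 0 guard */
        pwm = current_rpm * 255 / max_rpm;
        return pwm > 255 ? 255 : pwm;    /* MIN(percent, 255) in the diff */
}

int main(void)
{
        printf("1500 of 3000 RPM -> pwm %u\n", rpm_to_pwm(1500, 3000)); /* 127 */
        return 0;
}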
index 4a0ede7..6850a21 100644 (file)
@@ -54,12 +54,12 @@ extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
 extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
 extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
                struct phm_fan_speed_info *fan_speed_info);
-extern int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed);
 extern int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
 extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
                uint32_t mode);
-extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t speed);
 extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
 extern int vega10_thermal_ctrl_uninitialize_thermal_controller(
index 29e0d1d..8558718 100644 (file)
@@ -2146,13 +2146,13 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                "[EnableAllSmuFeatures] Failed to get enabled smc features!",
                return ret);
 
-       size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
-       size += sprintf(buf + size, "%-19s %-22s %s\n",
+       size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+       size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
                                output_title[0],
                                output_title[1],
                                output_title[2]);
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
-               size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+               size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
                                ppfeature_name[i],
                                1ULL << i,
                                (features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -2256,7 +2256,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get gfx clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
@@ -2272,7 +2272,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get memory clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
@@ -2290,7 +2290,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get soc clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
                break;
@@ -2308,7 +2308,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get dcef clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
                break;
index 0791309..0cf39c1 100644 (file)
@@ -2769,7 +2769,7 @@ static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
        switch (mode) {
        case AMD_FAN_CTRL_NONE:
-               vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+               vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
                break;
        case AMD_FAN_CTRL_MANUAL:
                if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
@@ -3243,13 +3243,13 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                        "[EnableAllSmuFeatures] Failed to get enabled smc features!",
                        return ret);
 
-       size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
-       size += sprintf(buf + size, "%-19s %-22s %s\n",
+       size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+       size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
                                output_title[0],
                                output_title[1],
                                output_title[2]);
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
-               size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+               size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
                                        ppfeature_name[i],
                                        1ULL << i,
                                        (features_enabled & (1ULL << i)) ? "Y" : "N");
@@ -3372,13 +3372,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_sclks(hwmgr, &clocks)) {
-                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3390,13 +3390,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_memclocks(hwmgr, &clocks)) {
-                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3408,13 +3408,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_socclocks(hwmgr, &clocks)) {
-                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3426,7 +3426,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                for (i = 0; i < fclk_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, fclk_dpm_table->dpm_levels[i].value,
                                fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
                break;
@@ -3438,13 +3438,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_dcefclocks(hwmgr, &clocks)) {
-                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3458,7 +3458,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                        gen_speed = pptable->PcieGenSpeed[i];
                        lane_width = pptable->PcieLaneCount[i];
 
-                       size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
                                        (gen_speed == 0) ? "2.5GT/s," :
                                        (gen_speed == 1) ? "5.0GT/s," :
                                        (gen_speed == 2) ? "8.0GT/s," :
@@ -3479,18 +3479,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
        case OD_SCLK:
                if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                                od_table->GfxclkFmin);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                od_table->GfxclkFmax);
                }
                break;
 
        case OD_MCLK:
                if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-                       size = sprintf(buf, "%s:\n", "OD_MCLK");
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                od_table->UclkFmax);
                }
 
@@ -3503,14 +3503,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-                       size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
-                       size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq1,
                                od_table->GfxclkVolt1 / VOLTAGE_SCALE);
-                       size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq2,
                                od_table->GfxclkVolt2 / VOLTAGE_SCALE);
-                       size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+                       size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq3,
                                od_table->GfxclkVolt3 / VOLTAGE_SCALE);
                }
@@ -3518,17 +3518,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
 
        case OD_RANGE:
-               size = sprintf(buf, "%s:\n", "OD_RANGE");
+               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
 
                if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
                }
 
                if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-                       size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
                                od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
                }
@@ -3539,22 +3539,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
                }
@@ -4003,7 +4003,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+       size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
                        title[0], title[1], title[2], title[3], title[4], title[5],
                        title[6], title[7], title[8], title[9], title[10]);
 
@@ -4016,10 +4016,10 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                                "[GetPowerProfile] Failed to get activity monitor!",
                                return result);
 
-               size += sprintf(buf + size, "%2d %14s%s:\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
                        i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        0,
                        "GFXCLK",
@@ -4033,7 +4033,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                        activity_monitor.Gfx_PD_Data_error_coeff,
                        activity_monitor.Gfx_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        1,
                        "SOCCLK",
@@ -4047,7 +4047,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                        activity_monitor.Soc_PD_Data_error_coeff,
                        activity_monitor.Soc_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        2,
                        "UCLK",
@@ -4061,7 +4061,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                        activity_monitor.Mem_PD_Data_error_coeff,
                        activity_monitor.Mem_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        3,
                        "FCLK",
@@ -4409,8 +4409,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
        .register_irq_handlers = smu9_register_irq_handlers,
        .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
        /* fan control related */
-       .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
-       .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+       .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm,
+       .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm,
        .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
        .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
        .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
index 269dd7e..f4f4efd 100644 (file)
@@ -114,26 +114,29 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
        return 0;
 }
 
-int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed)
 {
-       struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-       PPTable_t *pp_table = &(data->smc_state_table.pp_table);
-       uint32_t current_rpm, percent = 0;
-       int ret = 0;
+       struct amdgpu_device *adev = hwmgr->adev;
+       uint32_t duty100, duty;
+       uint64_t tmp64;
 
-       ret = vega20_get_current_rpm(hwmgr, &current_rpm);
-       if (ret)
-               return ret;
+       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+                               CG_FDO_CTRL1, FMAX_DUTY100);
+       duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+                               CG_THERMAL_STATUS, FDO_PWM_DUTY);
 
-       percent = current_rpm * 100 / pp_table->FanMaximumRpm;
+       if (!duty100)
+               return -EINVAL;
 
-       *speed = percent > 100 ? 100 : percent;
+       tmp64 = (uint64_t)duty * 255;
+       do_div(tmp64, duty100);
+       *speed = MIN((uint32_t)tmp64, 255);
 
        return 0;
 }
 
-int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t speed)
 {
        struct amdgpu_device *adev = hwmgr->adev;
@@ -141,8 +144,7 @@ int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
        uint32_t duty;
        uint64_t tmp64;
 
-       if (speed > 100)
-               speed = 100;
+       speed = MIN(speed, 255);
 
        if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
                vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
@@ -154,7 +156,7 @@ int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
                return -EINVAL;
 
        tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
+       do_div(tmp64, 255);
        duty = (uint32_t)tmp64;
 
        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
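
Both PWM paths above pivot on the FMAX_DUTY100 register field: the read side scales the measured duty onto 0-255, the write side scales the requested PWM back into duty units. A self-contained sketch of that arithmetic, assuming duty100 is the register value meaning 100% duty (do_div() keeps the 64-bit division portable to 32-bit kernels):

    #include <asm/div64.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    static u32 duty_to_pwm(u32 duty, u32 duty100)
    {
            u64 tmp64 = (u64)duty * 255;

            if (!duty100)
                    return 0;
            do_div(tmp64, duty100);         /* pwm = duty * 255 / duty100 */
            return min_t(u32, tmp64, 255);
    }

    static u32 pwm_to_duty(u32 pwm, u32 duty100)
    {
            u64 tmp64 = (u64)min_t(u32, pwm, 255) * duty100;

            do_div(tmp64, 255);             /* duty = pwm * duty100 / 255 */
            return (u32)tmp64;
    }
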
index 2d1769b..b18d09c 100644 (file)
@@ -56,9 +56,9 @@ extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
                uint32_t *speed);
 extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
                uint32_t speed);
-extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed);
-extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+extern int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t speed);
 extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
 extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
index 15c0b8a..bdbbeb9 100644 (file)
@@ -6539,7 +6539,7 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
        }
 }
 
-static int si_dpm_get_fan_speed_percent(void *handle,
+static int si_dpm_get_fan_speed_pwm(void *handle,
                                      u32 *speed)
 {
        u32 duty, duty100;
@@ -6555,17 +6555,14 @@ static int si_dpm_get_fan_speed_percent(void *handle,
        if (duty100 == 0)
                return -EINVAL;
 
-       tmp64 = (u64)duty * 100;
+       tmp64 = (u64)duty * 255;
        do_div(tmp64, duty100);
-       *speed = (u32)tmp64;
-
-       if (*speed > 100)
-               *speed = 100;
+       *speed = MIN((u32)tmp64, 255);
 
        return 0;
 }
 
-static int si_dpm_set_fan_speed_percent(void *handle,
+static int si_dpm_set_fan_speed_pwm(void *handle,
                                      u32 speed)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6580,7 +6577,7 @@ static int si_dpm_set_fan_speed_percent(void *handle,
        if (si_pi->fan_is_controlled_by_smc)
                return -EINVAL;
 
-       if (speed > 100)
+       if (speed > 255)
                return -EINVAL;
 
        duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
@@ -6589,7 +6586,7 @@ static int si_dpm_set_fan_speed_percent(void *handle,
                return -EINVAL;
 
        tmp64 = (u64)speed * duty100;
-       do_div(tmp64, 100);
+       do_div(tmp64, 255);
        duty = (u32)tmp64;
 
        tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
@@ -8059,8 +8056,8 @@ static const struct amd_pm_funcs si_dpm_funcs = {
        .vblank_too_short = &si_dpm_vblank_too_short,
        .set_fan_control_mode = &si_dpm_set_fan_control_mode,
        .get_fan_control_mode = &si_dpm_get_fan_control_mode,
-       .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
-       .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+       .set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm,
+       .get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm,
        .check_state_equal = &si_check_state_equal,
        .get_vce_clock_state = amdgpu_get_vce_clock_state,
        .read_sensor = &si_dpm_read_sensor,
index 71afc2d..3ab1ce4 100644 (file)
@@ -58,7 +58,7 @@ static int smu_handle_task(struct smu_context *smu,
                           enum amd_pp_task task_id,
                           bool lock_needed);
 static int smu_reset(struct smu_context *smu);
-static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_speed_pwm(void *handle, u32 speed);
 static int smu_set_fan_control_mode(struct smu_context *smu, int value);
 static int smu_set_power_limit(void *handle, uint32_t limit);
 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
@@ -403,17 +403,26 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
        }
 
        /* set the user dpm fan configurations */
-       if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
+       if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
+           smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
                ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
                if (ret) {
+                       smu->user_dpm_profile.fan_speed_pwm = 0;
+                       smu->user_dpm_profile.fan_speed_rpm = 0;
+                       smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
                        dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
-                       return;
                }
 
-               if (!ret && smu->user_dpm_profile.fan_speed_percent) {
-                       ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
+               if (smu->user_dpm_profile.fan_speed_pwm) {
+                       ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
                        if (ret)
-                               dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
+                               dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
+               }
+
+               if (smu->user_dpm_profile.fan_speed_rpm) {
+                       ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
+                       if (ret)
+                               dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
                }
        }
 
@@ -620,6 +629,7 @@ static int smu_early_init(void *handle)
        mutex_init(&smu->smu_baco.mutex);
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
+       smu->user_dpm_profile.fan_mode = -1;
 
        adev->powerplay.pp_handle = smu;
        adev->powerplay.pp_funcs = &swsmu_pm_funcs;
@@ -2179,7 +2189,6 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 {
        struct smu_context *smu = handle;
-       u32 percent;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2187,11 +2196,16 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->set_fan_speed_percent) {
-               percent = speed * 100 / smu->fan_max_rpm;
-               ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
-               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
-                       smu->user_dpm_profile.fan_speed_percent = percent;
+       if (smu->ppt_funcs->set_fan_speed_rpm) {
+               ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+                       smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
+                       smu->user_dpm_profile.fan_speed_rpm = speed;
+
+                       /* Override any custom PWM setting, as the two cannot coexist */
+                       smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
+                       smu->user_dpm_profile.fan_speed_pwm = 0;
+               }
        }
 
        mutex_unlock(&smu->mutex);
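
Note the bookkeeping in the hunk above: a user may pin the fan either by RPM or by PWM, never both, so recording one request clears the other. Sketched below with illustrative flag bits and an abbreviated profile struct (the real SMU_CUSTOM_* flags live in amdgpu's SMU headers):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define CUSTOM_FAN_SPEED_RPM    BIT(0)  /* illustrative values */
    #define CUSTOM_FAN_SPEED_PWM    BIT(1)

    struct user_fan_profile {
            u32 flags;
            u32 fan_speed_rpm;
            u32 fan_speed_pwm;
    };

    static void record_user_rpm(struct user_fan_profile *p, u32 rpm)
    {
            p->flags |= CUSTOM_FAN_SPEED_RPM;
            p->fan_speed_rpm = rpm;

            /* An RPM request overrides any earlier PWM request. */
            p->flags &= ~CUSTOM_FAN_SPEED_PWM;
            p->fan_speed_pwm = 0;
    }
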
@@ -2551,8 +2565,11 @@ static int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
        /* reset user dpm fan speed */
        if (!ret && value != AMD_FAN_CTRL_MANUAL &&
-                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
-               smu->user_dpm_profile.fan_speed_percent = 0;
+                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+               smu->user_dpm_profile.fan_speed_pwm = 0;
+               smu->user_dpm_profile.fan_speed_rpm = 0;
+               smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+       }
 
        return ret;
 }
@@ -2565,31 +2582,25 @@ static void smu_pp_set_fan_control_mode(void *handle, u32 value)
 }
 
 
-static int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
-       uint32_t percent;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->get_fan_speed_percent) {
-               ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
-               if (!ret) {
-                       *speed = percent > 100 ? 100 : percent;
-               }
-       }
+       if (smu->ppt_funcs->get_fan_speed_pwm)
+               ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
 
        mutex_unlock(&smu->mutex);
 
-
        return ret;
 }
 
-static int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_pwm(void *handle, u32 speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2599,12 +2610,16 @@ static int smu_set_fan_speed_percent(void *handle, u32 speed)
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->set_fan_speed_percent) {
-               if (speed > 100)
-                       speed = 100;
-               ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
-               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
-                       smu->user_dpm_profile.fan_speed_percent = speed;
+       if (smu->ppt_funcs->set_fan_speed_pwm) {
+               ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
+               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+                       smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
+                       smu->user_dpm_profile.fan_speed_pwm = speed;
+
+                       /* Override any custom RPM setting, as the two cannot coexist */
+                       smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
+                       smu->user_dpm_profile.fan_speed_rpm = 0;
+               }
        }
 
        mutex_unlock(&smu->mutex);
@@ -2616,17 +2631,14 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
-       u32 percent;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->get_fan_speed_percent) {
-               ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
-               *speed = percent * smu->fan_max_rpm / 100;
-       }
+       if (smu->ppt_funcs->get_fan_speed_rpm)
+               ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
 
        mutex_unlock(&smu->mutex);
 
@@ -3043,8 +3055,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
        /* export for sysfs */
        .set_fan_control_mode    = smu_pp_set_fan_control_mode,
        .get_fan_control_mode    = smu_get_fan_control_mode,
-       .set_fan_speed_percent   = smu_set_fan_speed_percent,
-       .get_fan_speed_percent   = smu_get_fan_speed_percent,
+       .set_fan_speed_pwm   = smu_set_fan_speed_pwm,
+       .get_fan_speed_pwm   = smu_get_fan_speed_pwm,
        .force_clock_level       = smu_force_ppclk_levels,
        .print_clock_levels      = smu_print_ppclk_levels,
        .force_performance_level = smu_force_performance_level,
index 6ec8492..273df66 100644 (file)
 
 #define smnPCIE_ESM_CTRL                       0x111003D0
 
+#define mmCG_FDO_CTRL0_ARCT                    0x8B
+#define mmCG_FDO_CTRL0_ARCT_BASE_IDX           0
+
+#define mmCG_FDO_CTRL1_ARCT                    0x8C
+#define mmCG_FDO_CTRL1_ARCT_BASE_IDX           0
+
+#define mmCG_FDO_CTRL2_ARCT                    0x8D
+#define mmCG_FDO_CTRL2_ARCT_BASE_IDX           0
+
+#define mmCG_TACH_CTRL_ARCT                    0x8E
+#define mmCG_TACH_CTRL_ARCT_BASE_IDX           0
+
+#define mmCG_TACH_STATUS_ARCT                  0x8F
+#define mmCG_TACH_STATUS_ARCT_BASE_IDX         0
+
+#define mmCG_THERMAL_STATUS_ARCT               0x90
+#define mmCG_THERMAL_STATUS_ARCT_BASE_IDX      0
+
 static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                         PPSMC_MSG_TestMessage,                     0),
        MSG_MAP(GetSmuVersion,                       PPSMC_MSG_GetSmuVersion,                   1),
@@ -163,14 +181,14 @@ static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT
        FEA_MAP(DPM_SOCCLK),
        FEA_MAP(DPM_FCLK),
        FEA_MAP(DPM_MP0CLK),
-       ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
+       FEA_MAP(DPM_XGMI),
        FEA_MAP(DS_GFXCLK),
        FEA_MAP(DS_SOCCLK),
        FEA_MAP(DS_LCLK),
        FEA_MAP(DS_FCLK),
        FEA_MAP(DS_UCLK),
        FEA_MAP(GFX_ULV),
-       ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT),
+       ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN_BIT),
        FEA_MAP(RSMU_SMN_CG),
        FEA_MAP(WAFL_CG),
        FEA_MAP(PPT),
@@ -721,13 +739,13 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
                        member_type = METRICS_AVERAGE_SOCCLK;
                break;
        case PPCLK_VCLK:
-               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT))
                        member_type = METRICS_CURR_VCLK;
                else
                        member_type = METRICS_AVERAGE_VCLK;
                break;
        case PPCLK_DCLK:
-               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT))
                        member_type = METRICS_CURR_DCLK;
                else
                        member_type = METRICS_AVERAGE_DCLK;
@@ -756,7 +774,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        uint32_t gen_speed, lane_width;
 
        if (amdgpu_ras_intr_triggered())
-               return snprintf(buf, PAGE_SIZE, "unavailable\n");
+               return sysfs_emit(buf, "unavailable\n");
 
        dpm_context = smu_dpm->dpm_context;
 
@@ -780,7 +798,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                 * And it's safe to assume that is always the current clock.
                 */
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
                                        clocks.data[i].clocks_in_khz / 1000,
                                        (clocks.num_levels == 1) ? "*" :
                                        (arcturus_freqs_in_same_level(
@@ -803,7 +821,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.num_levels == 1) ? "*" :
                                (arcturus_freqs_in_same_level(
@@ -826,7 +844,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.num_levels == 1) ? "*" :
                                (arcturus_freqs_in_same_level(
@@ -849,7 +867,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, single_dpm_table->dpm_levels[i].value,
                                (clocks.num_levels == 1) ? "*" :
                                (arcturus_freqs_in_same_level(
@@ -872,7 +890,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, single_dpm_table->dpm_levels[i].value,
                                (clocks.num_levels == 1) ? "*" :
                                (arcturus_freqs_in_same_level(
@@ -895,7 +913,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                i, single_dpm_table->dpm_levels[i].value,
                                (clocks.num_levels == 1) ? "*" :
                                (arcturus_freqs_in_same_level(
@@ -906,7 +924,7 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        case SMU_PCIE:
                gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
                lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
-               size += sprintf(buf + size, "0: %s %s %dMhz *\n",
+               size += sysfs_emit_at(buf, size, "0: %s %s %dMhz *\n",
                                (gen_speed == 0) ? "2.5GT/s," :
                                (gen_speed == 1) ? "5.0GT/s," :
                                (gen_speed == 2) ? "8.0GT/s," :
@@ -1162,11 +1180,29 @@ static int arcturus_read_sensor(struct smu_context *smu,
        return ret;
 }
 
-static int arcturus_get_fan_speed_percent(struct smu_context *smu,
-                                         uint32_t *speed)
+static int arcturus_set_fan_static_mode(struct smu_context *smu,
+                                       uint32_t mode)
 {
-       int ret;
-       u32 rpm;
+       struct amdgpu_device *adev = smu->adev;
+
+       WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),
+                                  CG_FDO_CTRL2, TMIN, 0));
+       WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),
+                                  CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+       return 0;
+}
+
+static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
+                                     uint32_t *speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_status;
+       uint64_t tmp64;
+       int ret = 0;
 
        if (!speed)
                return -EINVAL;
@@ -1175,14 +1211,112 @@ static int arcturus_get_fan_speed_percent(struct smu_context *smu,
        case AMD_FAN_CTRL_AUTO:
                ret = arcturus_get_smu_metrics_data(smu,
                                                    METRICS_CURR_FANSPEED,
-                                                   &rpm);
-               if (!ret && smu->fan_max_rpm)
-                       *speed = rpm * 100 / smu->fan_max_rpm;
-               return ret;
+                                                   speed);
+               break;
        default:
-               *speed = smu->user_dpm_profile.fan_speed_percent;
+               /*
+                * On pre-Sienna Cichlid ASICs, a 0 RPM fan speed may not be
+                * detected correctly from the register. To work around this,
+                * report 0 RPM whenever that is what the user last requested.
+                */
+               if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
+                    && !smu->user_dpm_profile.fan_speed_rpm) {
+                       *speed = 0;
+                       return 0;
+               }
+
+               tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
+               tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS_ARCT);
+               if (tach_status) {
+                       do_div(tmp64, tach_status);
+                       *speed = (uint32_t)tmp64;
+               } else {
+                       *speed = 0;
+               }
+
+               break;
+       }
+
+       return ret;
+}
+
+static int arcturus_set_fan_speed_pwm(struct smu_context *smu,
+                                     uint32_t speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t duty100, duty;
+       uint64_t tmp64;
+
+       speed = MIN(speed, 255);
+
+       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),
+                               CG_FDO_CTRL1, FMAX_DUTY100);
+       if (!duty100)
+               return -EINVAL;
+
+       tmp64 = (uint64_t)speed * duty100;
+       do_div(tmp64, 255);
+       duty = (uint32_t)tmp64;
+
+       WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT),
+                                  CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+       return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
+                                     uint32_t speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       /*
+        * The crystal_clock_freq used for the fan speed RPM calculation
+        * is always 25MHz, so hardcode it as 2500 (in 10KHz units).
+        */
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_period;
+
+       tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+       WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
+                                  CG_TACH_CTRL, TARGET_PERIOD,
+                                  tach_period));
+
+       return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+}
+
+static int arcturus_get_fan_speed_pwm(struct smu_context *smu,
+                                     uint32_t *speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t duty100, duty;
+       uint64_t tmp64;
+
+       /*
+        * On pre-Sienna Cichlid ASICs, a 0 RPM fan speed may not be
+        * detected correctly from the register. To work around this,
+        * report 0 PWM whenever that is what the user last requested.
+        */
+       if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
+            && !smu->user_dpm_profile.fan_speed_pwm) {
+               *speed = 0;
                return 0;
        }
+
+       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),
+                               CG_FDO_CTRL1, FMAX_DUTY100);
+       duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS_ARCT),
+                               CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+       if (duty100) {
+               tmp64 = (uint64_t)duty * 255;
+               do_div(tmp64, duty100);
+               *speed = MIN((uint32_t)tmp64, 255);
+       } else {
+               *speed = 0;
+       }
+
+       return 0;
 }
 
 static int arcturus_get_fan_parameters(struct smu_context *smu)
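
The Arcturus tachometer math above converts between an RPM figure and a pulse period measured against a 25MHz reference clock; the clock is stored as 2500 in 10KHz units, hence the extra factor of 10000. A sketch under those assumptions (the divide-by-8 simply mirrors arcturus_set_fan_speed_rpm() above; we take it from the patch rather than from hardware docs):

    #include <asm/div64.h>
    #include <linux/types.h>

    #define CRYSTAL_CLOCK_FREQ      2500    /* 25MHz in 10KHz units */

    static u32 tach_status_to_rpm(u32 tach_status)
    {
            u64 tmp64 = (u64)CRYSTAL_CLOCK_FREQ * 60 * 10000;

            if (!tach_status)               /* fan stopped or tach unreadable */
                    return 0;
            do_div(tmp64, tach_status);
            return (u32)tmp64;
    }

    static u32 rpm_to_tach_period(u32 rpm)
    {
            if (!rpm)                       /* callers must reject 0 RPM */
                    return 0;
            return 60 * CRYSTAL_CLOCK_FREQ * 10000 / (8 * rpm);
    }
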
@@ -1272,11 +1406,11 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
                return result;
 
        if (smu_version >= 0x360d00)
-               size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+               size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
                        title[0], title[1], title[2], title[3], title[4], title[5],
                        title[6], title[7], title[8], title[9], title[10]);
        else
-               size += sprintf(buf + size, "%16s\n",
+               size += sysfs_emit_at(buf, size, "%16s\n",
                        title[0]);
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -1302,11 +1436,11 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
                        }
                }
 
-               size += sprintf(buf + size, "%2d %14s%s\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
 
                if (smu_version >= 0x360d00) {
-                       size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+                       size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                                " ",
                                0,
                                "GFXCLK",
@@ -1320,7 +1454,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
                                activity_monitor.Gfx_PD_Data_error_coeff,
                                activity_monitor.Gfx_PD_Data_error_rate_coeff);
 
-                       size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+                       size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                                " ",
                                1,
                                "UCLK",
@@ -1916,16 +2050,16 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
        int ret = 0;
 
        if (enable) {
-               if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+               if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) {
+                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 1);
                        if (ret) {
                                dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
                                return ret;
                        }
                }
        } else {
-               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) {
+                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 0);
                        if (ret) {
                                dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
                                return ret;
@@ -2270,7 +2404,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .print_clk_levels = arcturus_print_clk_levels,
        .force_clk_levels = arcturus_force_clk_levels,
        .read_sensor = arcturus_read_sensor,
-       .get_fan_speed_percent = arcturus_get_fan_speed_percent,
+       .get_fan_speed_pwm = arcturus_get_fan_speed_pwm,
+       .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
        .get_power_profile_mode = arcturus_get_power_profile_mode,
        .set_power_profile_mode = arcturus_set_power_profile_mode,
        .set_performance_level = arcturus_set_performance_level,
@@ -2315,7 +2450,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
-       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+       .set_fan_speed_pwm = arcturus_set_fan_speed_pwm,
+       .set_fan_speed_rpm = arcturus_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
        .register_irq_handler = smu_v11_0_register_irq_handler,
index d7722c2..f966817 100644 (file)
@@ -1303,7 +1303,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                                if (ret)
                                        return size;
 
-                               size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                                cur_value == value ? "*" : "");
                        }
                } else {
@@ -1321,7 +1321,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                                freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
 
                        for (i = 0; i < 3; i++) {
-                               size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
                                                i == mark_index ? "*" : "");
                        }
 
@@ -1331,7 +1331,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
                lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
                for (i = 0; i < NUM_LINK_LEVELS; i++)
-                       size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
@@ -1352,23 +1352,24 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                        break;
                if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
                        break;
-               size += sprintf(buf + size, "OD_SCLK:\n");
-               size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+               size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+               size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+                                     od_table->GfxclkFmin, od_table->GfxclkFmax);
                break;
        case SMU_OD_MCLK:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
                if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
                        break;
-               size += sprintf(buf + size, "OD_MCLK:\n");
-               size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
+               size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+               size += sysfs_emit_at(buf, size, "1: %uMHz\n", od_table->UclkFmax);
                break;
        case SMU_OD_VDDC_CURVE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
                if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
                        break;
-               size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+               size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
                for (i = 0; i < 3; i++) {
                        switch (i) {
                        case 0:
@@ -1383,55 +1384,57 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                        default:
                                break;
                        }
-                       size += sprintf(buf + size, "%d: %uMHz %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+                       size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
+                                             i, curve_settings[0],
+                                       curve_settings[1] / NAVI10_VOLTAGE_SCALE);
                }
                break;
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
-               size = sprintf(buf, "%s:\n", "OD_RANGE");
+               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
                                                    &min_value, NULL);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
                                                    NULL, &max_value);
-                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                        min_value, max_value);
                }
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
                                        min_value, max_value);
                }
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+                                             min_value, max_value);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+                                             min_value, max_value);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+                                             min_value, max_value);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+                                             min_value, max_value);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+                                             min_value, max_value);
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
                                                    &min_value, &max_value);
-                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
-                                       min_value, max_value);
+                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+                                             min_value, max_value);
                }
 
                break;
@@ -1668,27 +1671,27 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
-static int navi10_get_fan_speed_percent(struct smu_context *smu,
-                                       uint32_t *speed)
+static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+                                   uint32_t *speed)
 {
-       int ret;
-       u32 rpm;
+       int ret = 0;
 
        if (!speed)
                return -EINVAL;
 
        switch (smu_v11_0_get_fan_control_mode(smu)) {
        case AMD_FAN_CTRL_AUTO:
-               ret = navi1x_get_smu_metrics_data(smu,
+               ret = navi10_get_smu_metrics_data(smu,
                                                  METRICS_CURR_FANSPEED,
-                                                 &rpm);
-               if (!ret && smu->fan_max_rpm)
-                       *speed = rpm * 100 / smu->fan_max_rpm;
-               return ret;
+                                                 speed);
+               break;
        default:
-               *speed = smu->user_dpm_profile.fan_speed_percent;
-               return 0;
+               ret = smu_v11_0_get_fan_speed_rpm(smu,
+                                                 speed);
+               break;
        }
+
+       return ret;
 }
 
 static int navi10_get_fan_parameters(struct smu_context *smu)
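
With the percent-based callbacks gone, each ASIC now wires up separate PWM and RPM hooks and the generic layer dispatches to whichever one the hardware backs. The shape of the split interface, abbreviated from pptable_funcs for illustration:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct fan_ops {                        /* abbreviated sketch */
            int (*get_fan_speed_pwm)(void *ctx, u32 *pwm);  /* 0-255 */
            int (*set_fan_speed_pwm)(void *ctx, u32 pwm);
            int (*get_fan_speed_rpm)(void *ctx, u32 *rpm);
            int (*set_fan_speed_rpm)(void *ctx, u32 rpm);
    };

    static int fan_get_rpm(const struct fan_ops *ops, void *ctx, u32 *rpm)
    {
            if (!ops->get_fan_speed_rpm)
                    return -EOPNOTSUPP;
            return ops->get_fan_speed_rpm(ctx, rpm);
    }
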
@@ -1730,7 +1733,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+       size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
                        title[0], title[1], title[2], title[3], title[4], title[5],
                        title[6], title[7], title[8], title[9], title[10]);
 
@@ -1750,10 +1753,10 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
                        return result;
                }
 
-               size += sprintf(buf + size, "%2d %14s%s:\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        0,
                        "GFXCLK",
@@ -1767,7 +1770,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
                        activity_monitor.Gfx_PD_Data_error_coeff,
                        activity_monitor.Gfx_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        1,
                        "SOCCLK",
@@ -1781,7 +1784,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
                        activity_monitor.Soc_PD_Data_error_coeff,
                        activity_monitor.Soc_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        2,
                        "MEMLK",
@@ -3224,7 +3227,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .display_config_changed = navi10_display_config_changed,
        .notify_smc_display_config = navi10_notify_smc_display_config,
        .is_dpm_running = navi10_is_dpm_running,
-       .get_fan_speed_percent = navi10_get_fan_speed_percent,
+       .get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
+       .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
        .get_power_profile_mode = navi10_get_power_profile_mode,
        .set_power_profile_mode = navi10_set_power_profile_mode,
        .set_watermarks_table = navi10_set_watermarks_table,
@@ -3267,7 +3271,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
-       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+       .set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
+       .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
        .register_irq_handler = smu_v11_0_register_irq_handler,
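
The sprintf() calls above are converted to the sysfs_emit()/sysfs_emit_at()
helpers, which know the sysfs buffer is exactly one page: they warn on a
non page-aligned buffer and refuse to write past PAGE_SIZE, so a long
clock table can no longer overrun the buffer. A minimal sketch of the
pattern, as a hypothetical show() callback with sample values:

    static ssize_t od_range_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            uint32_t min_value = 500, max_value = 2500; /* sample data */
            int size;

            /* sysfs_emit() starts the buffer ... */
            size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
            /* ... sysfs_emit_at() appends at an offset. */
            size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                  min_value, max_value);

            return size;
    }
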
index 261ef8c..5e292c3 100644 (file)
@@ -1088,7 +1088,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                                if (ret)
                                        goto print_clk_out;
 
-                               size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                                cur_value == value ? "*" : "");
                        }
                } else {
@@ -1110,7 +1110,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                        }
 
                        for (i = 0; i < count; i++) {
-                               size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
                                                cur_value  == freq_values[i] ? "*" : "");
                        }
 
@@ -1121,7 +1121,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
                GET_PPTABLE_MEMBER(LclkFreq, &table_member);
                for (i = 0; i < NUM_LINK_LEVELS; i++)
-                       size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+                       size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
@@ -1144,8 +1144,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
                        break;
 
-               size += sprintf(buf + size, "OD_SCLK:\n");
-               size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+               size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+               size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
                break;
 
        case SMU_OD_MCLK:
@@ -1155,8 +1155,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
                        break;
 
-               size += sprintf(buf + size, "OD_MCLK:\n");
-               size += sprintf(buf + size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
+               size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+               size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
                break;
 
        case SMU_OD_VDDGFX_OFFSET:
@@ -1172,22 +1172,22 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                     (smu_version < 0x003a2900))
                        break;
 
-               size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
-               size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+               size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+               size += sysfs_emit_at(buf, size, "%dmV\n", od_table->VddGfxOffset);
                break;
 
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
 
-               size = sprintf(buf, "%s:\n", "OD_RANGE");
+               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
 
                if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
                                                            &min_value, NULL);
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
                                                            NULL, &max_value);
-                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                        min_value, max_value);
                }
 
@@ -1196,7 +1196,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                                                            &min_value, NULL);
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
                                                            NULL, &max_value);
-                       size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
                                        min_value, max_value);
                }
                break;
@@ -1354,27 +1354,20 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
-static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
-                                               uint32_t *speed)
+static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
+                                           uint32_t *speed)
 {
-       int ret;
-       u32 rpm;
-
        if (!speed)
                return -EINVAL;
 
-       switch (smu_v11_0_get_fan_control_mode(smu)) {
-       case AMD_FAN_CTRL_AUTO:
-               ret = sienna_cichlid_get_smu_metrics_data(smu,
-                                                         METRICS_CURR_FANSPEED,
-                                                         &rpm);
-               if (!ret && smu->fan_max_rpm)
-                       *speed = rpm * 100 / smu->fan_max_rpm;
-               return ret;
-       default:
-               *speed = smu->user_dpm_profile.fan_speed_percent;
-               return 0;
-       }
+       /*
+        * For Sienna Cichlid and later ASICs, the fan speed (RPM)
+        * reported by the PMFW is always trustworthy, even when the fan
+        * control feature is disabled or 0 RPM has kicked in.
+        */
+       return sienna_cichlid_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_FANSPEED,
+                                                  speed);
 }
 
 static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
@@ -1419,7 +1412,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
        if (!buf)
                return -EINVAL;
 
-       size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+       size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
                        title[0], title[1], title[2], title[3], title[4], title[5],
                        title[6], title[7], title[8], title[9], title[10]);
 
@@ -1439,10 +1432,10 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
                        return result;
                }
 
-               size += sprintf(buf + size, "%2d %14s%s:\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        0,
                        "GFXCLK",
@@ -1456,7 +1449,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
                        activity_monitor->Gfx_PD_Data_error_coeff,
                        activity_monitor->Gfx_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        1,
                        "SOCCLK",
@@ -1470,7 +1463,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
                        activity_monitor->Fclk_PD_Data_error_coeff,
                        activity_monitor->Fclk_PD_Data_error_rate_coeff);
 
-               size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+               size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
                        " ",
                        2,
                        "MEMLK",
@@ -3859,7 +3852,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .display_config_changed = sienna_cichlid_display_config_changed,
        .notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
        .is_dpm_running = sienna_cichlid_is_dpm_running,
-       .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
+       .get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
+       .get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
        .get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
        .set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
        .set_watermarks_table = sienna_cichlid_set_watermarks_table,
@@ -3902,7 +3896,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
-       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+       .set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
+       .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
        .register_irq_handler = smu_v11_0_register_irq_handler,
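
The single percent-based fan callbacks are split into separate PWM and RPM
entry points in pptable_funcs, matching the hwmon convention amdgpu
exposes: pwm1 is a 0-255 duty cycle, while fan1_input/fan1_target are in
RPM. As a usage illustration (assuming hwmon0 happens to be the amdgpu
device):

    cat /sys/class/hwmon/hwmon0/pwm1        # current duty cycle, 0-255
    cat /sys/class/hwmon/hwmon0/fan1_input  # current fan speed, RPM
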
index b5419e8..87b0554 100644 (file)
@@ -1200,17 +1200,13 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
 }
 
 int
-smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
 {
        struct amdgpu_device *adev = smu->adev;
        uint32_t duty100, duty;
        uint64_t tmp64;
 
-       if (speed > 100)
-               speed = 100;
-
-       if (smu_v11_0_auto_fan_control(smu, 0))
-               return -EINVAL;
+       speed = MIN(speed, 255);
 
        duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
                                CG_FDO_CTRL1, FMAX_DUTY100);
@@ -1218,7 +1214,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
                return -EINVAL;
 
        tmp64 = (uint64_t)speed * duty100;
-       do_div(tmp64, 100);
+       do_div(tmp64, 255);
        duty = (uint32_t)tmp64;
 
        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
@@ -1228,6 +1224,99 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
        return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
 }
 
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+                               uint32_t speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       /*
+        * The crystal_clock_freq used for the fan speed RPM calculation
+        * is always 25 MHz, so hardcode it as 2500 (in 10 kHz units).
+        */
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_period;
+
+       /*
+        * To prevent possible overheating, some ASICs impose a minimum
+        * fan speed:
+        * - Some NV10 SKUs cannot have the fan speed set lower than
+        *   700 RPM.
+        * - Some Sienna Cichlid SKUs cannot have the fan speed set
+        *   lower than 500 RPM.
+        */
+       tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+       WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+                                  CG_TACH_CTRL, TARGET_PERIOD,
+                                  tach_period));
+
+       return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+}
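
A worked example of the RPM-to-tach-period conversion above, with
illustrative numbers: for crystal_clock_freq = 2500 (25 MHz in 10 kHz
units) and a requested speed of 3000 RPM,

    tach_period = 60 * 2500 * 10000 / (8 * 3000) = 62500

so a higher requested RPM programs a proportionally shorter TARGET_PERIOD.
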
+
+int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
+                               uint32_t *speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t duty100, duty;
+       uint64_t tmp64;
+
+       /*
+        * On pre Sienna Cichlid ASICs, 0 RPM may not be detected
+        * correctly by reading the registers. To work around this,
+        * report the fan speed as 0 PWM if that is what the user
+        * just requested.
+        */
+       if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
+            && !smu->user_dpm_profile.fan_speed_pwm) {
+               *speed = 0;
+               return 0;
+       }
+
+       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+                               CG_FDO_CTRL1, FMAX_DUTY100);
+       duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+                               CG_THERMAL_STATUS, FDO_PWM_DUTY);
+       if (!duty100)
+               return -EINVAL;
+
+       tmp64 = (uint64_t)duty * 255;
+       do_div(tmp64, duty100);
+       *speed = MIN((uint32_t)tmp64, 255);
+
+       return 0;
+}
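
A worked example of the duty-cycle read-back above, again with
illustrative register values: if CG_FDO_CTRL1.FMAX_DUTY100 reads 100 and
CG_THERMAL_STATUS.FDO_PWM_DUTY reads 50,

    *speed = 50 * 255 / 100 = 127

i.e. roughly half duty on the 0-255 PWM scale, with the MIN() clamp
guarding against a duty value that exceeds duty100.
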
+
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+                               uint32_t *speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_status;
+       uint64_t tmp64;
+
+       /*
+        * On pre Sienna Cichlid ASICs, 0 RPM may not be detected
+        * correctly by reading the registers. To work around this,
+        * report the fan speed as 0 RPM if that is what the user
+        * just requested.
+        */
+       if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
+            && !smu->user_dpm_profile.fan_speed_rpm) {
+               *speed = 0;
+               return 0;
+       }
+
+       tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
+
+       tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
+       if (tach_status) {
+               do_div(tmp64, tach_status);
+               *speed = (uint32_t)tmp64;
+       } else {
+               dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
+               *speed = 0;
+       }
+
+       return 0;
+}
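
And the inverse read path, with an illustrative tachometer value: the
numerator is fixed at 2500 * 60 * 10000 = 1500000000, so a CG_TACH_STATUS
reading of 500000 yields

    *speed = 1500000000 / 500000 = 3000 RPM

while a zero reading is warned about once and mapped to 0 RPM instead of
dividing by zero.
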
+
 int
 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
                               uint32_t mode)
@@ -1236,7 +1325,9 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
 
        switch (mode) {
        case AMD_FAN_CTRL_NONE:
-               ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+               ret = smu_v11_0_auto_fan_control(smu, 0);
+               if (!ret)
+                       ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
                break;
        case AMD_FAN_CTRL_MANUAL:
                ret = smu_v11_0_auto_fan_control(smu, 0);
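
For background (this is the hwmon convention amdgpu exposes, not part of
the patch itself): pwm1_enable 0 corresponds to AMD_FAN_CTRL_NONE (fan
forced to full speed, hence the PWM value 255 above), 1 to
AMD_FAN_CTRL_MANUAL and 2 to AMD_FAN_CTRL_AUTO. Since
smu_v11_0_set_fan_speed_pwm() no longer disables automatic fan control
itself, the NONE case now has to call smu_v11_0_auto_fan_control(smu, 0)
explicitly before forcing full speed.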
index bcaaa08..6eb50b0 100644 (file)
@@ -592,28 +592,28 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                        (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                }
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                        (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "%s:\n", "OD_RANGE");
-                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
-                       size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
                                smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
                }
                break;
@@ -656,14 +656,14 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                                return ret;
                        if (!value)
                                continue;
-                       size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                        if (cur_value == value)
                                cur_value_match_level = true;
                }
 
                if (!cur_value_match_level)
-                       size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+                       size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
                break;
        default:
                break;
@@ -691,28 +691,28 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                        (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                }
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
-                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
-                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                        (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sprintf(buf, "%s:\n", "OD_RANGE");
-                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
-                       size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+                       size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
                                smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
                }
                break;
@@ -755,14 +755,14 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                                return ret;
                        if (!value)
                                continue;
-                       size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                        if (cur_value == value)
                                cur_value_match_level = true;
                }
 
                if (!cur_value_match_level)
-                       size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+                       size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
                break;
        default:
                break;
@@ -1035,7 +1035,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
                if (workload_type < 0)
                        continue;
 
-               size += sprintf(buf + size, "%2d %14s%s\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
        }
 
index 9a9c24a..b391380 100644 (file)
@@ -510,16 +510,16 @@ static int renoir_print_clk_levels(struct smu_context *smu,
                                                0, &max);
                        if (ret)
                                return ret;
-                       size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+                       size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
                }
                break;
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
                        max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
-                       size += sprintf(buf + size, "OD_SCLK\n");
-                       size += sprintf(buf + size, "0:%10uMhz\n", min);
-                       size += sprintf(buf + size, "1:%10uMhz\n", max);
+                       size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+                       size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+                       size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
                }
                break;
        case SMU_GFXCLK:
@@ -536,12 +536,12 @@ static int renoir_print_clk_levels(struct smu_context *smu,
                        else
                                i = 1;
 
-                       size += sprintf(buf + size, "0: %uMhz %s\n", min,
+                       size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
                                        i == 0 ? "*" : "");
-                       size += sprintf(buf + size, "1: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
                                        i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK,
                                        i == 1 ? "*" : "");
-                       size += sprintf(buf + size, "2: %uMhz %s\n", max,
+                       size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
                                        i == 2 ? "*" : "");
                }
                return size;
@@ -588,14 +588,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
                                return ret;
                        if (!value)
                                continue;
-                       size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                        if (cur_value == value)
                                cur_value_match_level = true;
                }
 
                if (!cur_value_match_level)
-                       size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+                       size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
 
                break;
        default:
@@ -1118,7 +1118,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
                if (workload_type < 0)
                        continue;
 
-               size += sprintf(buf + size, "%2d %14s%s\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
        }
 
index 562783d..ec8c30d 100644 (file)
@@ -90,8 +90,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
        MSG_MAP(GetDriverIfVersion,                  PPSMC_MSG_GetDriverIfVersion,              1),
        MSG_MAP(EnableAllSmuFeatures,                PPSMC_MSG_EnableAllSmuFeatures,            0),
        MSG_MAP(DisableAllSmuFeatures,               PPSMC_MSG_DisableAllSmuFeatures,           0),
-       MSG_MAP(GetEnabledSmuFeaturesLow,            PPSMC_MSG_GetEnabledSmuFeaturesLow,        0),
-       MSG_MAP(GetEnabledSmuFeaturesHigh,           PPSMC_MSG_GetEnabledSmuFeaturesHigh,       0),
+       MSG_MAP(GetEnabledSmuFeaturesLow,            PPSMC_MSG_GetEnabledSmuFeaturesLow,        1),
+       MSG_MAP(GetEnabledSmuFeaturesHigh,           PPSMC_MSG_GetEnabledSmuFeaturesHigh,       1),
        MSG_MAP(SetDriverDramAddrHigh,               PPSMC_MSG_SetDriverDramAddrHigh,           1),
        MSG_MAP(SetDriverDramAddrLow,                PPSMC_MSG_SetDriverDramAddrLow,            1),
        MSG_MAP(SetToolsDramAddrHigh,                PPSMC_MSG_SetToolsDramAddrHigh,            0),
@@ -156,14 +156,14 @@ static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUN
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT,                   FEATURE_DPM_SOCCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT,                     FEATURE_DPM_FCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT,                     FEATURE_DPM_LCLK_BIT),
-       ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_BIT,                                 FEATURE_DPM_XGMI_BIT),
+       ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT,                             FEATURE_DPM_XGMI_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT,                    FEATURE_DS_GFXCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT,                    FEATURE_DS_SOCCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT,                              FEATURE_DS_LCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT,                              FEATURE_DS_FCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT,                              FEATURE_DS_UCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT,                               FEATURE_GFX_SS_BIT),
-       ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_PG_BIT,                               FEATURE_DPM_VCN_BIT),
+       ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT,                              FEATURE_DPM_VCN_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT,                  FEATURE_RSMU_SMN_CG_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT,                              FEATURE_WAFL_CG_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT,                                  FEATURE_PPT_BIT),
@@ -735,14 +735,14 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
        uint32_t min_clk, max_clk;
 
        if (amdgpu_ras_intr_triggered())
-               return snprintf(buf, PAGE_SIZE, "unavailable\n");
+               return sysfs_emit(buf, "unavailable\n");
 
        dpm_context = smu_dpm->dpm_context;
 
        switch (type) {
 
        case SMU_OD_SCLK:
-               size = sprintf(buf, "%s:\n", "GFXCLK");
+               size = sysfs_emit(buf, "%s:\n", "GFXCLK");
                fallthrough;
        case SMU_SCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -779,8 +779,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                 */
                if (display_levels == clocks.num_levels) {
                        for (i = 0; i < clocks.num_levels; i++)
-                               size += sprintf(
-                                       buf + size, "%d: %uMhz %s\n", i,
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
                                        freq_values[i],
                                        (clocks.num_levels == 1) ?
                                                "*" :
@@ -790,14 +789,14 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                                                         ""));
                } else {
                        for (i = 0; i < display_levels; i++)
-                               size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+                               size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
                                                freq_values[i], i == 1 ? "*" : "");
                }
 
                break;
 
        case SMU_OD_MCLK:
-               size = sprintf(buf, "%s:\n", "MCLK");
+               size = sysfs_emit(buf, "%s:\n", "MCLK");
                fallthrough;
        case SMU_MCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
@@ -814,7 +813,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, clocks.data[i].clocks_in_khz / 1000,
                                        (clocks.num_levels == 1) ? "*" :
                                        (aldebaran_freqs_in_same_level(
@@ -837,7 +836,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, clocks.data[i].clocks_in_khz / 1000,
                                        (clocks.num_levels == 1) ? "*" :
                                        (aldebaran_freqs_in_same_level(
@@ -860,7 +859,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, single_dpm_table->dpm_levels[i].value,
                                        (clocks.num_levels == 1) ? "*" :
                                        (aldebaran_freqs_in_same_level(
@@ -883,7 +882,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, single_dpm_table->dpm_levels[i].value,
                                        (clocks.num_levels == 1) ? "*" :
                                        (aldebaran_freqs_in_same_level(
@@ -906,7 +905,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                }
 
                for (i = 0; i < single_dpm_table->count; i++)
-                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
                                        i, single_dpm_table->dpm_levels[i].value,
                                        (clocks.num_levels == 1) ? "*" :
                                        (aldebaran_freqs_in_same_level(
@@ -1194,8 +1193,19 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
        uint32_t power_limit = 0;
        int ret;
 
-       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
-               return -EINVAL;
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+               if (current_power_limit)
+                       *current_power_limit = 0;
+               if (default_power_limit)
+                       *default_power_limit = 0;
+               if (max_power_limit)
+                       *max_power_limit = 0;
+
+               dev_warn(smu->adev->dev,
+                       "PPT feature is not enabled, power values can't be fetched.");
+
+               return 0;
+       }
 
        /* Valid power data is available only from primary die.
         * For secondary die show the value as 0.
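
With this change a disabled PPT feature no longer fails the query; the
limits are reported as 0 and the caller decides what to do. A hypothetical
caller sketch (variable names are illustrative):

    uint32_t cur = 0, def = 0, max = 0;

    /* 0 now means "unavailable" rather than a hard -EINVAL. */
    if (!aldebaran_get_power_limit(smu, &cur, &def, &max) && max)
            dev_info(smu->adev->dev, "power limit %u (default %u, max %u)\n",
                     cur, def, max);
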
index a421ba8..a0e50f2 100644 (file)
@@ -85,6 +85,10 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
        const struct common_firmware_header *header;
        struct amdgpu_firmware_info *ucode = NULL;
 
+       /* no need to load the SMU firmware in SR-IOV mode */
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        switch (adev->asic_type) {
        case CHIP_ALDEBARAN:
                chip_name = "aldebaran";
@@ -268,52 +272,86 @@ static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
        return 0;
 }
 
-int smu_v13_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
 {
        struct amdgpu_device *adev = smu->adev;
-       const struct smc_firmware_header_v1_0 *hdr;
-       int ret, index;
-       uint32_t size = 0;
        uint16_t atom_table_size;
        uint8_t frev, crev;
-       void *table;
-       uint16_t version_major, version_minor;
+       int ret, index;
 
+       dev_info(adev->dev, "use vbios provided pptable\n");
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                                           powerplayinfo);
 
-       if (amdgpu_smu_pptable_id >= 0) {
-               smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
-               dev_info(adev->dev, "override pptable id %d\n", amdgpu_smu_pptable_id);
-       }
+       ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+                                            (uint8_t **)table);
+       if (ret)
+               return ret;
+
+       if (size)
+               *size = atom_table_size;
+
+       return 0;
+}
+
+static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
+                                              uint32_t pptable_id)
+{
+       const struct smc_firmware_header_v1_0 *hdr;
+       struct amdgpu_device *adev = smu->adev;
+       uint16_t version_major, version_minor;
+       int ret;
 
        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+       if (!hdr)
+               return -EINVAL;
+
+       dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
        version_major = le16_to_cpu(hdr->header.header_version_major);
        version_minor = le16_to_cpu(hdr->header.header_version_minor);
-       if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
-               dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
-               switch (version_minor) {
-               case 1:
-                       ret = smu_v13_0_set_pptable_v2_1(smu, &table, &size,
-                                                        smu->smu_table.boot_values.pp_table_id);
-                       break;
-               default:
-                       ret = -EINVAL;
-                       break;
-               }
-               if (ret)
-                       return ret;
+       if (version_major != 2) {
+               dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+                       version_major, version_minor);
+               return -EINVAL;
+       }
 
-       } else {
-               dev_info(adev->dev, "use vbios provided pptable\n");
-               index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                                                   powerplayinfo);
+       switch (version_minor) {
+       case 1:
+               ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
 
-               ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
-                                                    (uint8_t **)&table);
-               if (ret)
-                       return ret;
-               size = atom_table_size;
+       return ret;
+}
+
+int smu_v13_0_setup_pptable(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t size = 0, pptable_id = 0;
+       void *table;
+       int ret = 0;
+
+       /* override pptable_id from driver parameter */
+       if (amdgpu_smu_pptable_id >= 0) {
+               pptable_id = amdgpu_smu_pptable_id;
+               dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+       } else {
+               pptable_id = smu->smu_table.boot_values.pp_table_id;
        }
 
+       /* force using the vbios pptable in SR-IOV mode */
+       if (amdgpu_sriov_vf(adev) || !pptable_id)
+               ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
+       else
+               ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+       if (ret)
+               return ret;
+
        if (!smu->smu_table.power_play_table)
                smu->smu_table.power_play_table = table;
        if (!smu->smu_table.power_play_table_size)
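
After this refactor the pptable selection reads top-down: an explicit
amdgpu.smu_pptable_id module parameter overrides the id from the vbios
boot values, and SR-IOV guests (or a zero id) are forced onto the
vbios-provided table. A hypothetical usage illustration (parameter name
inferred from the amdgpu_smu_pptable_id variable above):

    modprobe amdgpu smu_pptable_id=0    # force the vbios pptable
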
index 0cfeb9f..0f17c25 100644 (file)
@@ -572,7 +572,7 @@ static int yellow_carp_get_power_profile_mode(struct smu_context *smu,
                if (workload_type < 0)
                        continue;
 
-               size += sprintf(buf + size, "%2d %14s%s\n",
+               size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
                        i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
        }
 
@@ -1054,15 +1054,15 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 
        switch (clk_type) {
        case SMU_OD_SCLK:
-               size = sprintf(buf, "%s:\n", "OD_SCLK");
-               size += sprintf(buf + size, "0: %10uMhz\n",
+               size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
-               size += sprintf(buf + size, "1: %10uMhz\n",
+               size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                break;
        case SMU_OD_RANGE:
-               size = sprintf(buf, "%s:\n", "OD_RANGE");
-               size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                break;
        case SMU_SOCCLK:
@@ -1083,7 +1083,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
                        if (ret)
                                goto print_clk_out;
 
-                       size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                }
                break;
index 415be74..66711ab 100644 (file)
@@ -710,7 +710,7 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                        return 0;
        }
 
-       size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+       size =  sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
                        feature_mask[1], feature_mask[0]);
 
        memset(sort_feature, -1, sizeof(sort_feature));
@@ -725,14 +725,14 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                sort_feature[feature_index] = i;
        }
 
-       size += sprintf(buf + size, "%-2s. %-20s  %-3s : %-s\n",
+       size += sysfs_emit_at(buf, size, "%-2s. %-20s  %-3s : %-s\n",
                        "No", "Feature", "Bit", "State");
 
        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                if (sort_feature[i] < 0)
                        continue;
 
-               size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+               size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
                                count++,
                                smu_get_feature_name(smu, sort_feature[i]),
                                i,
index f0a0727..7dcc639 100644 (file)
@@ -468,17 +468,7 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
        ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
 };
 
-static const struct file_operations fops = {
-       .owner              = THIS_MODULE,
-       .open               = drm_open,
-       .release            = drm_release,
-       .unlocked_ioctl     = drm_ioctl,
-       .compat_ioctl       = drm_compat_ioctl,
-       .poll               = drm_poll,
-       .read               = drm_read,
-       .llseek             = no_llseek,
-       .mmap               = etnaviv_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
 
 static const struct drm_driver etnaviv_drm_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
@@ -487,7 +477,7 @@ static const struct drm_driver etnaviv_drm_driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
-       .gem_prime_mmap     = etnaviv_gem_prime_mmap,
+       .gem_prime_mmap     = drm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = etnaviv_debugfs_init,
 #endif
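
DEFINE_DRM_GEM_FOPS() from include/drm/drm_gem.h generates roughly the
structure deleted above, with two differences that drive the rest of this
series: .mmap becomes the generic drm_gem_mmap() (so the driver-private
mmap logic has to move into drm_gem_object_funcs, see below) and .llseek
becomes noop_llseek. A sketch of the expansion, paraphrased rather than
quoted verbatim:

    #define DEFINE_DRM_GEM_FOPS(name) \
            static const struct file_operations name = { \
                    .owner          = THIS_MODULE, \
                    .open           = drm_open, \
                    .release        = drm_release, \
                    .unlocked_ioctl = drm_ioctl, \
                    .compat_ioctl   = drm_compat_ioctl, \
                    .poll           = drm_poll, \
                    .read           = drm_read, \
                    .llseek         = noop_llseek, \
                    .mmap           = drm_gem_mmap, \
            }
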
index 003288e..049ae87 100644 (file)
@@ -47,12 +47,9 @@ struct etnaviv_drm_private {
 int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
-int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
-                          struct vm_area_struct *vma);
 struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        struct dma_buf_attachment *attach, struct sg_table *sg);
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
index b8fa6ed..8f1b5af 100644 (file)
@@ -130,8 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
        pgprot_t vm_page_prot;
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
@@ -154,19 +153,11 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
        return 0;
 }
 
-int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
-       struct etnaviv_gem_object *obj;
-       int ret;
-
-       ret = drm_gem_mmap(filp, vma);
-       if (ret) {
-               DBG("mmap failed: %d", ret);
-               return ret;
-       }
+       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
-       obj = to_etnaviv_bo(vma->vm_private_data);
-       return obj->ops->mmap(obj, vma);
+       return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
 }
 
 static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
@@ -567,6 +558,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
        .unpin = etnaviv_gem_prime_unpin,
        .get_sg_table = etnaviv_gem_prime_get_sg_table,
        .vmap = etnaviv_gem_prime_vmap,
+       .mmap = etnaviv_gem_mmap,
        .vm_ops = &vm_ops,
 };
 
index d741b1d..6d8bed9 100644 (file)
@@ -34,19 +34,6 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
        return 0;
 }
 
-int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
-                          struct vm_area_struct *vma)
-{
-       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-       int ret;
-
-       ret = drm_gem_mmap_obj(obj, obj->size, vma);
-       if (ret < 0)
-               return ret;
-
-       return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
-}
-
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
 {
        if (!obj->import_attach) {
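
With .mmap wired into drm_gem_object_funcs in the previous file, both mmap
entry points converge on the same driver hook, which is what makes the
etnaviv_gem_prime_mmap() wrapper deleted above redundant. Simplified call
flow, as an illustration:

    /* mmap() on the DRM fd:   drm_gem_mmap()       -> obj->funcs->mmap() */
    /* mmap() on a dma-buf fd: drm_gem_prime_mmap() -> obj->funcs->mmap() */
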
index 4102bce..c297fff 100644 (file)
@@ -613,6 +613,12 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
            etnaviv_is_model_rev(gpu, GC2000, 0x5108))
                pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
 
+       /* Disable SE, RA and TX clock gating on affected core revisions. */
+       if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
+                      VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA |
+                      VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
        pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
        pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 
index dfc0f53..f2fc645 100644 (file)
@@ -39,6 +39,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
        },
        {
                .model = 0x7000,
+               .revision = 0x6202,
+               .product_id = 0x70003,
+               .customer_id = 0,
+               .eco_id = 0,
+               .stream_count = 8,
+               .register_max = 64,
+               .thread_count = 512,
+               .shader_core_count = 2,
+               .vertex_cache_size = 16,
+               .vertex_output_buffer_size = 1024,
+               .pixel_pipes = 1,
+               .instruction_count = 512,
+               .num_constants = 320,
+               .buffer_size = 0,
+               .varyings_count = 16,
+               .features = 0xe0287cad,
+               .minor_features0 = 0xc1489eff,
+               .minor_features1 = 0xfefbfad9,
+               .minor_features2 = 0xeb9d4fbf,
+               .minor_features3 = 0xedfffced,
+               .minor_features4 = 0xdb0dafc7,
+               .minor_features5 = 0x3b5ac333,
+               .minor_features6 = 0xfccee201,
+               .minor_features7 = 0x03fffa6f,
+               .minor_features8 = 0x00e10ef0,
+               .minor_features9 = 0x0088003c,
+               .minor_features10 = 0x00004040,
+               .minor_features11 = 0x00000024,
+       },
+       {
+               .model = 0x7000,
                .revision = 0x6204,
                .product_id = ~0U,
                .customer_id = ~0U,
index 6640b7c..ca382fb 100644 (file)
@@ -168,6 +168,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
                break;
        case 2:
                tiling_flags |= RADEON_TILING_SWAP_16BIT;
+               break;
        default:
                break;
        }
index 5043dca..1650a44 100644 (file)
@@ -9,6 +9,7 @@ config DRM_TEGRA
        select DRM_MIPI_DSI
        select DRM_PANEL
        select TEGRA_HOST1X
+       select INTERCONNECT
        select IOMMU_IOVA
        select CEC_CORE if CEC_NOTIFIER
        help
index d6cf202..d801909 100644 (file)
@@ -3,6 +3,9 @@ ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
 
 tegra-drm-y := \
        drm.o \
+       uapi.o \
+       submit.o \
+       firewall.o \
        gem.o \
        fb.o \
        dp.o \
index 51bbbc4..16c7aab 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/iommu.h>
+#include <linux/interconnect.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
@@ -618,9 +619,14 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
        struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
        int err;
 
+       plane_state->peak_memory_bandwidth = 0;
+       plane_state->avg_memory_bandwidth = 0;
+
        /* no need for further checks if the plane is being disabled */
-       if (!new_plane_state->crtc)
+       if (!new_plane_state->crtc) {
+               plane_state->total_peak_memory_bandwidth = 0;
                return 0;
+       }
 
        err = tegra_plane_format(new_plane_state->fb->format->format,
                                 &plane_state->format,
@@ -808,6 +814,12 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
        formats = dc->soc->primary_formats;
        modifiers = dc->soc->modifiers;
 
+       err = tegra_plane_interconnect_init(plane);
+       if (err) {
+               kfree(plane);
+               return ERR_PTR(err);
+       }
+
        err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
                                       &tegra_plane_funcs, formats,
                                       num_formats, modifiers, type, NULL);
@@ -845,12 +857,18 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
 {
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
+       struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
        struct tegra_plane *tegra = to_tegra_plane(plane);
        int err;
 
+       plane_state->peak_memory_bandwidth = 0;
+       plane_state->avg_memory_bandwidth = 0;
+
        /* no need for further checks if the plane is being disabled */
-       if (!new_plane_state->crtc)
+       if (!new_plane_state->crtc) {
+               plane_state->total_peak_memory_bandwidth = 0;
                return 0;
+       }
 
        /* scaling not supported for cursor */
        if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) ||
@@ -1030,6 +1048,12 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
        if (!dc->soc->has_nvdisplay) {
                num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
                formats = tegra_legacy_cursor_plane_formats;
+
+               err = tegra_plane_interconnect_init(plane);
+               if (err) {
+                       kfree(plane);
+                       return ERR_PTR(err);
+               }
        } else {
                num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
                formats = tegra_cursor_plane_formats;
@@ -1149,6 +1173,12 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
        num_formats = dc->soc->num_overlay_formats;
        formats = dc->soc->overlay_formats;
 
+       err = tegra_plane_interconnect_init(plane);
+       if (err) {
+               kfree(plane);
+               return ERR_PTR(err);
+       }
+
        if (!cursor)
                type = DRM_PLANE_TYPE_OVERLAY;
        else
@@ -1572,6 +1602,11 @@ static int tegra_dc_show_stats(struct seq_file *s, void *data)
        seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
        seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
 
+       seq_printf(s, "frames total: %lu\n", dc->stats.frames_total);
+       seq_printf(s, "vblank total: %lu\n", dc->stats.vblank_total);
+       seq_printf(s, "underflow total: %lu\n", dc->stats.underflow_total);
+       seq_printf(s, "overflow total: %lu\n", dc->stats.overflow_total);
+
        return 0;
 }
 
@@ -1804,6 +1839,106 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
        return -ETIMEDOUT;
 }
 
+static void
+tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state,
+                                  bool prepare_bandwidth_transition)
+{
+       const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
+       const struct tegra_dc_state *old_dc_state, *new_dc_state;
+       u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
+       const struct drm_plane_state *old_plane_state;
+       const struct drm_crtc_state *old_crtc_state;
+       struct tegra_dc_window window, old_window;
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       struct tegra_plane *tegra;
+       struct drm_plane *plane;
+
+       if (dc->soc->has_nvdisplay)
+               return;
+
+       old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+       old_dc_state = to_const_dc_state(old_crtc_state);
+       new_dc_state = to_const_dc_state(crtc->state);
+
+       if (!crtc->state->active) {
+               if (!old_crtc_state->active)
+                       return;
+
+               /*
+                * When the CRTC is disabled via DPMS, the state of the
+                * attached planes is kept unchanged. Hence we need to
+                * explicitly remove the bandwidths from the ICC paths.
+                */
+               drm_atomic_crtc_for_each_plane(plane, crtc) {
+                       tegra = to_tegra_plane(plane);
+
+                       icc_set_bw(tegra->icc_mem, 0, 0);
+                       icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+               }
+
+               return;
+       }
+
+       for_each_old_plane_in_state(old_crtc_state->state, plane,
+                                   old_plane_state, i) {
+               old_tegra_state = to_const_tegra_plane_state(old_plane_state);
+               new_tegra_state = to_const_tegra_plane_state(plane->state);
+               tegra = to_tegra_plane(plane);
+
+               /*
+                * We're iterating over the global atomic state, which may
+                * contain planes from other CRTCs, hence we need to filter
+                * out the planes unrelated to this CRTC.
+                */
+               if (tegra->dc != dc)
+                       continue;
+
+               new_avg_bw = new_tegra_state->avg_memory_bandwidth;
+               old_avg_bw = old_tegra_state->avg_memory_bandwidth;
+
+               new_peak_bw = new_tegra_state->total_peak_memory_bandwidth;
+               old_peak_bw = old_tegra_state->total_peak_memory_bandwidth;
+
+               /*
+                * See the comment related to !crtc->state->active above,
+                * which explains why bandwidths need to be updated when
+                * the CRTC is turning on.
+                */
+               if (new_avg_bw == old_avg_bw && new_peak_bw == old_peak_bw &&
+                   old_crtc_state->active)
+                       continue;
+
+               window.src.h = drm_rect_height(&plane->state->src) >> 16;
+               window.dst.h = drm_rect_height(&plane->state->dst);
+
+               old_window.src.h = drm_rect_height(&old_plane_state->src) >> 16;
+               old_window.dst.h = drm_rect_height(&old_plane_state->dst);
+
+               /*
+                * During the preparation phase (atomic_begin), the memory
+                * freq should go high before the DC changes are committed
+                * if the bandwidth requirement goes up, otherwise the memory
+                * freq should stay high if the BW requirement goes down.  The
+                * opposite applies to the completion phase (post_commit).
+                */
+               if (prepare_bandwidth_transition) {
+                       new_avg_bw = max(old_avg_bw, new_avg_bw);
+                       new_peak_bw = max(old_peak_bw, new_peak_bw);
+
+                       if (tegra_plane_use_vertical_filtering(tegra, &old_window))
+                               window = old_window;
+               }
+
+               icc_set_bw(tegra->icc_mem, new_avg_bw, new_peak_bw);
+
+               if (tegra_plane_use_vertical_filtering(tegra, &window))
+                       icc_set_bw(tegra->icc_mem_vfilter, new_avg_bw, new_peak_bw);
+               else
+                       icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+       }
+}
+
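The two-phase policy described in the comment above boils down to clamping to max(old, new) while the commit is in flight, then letting the plain new values take effect once the hardware state is armed. A minimal userspace sketch of that selection logic (names and values are illustrative, not the driver's API):

/*
 * Standalone sketch of the prepare/complete bandwidth selection.
 */
#include <stdbool.h>
#include <stdio.h>

struct bw { unsigned int avg, peak; };

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Pick the bandwidth to program for a given phase of the commit. */
static struct bw select_bw(struct bw old, struct bw new, bool prepare)
{
	if (prepare) {
		/* Before the commit is armed: never drop below either state. */
		new.avg = max_u32(old.avg, new.avg);
		new.peak = max_u32(old.peak, new.peak);
	}
	/* After the commit (post_commit): the new values apply as-is. */
	return new;
}

int main(void)
{
	struct bw old = { .avg = 500000, .peak = 800000 };
	struct bw new = { .avg = 200000, .peak = 300000 };
	struct bw pre = select_bw(old, new, true);
	struct bw post = select_bw(old, new, false);

	printf("prepare: avg=%u peak=%u\n", pre.avg, pre.peak);   /* stays high */
	printf("post:    avg=%u peak=%u\n", post.avg, post.peak); /* drops */
	return 0;
}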
 static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
                                      struct drm_atomic_state *state)
 {
@@ -1985,6 +2120,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
 {
        unsigned long flags;
 
+       tegra_crtc_update_memory_bandwidth(crtc, state, true);
+
        if (crtc->state->event) {
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
@@ -2017,7 +2154,207 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
        value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 }
 
+static bool tegra_plane_is_cursor(const struct drm_plane_state *state)
+{
+       const struct tegra_dc_soc_info *soc = to_tegra_dc(state->crtc)->soc;
+       const struct drm_format_info *fmt = state->fb->format;
+       unsigned int src_w = drm_rect_width(&state->src) >> 16;
+       unsigned int dst_w = drm_rect_width(&state->dst);
+
+       if (state->plane->type != DRM_PLANE_TYPE_CURSOR)
+               return false;
+
+       if (soc->supports_cursor)
+               return true;
+
+       if (src_w != dst_w || fmt->num_planes != 1 || src_w * fmt->cpp[0] > 256)
+               return false;
+
+       return true;
+}
+
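On SoCs without a dedicated cursor, a window only counts as a cursor here if it is unscaled, single-plane, and no wider than 256 bytes per line. A tiny standalone check of the same condition (the values are made up):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative legacy-cursor test: unscaled, one plane, <= 256 B/line. */
	unsigned int src_w = 32, dst_w = 32, num_planes = 1, cpp = 4;

	bool ok = src_w == dst_w && num_planes == 1 && src_w * cpp <= 256;
	printf("window %s be a legacy cursor\n", ok ? "can" : "cannot");
	return 0;
}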
+static unsigned long
+tegra_plane_overlap_mask(struct drm_crtc_state *state,
+                        const struct drm_plane_state *plane_state)
+{
+       const struct drm_plane_state *other_state;
+       const struct tegra_plane *tegra;
+       unsigned long overlap_mask = 0;
+       struct drm_plane *plane;
+       struct drm_rect rect;
+
+       if (!plane_state->visible || !plane_state->fb)
+               return 0;
+
+       /*
+        * The data-prefetch FIFO will easily help to overcome temporary memory
+        * pressure if another plane overlaps with the cursor plane.
+        */
+       if (tegra_plane_is_cursor(plane_state))
+               return 0;
+
+       drm_atomic_crtc_state_for_each_plane_state(plane, other_state, state) {
+               rect = plane_state->dst;
+
+               tegra = to_tegra_plane(other_state->plane);
+
+               if (!other_state->visible || !other_state->fb)
+                       continue;
+
+               /*
+                * Ignore overlaps with the cursor plane because it's not
+                * practical to assume that it contributes to the bandwidth
+                * of the overlapping area when the window width is small.
+                */
+               if (tegra_plane_is_cursor(other_state))
+                       continue;
+
+               if (drm_rect_intersect(&rect, &other_state->dst))
+                       overlap_mask |= BIT(tegra->index);
+       }
+
+       return overlap_mask;
+}
+
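The mask construction above can be reproduced with plain rectangle intersection tests. A self-contained sketch (this uses a boolean intersection test rather than DRM's clipping drm_rect_intersect(); the geometry is made up):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static bool rects_intersect(const struct rect *a, const struct rect *b)
{
	return a->x1 < b->x2 && b->x1 < a->x2 &&
	       a->y1 < b->y2 && b->y1 < a->y2;
}

int main(void)
{
	/* Three windows: planes 0 and 1 overlap, plane 2 is off to the side. */
	struct rect win[3] = {
		{    0,   0,  640, 480 },
		{  320, 240,  960, 720 },
		{ 1000,   0, 1100, 100 },
	};
	unsigned long mask[3] = { 0 };

	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			if (rects_intersect(&win[i], &win[j]))
				mask[i] |= 1ul << j;

	/* mask[plane] includes its own bit, as in the driver. */
	for (int i = 0; i < 3; i++)
		printf("plane %d overlap mask: 0x%lx\n", i, mask[i]);
	return 0;
}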
+static int tegra_crtc_calculate_memory_bandwidth(struct drm_crtc *crtc,
+                                                struct drm_atomic_state *state)
+{
+       ulong overlap_mask[TEGRA_DC_LEGACY_PLANES_NUM] = {}, mask;
+       u32 plane_peak_bw[TEGRA_DC_LEGACY_PLANES_NUM] = {};
+       bool all_planes_overlap_simultaneously = true;
+       const struct tegra_plane_state *tegra_state;
+       const struct drm_plane_state *plane_state;
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       const struct drm_crtc_state *old_state;
+       struct drm_crtc_state *new_state;
+       struct tegra_plane *tegra;
+       struct drm_plane *plane;
+
+       /*
+        * The nv-display uses shared planes.  The algorithm below assumes
+        * a maximum of 3 planes per CRTC; this assumption isn't applicable
+        * to the nv-display.  Note that T124 has additional windows, but
+        * they aren't currently supported by the driver.
+        */
+       if (dc->soc->has_nvdisplay)
+               return 0;
+
+       new_state = drm_atomic_get_new_crtc_state(state, crtc);
+       old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+       /*
+        * For overlapping planes, pixel data is fetched for each plane at
+        * the same time, hence the bandwidths accumulate in this case.
+        * This needs to be taken into account when calculating the total
+        * bandwidth consumed by all planes.
+        *
+        * Here we get the overlap state of each plane, which is a bitmask
+        * of plane indices telling which planes it overlaps with. Note that
+        * bitmask[plane] includes BIT(plane) in order to make the further
+        * code nicer and simpler.
+        */
+       drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+               tegra_state = to_const_tegra_plane_state(plane_state);
+               tegra = to_tegra_plane(plane);
+
+               if (WARN_ON_ONCE(tegra->index >= TEGRA_DC_LEGACY_PLANES_NUM))
+                       return -EINVAL;
+
+               plane_peak_bw[tegra->index] = tegra_state->peak_memory_bandwidth;
+               mask = tegra_plane_overlap_mask(new_state, plane_state);
+               overlap_mask[tegra->index] = mask;
+
+               if (hweight_long(mask) != 3)
+                       all_planes_overlap_simultaneously = false;
+       }
+
+       /*
+        * Then we calculate the maximum bandwidth of each plane state.
+        * The bandwidth includes the plane BW + the BW of the "simultaneously"
+        * overlapping planes, where "simultaneously" means areas where the DC
+        * fetches from the planes simultaneously during the scan-out process.
+        *
+        * For example, if plane A overlaps with planes B and C, but B and C
+        * don't overlap, then the peak bandwidth will be in either the area
+        * where A and B overlap or the area where A and C overlap.
+        *
+        * plane_peak_bw[] contains the peak memory bandwidth value of each
+        * plane; this information is needed by the interconnect provider in
+        * order to set up the latency allowance based on the peak BW, see
+        * tegra_crtc_update_memory_bandwidth().
+        */
+       drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+               u32 i, old_peak_bw, new_peak_bw, overlap_bw = 0;
+
+               /*
+                * Note that the plane's atomic check doesn't touch the
+                * total_peak_memory_bandwidth of an enabled plane, hence
+                * the current state contains the old bandwidth state from
+                * the previous CRTC commit.
+                */
+               tegra_state = to_const_tegra_plane_state(plane_state);
+               tegra = to_tegra_plane(plane);
+
+               for_each_set_bit(i, &overlap_mask[tegra->index], 3) {
+                       if (i == tegra->index)
+                               continue;
+
+                       if (all_planes_overlap_simultaneously)
+                               overlap_bw += plane_peak_bw[i];
+                       else
+                               overlap_bw = max(overlap_bw, plane_peak_bw[i]);
+               }
+
+               new_peak_bw = plane_peak_bw[tegra->index] + overlap_bw;
+               old_peak_bw = tegra_state->total_peak_memory_bandwidth;
+
+               /*
+                * If the plane's peak bandwidth changed (for example the
+                * plane is no longer overlapped) and the plane isn't in the
+                * atomic state, then add the plane to the state in order to
+                * have its bandwidth updated.
+                */
+               if (old_peak_bw != new_peak_bw) {
+                       struct tegra_plane_state *new_tegra_state;
+                       struct drm_plane_state *new_plane_state;
+
+                       new_plane_state = drm_atomic_get_plane_state(state, plane);
+                       if (IS_ERR(new_plane_state))
+                               return PTR_ERR(new_plane_state);
+
+                       new_tegra_state = to_tegra_plane_state(new_plane_state);
+                       new_tegra_state->total_peak_memory_bandwidth = new_peak_bw;
+               }
+       }
+
+       return 0;
+}
+
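The accumulation rule above: if all three windows ever overlap at once, each plane's total is the sum of all peaks; otherwise each plane's worst case is its own peak plus the largest single overlapping peer. A standalone sketch with made-up numbers:

#include <stdio.h>

#define NPLANES 3

int main(void)
{
	/* Peak BW per plane and the overlap masks computed earlier:
	 * planes 0 and 1 overlap each other, plane 2 stands alone. */
	unsigned int peak[NPLANES] = { 400, 300, 200 };
	unsigned long mask[NPLANES] = { 0x3, 0x3, 0x4 };
	int all_overlap = 1;

	for (int i = 0; i < NPLANES; i++)
		if (__builtin_popcountl(mask[i]) != NPLANES)
			all_overlap = 0;

	for (int i = 0; i < NPLANES; i++) {
		unsigned int overlap_bw = 0;

		for (int j = 0; j < NPLANES; j++) {
			if (j == i || !(mask[i] & (1ul << j)))
				continue;
			if (all_overlap)
				overlap_bw += peak[j];		/* worst case: sum */
			else if (peak[j] > overlap_bw)
				overlap_bw = peak[j];		/* worst pair only */
		}
		printf("plane %d total peak: %u\n", i, peak[i] + overlap_bw);
	}
	return 0;
}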
+static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state)
+{
+       int err;
+
+       err = tegra_crtc_calculate_memory_bandwidth(crtc, state);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state)
+{
+       /*
+        * Display bandwidth is allowed to go down only once the hardware
+        * state is known to be armed, i.e. the state was committed and a
+        * VBLANK event was received.
+        */
+       tegra_crtc_update_memory_bandwidth(crtc, state, false);
+}
+
 static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+       .atomic_check = tegra_crtc_atomic_check,
        .atomic_begin = tegra_crtc_atomic_begin,
        .atomic_flush = tegra_crtc_atomic_flush,
        .atomic_enable = tegra_crtc_atomic_enable,
@@ -2036,6 +2373,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
                /*
                dev_dbg(dc->dev, "%s(): frame end\n", __func__);
                */
+               dc->stats.frames_total++;
                dc->stats.frames++;
        }
 
@@ -2044,6 +2382,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
                dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
                */
                drm_crtc_handle_vblank(&dc->base);
+               dc->stats.vblank_total++;
                dc->stats.vblank++;
        }
 
@@ -2051,6 +2390,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
                /*
                dev_dbg(dc->dev, "%s(): underflow\n", __func__);
                */
+               dc->stats.underflow_total++;
                dc->stats.underflow++;
        }
 
@@ -2058,11 +2398,13 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
                /*
                dev_dbg(dc->dev, "%s(): overflow\n", __func__);
                */
+               dc->stats.overflow_total++;
                dc->stats.overflow++;
        }
 
        if (status & HEAD_UF_INT) {
                dev_dbg_ratelimited(dc->dev, "%s(): head underflow\n", __func__);
+               dc->stats.underflow_total++;
                dc->stats.underflow++;
        }
 
@@ -2343,7 +2685,9 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
        .overlay_formats = tegra20_overlay_formats,
        .modifiers = tegra20_modifiers,
        .has_win_a_without_filters = true,
+       .has_win_b_vfilter_mem_client = true,
        .has_win_c_without_vert_filter = true,
+       .plane_tiled_memory_bandwidth_x2 = false,
 };
 
 static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -2363,7 +2707,9 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
        .overlay_formats = tegra20_overlay_formats,
        .modifiers = tegra20_modifiers,
        .has_win_a_without_filters = false,
+       .has_win_b_vfilter_mem_client = true,
        .has_win_c_without_vert_filter = false,
+       .plane_tiled_memory_bandwidth_x2 = true,
 };
 
 static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -2383,7 +2729,9 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
        .overlay_formats = tegra114_overlay_formats,
        .modifiers = tegra20_modifiers,
        .has_win_a_without_filters = false,
+       .has_win_b_vfilter_mem_client = false,
        .has_win_c_without_vert_filter = false,
+       .plane_tiled_memory_bandwidth_x2 = true,
 };
 
 static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2403,7 +2751,9 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
        .overlay_formats = tegra124_overlay_formats,
        .modifiers = tegra124_modifiers,
        .has_win_a_without_filters = false,
+       .has_win_b_vfilter_mem_client = false,
        .has_win_c_without_vert_filter = false,
+       .plane_tiled_memory_bandwidth_x2 = false,
 };
 
 static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2423,7 +2773,9 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
        .overlay_formats = tegra114_overlay_formats,
        .modifiers = tegra124_modifiers,
        .has_win_a_without_filters = false,
+       .has_win_b_vfilter_mem_client = false,
        .has_win_c_without_vert_filter = false,
+       .plane_tiled_memory_bandwidth_x2 = false,
 };
 
 static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
@@ -2473,6 +2825,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
        .has_nvdisplay = true,
        .wgrps = tegra186_dc_wgrps,
        .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
+       .plane_tiled_memory_bandwidth_x2 = false,
 };
 
 static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
@@ -2522,6 +2875,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
        .has_nvdisplay = true,
        .wgrps = tegra194_dc_wgrps,
        .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
+       .plane_tiled_memory_bandwidth_x2 = false,
 };
 
 static const struct of_device_id tegra_dc_of_match[] = {
index 5e13f1c..f0cb691 100644 (file)
@@ -15,6 +15,8 @@
 
 struct tegra_output;
 
+#define TEGRA_DC_LEGACY_PLANES_NUM     7
+
 struct tegra_dc_state {
        struct drm_crtc_state base;
 
@@ -33,11 +35,22 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
        return NULL;
 }
 
+static inline const struct tegra_dc_state *
+to_const_dc_state(const struct drm_crtc_state *state)
+{
+       return to_dc_state((struct drm_crtc_state *)state);
+}
+
 struct tegra_dc_stats {
        unsigned long frames;
        unsigned long vblank;
        unsigned long underflow;
        unsigned long overflow;
+
+       unsigned long frames_total;
+       unsigned long vblank_total;
+       unsigned long underflow_total;
+       unsigned long overflow_total;
 };
 
 struct tegra_windowgroup_soc {
@@ -66,7 +79,9 @@ struct tegra_dc_soc_info {
        unsigned int num_overlay_formats;
        const u64 *modifiers;
        bool has_win_a_without_filters;
+       bool has_win_b_vfilter_mem_client;
        bool has_win_c_without_vert_filter;
+       bool plane_tiled_memory_bandwidth_x2;
 };
 
 struct tegra_dc {
@@ -152,6 +167,8 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
                               struct drm_crtc_state *crtc_state,
                               struct clk *clk, unsigned long pclk,
                               unsigned int div);
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state);
 
 /* from rgb.c */
 int tegra_dc_rgb_probe(struct tegra_dc *dc);
index 8c6069b..8d37d6b 100644 (file)
 #include <drm/drm_prime.h>
 #include <drm/drm_vblank.h>
 
+#include "dc.h"
 #include "drm.h"
 #include "gem.h"
+#include "uapi.h"
 
 #define DRIVER_NAME "tegra"
 #define DRIVER_DESC "NVIDIA Tegra graphics"
 #define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
+#define DRIVER_MAJOR 1
 #define DRIVER_MINOR 0
 #define DRIVER_PATCHLEVEL 0
 
 #define CARVEOUT_SZ SZ_64M
 #define CDMA_GATHER_FETCHES_MAX_NB 16383
 
-struct tegra_drm_file {
-       struct idr contexts;
-       struct mutex lock;
-};
-
 static int tegra_atomic_check(struct drm_device *drm,
                              struct drm_atomic_state *state)
 {
@@ -60,6 +57,17 @@ static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
+static void tegra_atomic_post_commit(struct drm_device *drm,
+                                    struct drm_atomic_state *old_state)
+{
+       struct drm_crtc_state *old_crtc_state __maybe_unused;
+       struct drm_crtc *crtc;
+       unsigned int i;
+
+       for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+               tegra_crtc_atomic_post_commit(crtc, old_state);
+}
+
 static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
        struct drm_device *drm = old_state->dev;
@@ -79,6 +87,8 @@ static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
        } else {
                drm_atomic_helper_commit_tail_rpm(old_state);
        }
+
+       tegra_atomic_post_commit(drm, old_state);
 }
 
 static const struct drm_mode_config_helper_funcs
@@ -94,7 +104,9 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
        if (!fpriv)
                return -ENOMEM;
 
-       idr_init_base(&fpriv->contexts, 1);
+       idr_init_base(&fpriv->legacy_contexts, 1);
+       xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
+       xa_init(&fpriv->syncpoints);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;
 
@@ -107,20 +119,6 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
        kfree(context);
 }
 
-static struct host1x_bo *
-host1x_bo_lookup(struct drm_file *file, u32 handle)
-{
-       struct drm_gem_object *gem;
-       struct tegra_bo *bo;
-
-       gem = drm_gem_object_lookup(file, handle);
-       if (!gem)
-               return NULL;
-
-       bo = to_tegra_bo(gem);
-       return &bo->base;
-}
-
 static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
@@ -151,11 +149,11 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
 
        dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
 
-       dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
+       dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;
 
-       dest->target.bo = host1x_bo_lookup(file, target);
+       dest->target.bo = tegra_gem_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;
 
@@ -193,7 +191,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                return -EINVAL;
 
        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
-                              args->num_relocs);
+                              args->num_relocs, false);
        if (!job)
                return -ENOMEM;
 
@@ -201,6 +199,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
        job->client = client;
        job->class = client->class;
        job->serialize = true;
+       job->syncpt_recovery = true;
 
        /*
         * Track referenced BOs so that they can be unreferenced after the
@@ -237,7 +236,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                        goto fail;
                }
 
-               bo = host1x_bo_lookup(file, cmdbuf.handle);
+               bo = tegra_gem_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
@@ -432,7 +431,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
        if (err < 0)
                return err;
 
-       err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
+       err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
@@ -487,13 +486,13 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = idr_find(&fpriv->contexts, args->context);
+       context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }
 
-       idr_remove(&fpriv->contexts, context->id);
+       idr_remove(&fpriv->legacy_contexts, context->id);
        tegra_drm_context_free(context);
 
 unlock:
@@ -512,7 +511,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = idr_find(&fpriv->contexts, args->context);
+       context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
@@ -541,7 +540,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = idr_find(&fpriv->contexts, args->context);
+       context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
@@ -566,7 +565,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
 
        mutex_lock(&fpriv->lock);
 
-       context = idr_find(&fpriv->contexts, args->context);
+       context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
@@ -735,10 +734,25 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
 
 static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
 #ifdef CONFIG_DRM_TEGRA_STAGING
-       DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
+       DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
+                         DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
+                         DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
+                         DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
+                         DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
+                         DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
                          DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
                          DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
+                         DRM_RENDER_ALLOW),
+
+       DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
@@ -792,10 +806,11 @@ static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
        struct tegra_drm_file *fpriv = file->driver_priv;
 
        mutex_lock(&fpriv->lock);
-       idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
+       idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
+       tegra_drm_uapi_close_file(fpriv);
        mutex_unlock(&fpriv->lock);
 
-       idr_destroy(&fpriv->contexts);
+       idr_destroy(&fpriv->legacy_contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
 }
@@ -853,7 +868,7 @@ static void tegra_debugfs_init(struct drm_minor *minor)
 
 static const struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM |
-                          DRIVER_ATOMIC | DRIVER_RENDER,
+                          DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = drm_fb_helper_lastclose,
@@ -883,6 +898,14 @@ static const struct drm_driver tegra_drm_driver = {
 int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
 {
+       /*
+        * Once MLOCKs are implemented, change this to allocate a shared
+        * channel only when MLOCKs are disabled.
+        */
+       client->shared_channel = host1x_channel_request(&client->base);
+       if (!client->shared_channel)
+               return -EBUSY;
+
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        client->drm = tegra;
@@ -899,6 +922,9 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
        client->drm = NULL;
        mutex_unlock(&tegra->clients_lock);
 
+       if (client->shared_channel)
+               host1x_channel_put(client->shared_channel);
+
        return 0;
 }
 
index 0cb8680..8b28327 100644 (file)
@@ -64,12 +64,22 @@ struct tegra_drm {
        struct tegra_display_hub *hub;
 };
 
+static inline struct host1x *tegra_drm_to_host1x(struct tegra_drm *tegra)
+{
+       return dev_get_drvdata(tegra->drm->dev->parent);
+}
+
 struct tegra_drm_client;
 
 struct tegra_drm_context {
        struct tegra_drm_client *client;
        struct host1x_channel *channel;
+
+       /* Only used by legacy UAPI. */
        unsigned int id;
+
+       /* Only used by new UAPI. */
+       struct xarray mappings;
 };
 
 struct tegra_drm_client_ops {
@@ -91,7 +101,9 @@ struct tegra_drm_client {
        struct host1x_client base;
        struct list_head list;
        struct tegra_drm *drm;
+       struct host1x_channel *shared_channel;
 
+       /* Set by driver */
        unsigned int version;
        const struct tegra_drm_client_ops *ops;
 };
diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
new file mode 100644 (file)
index 0000000..1824d2d
--- /dev/null
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010-2020 NVIDIA Corporation */
+
+#include "drm.h"
+#include "submit.h"
+#include "uapi.h"
+
+struct tegra_drm_firewall {
+       struct tegra_drm_submit_data *submit;
+       struct tegra_drm_client *client;
+       u32 *data;
+       u32 pos;
+       u32 end;
+       u32 class;
+};
+
+static int fw_next(struct tegra_drm_firewall *fw, u32 *word)
+{
+       if (fw->pos == fw->end)
+               return -EINVAL;
+
+       *word = fw->data[fw->pos++];
+
+       return 0;
+}
+
+static bool fw_check_addr_valid(struct tegra_drm_firewall *fw, u32 offset)
+{
+       u32 i;
+
+       for (i = 0; i < fw->submit->num_used_mappings; i++) {
+               struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
+
+               if (offset >= m->iova && offset <= m->iova_end)
+                       return true;
+       }
+
+       return false;
+}
+
+static int fw_check_reg(struct tegra_drm_firewall *fw, u32 offset)
+{
+       bool is_addr;
+       u32 word;
+       int err;
+
+       err = fw_next(fw, &word);
+       if (err)
+               return err;
+
+       if (!fw->client->ops->is_addr_reg)
+               return 0;
+
+       is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+                                              offset);
+
+       if (!is_addr)
+               return 0;
+
+       if (!fw_check_addr_valid(fw, word))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int fw_check_regs_seq(struct tegra_drm_firewall *fw, u32 offset,
+                            u32 count, bool incr)
+{
+       u32 i;
+
+       for (i = 0; i < count; i++) {
+               if (fw_check_reg(fw, offset))
+                       return -EINVAL;
+
+               if (incr)
+                       offset++;
+       }
+
+       return 0;
+}
+
+static int fw_check_regs_mask(struct tegra_drm_firewall *fw, u32 offset,
+                             u16 mask)
+{
+       unsigned long bmask = mask;
+       unsigned int bit;
+
+       for_each_set_bit(bit, &bmask, 16) {
+               if (fw_check_reg(fw, offset+bit))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
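For the MASK form, every set bit in the 16-bit mask selects one register relative to the base offset. A tiny standalone illustration of the same bit-walk (offset and mask values are made up):

#include <stdio.h>

int main(void)
{
	/* Visit each register selected by a 16-bit write mask. */
	unsigned int offset = 0x700, mask = 0x8031; /* bits 0, 4, 5, 15 */

	for (unsigned int m = mask; m; m &= m - 1) {
		unsigned int bit = __builtin_ctz(m);
		printf("check register 0x%x\n", offset + bit);
	}
	return 0;
}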
+static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
+{
+       bool is_addr;
+
+       is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+                                              offset);
+       if (is_addr)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int fw_check_class(struct tegra_drm_firewall *fw, u32 class)
+{
+       if (!fw->client->ops->is_valid_class) {
+               if (class == fw->client->base.class)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+
+       if (!fw->client->ops->is_valid_class(class))
+               return -EINVAL;
+
+       return 0;
+}
+
+enum {
+       HOST1X_OPCODE_SETCLASS  = 0x00,
+       HOST1X_OPCODE_INCR      = 0x01,
+       HOST1X_OPCODE_NONINCR   = 0x02,
+       HOST1X_OPCODE_MASK      = 0x03,
+       HOST1X_OPCODE_IMM       = 0x04,
+       HOST1X_OPCODE_RESTART   = 0x05,
+       HOST1X_OPCODE_GATHER    = 0x06,
+       HOST1X_OPCODE_SETSTRMID = 0x07,
+       HOST1X_OPCODE_SETAPPID  = 0x08,
+       HOST1X_OPCODE_SETPYLD   = 0x09,
+       HOST1X_OPCODE_INCR_W    = 0x0a,
+       HOST1X_OPCODE_NONINCR_W = 0x0b,
+       HOST1X_OPCODE_GATHER_W  = 0x0c,
+       HOST1X_OPCODE_RESTART_W = 0x0d,
+       HOST1X_OPCODE_EXTEND    = 0x0e,
+};
+
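The validator below slices each command word into opcode and operand fields. A standalone sketch of the field extraction for the INCR form (the word value is fabricated; the field layout follows the decoder below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* INCR word: opcode = 1 in bits 31:28, offset 0x500, count 4. */
	uint32_t word = (1u << 28) | (0x500u << 16) | 4u;

	uint32_t opcode = (word & 0xf0000000) >> 28;
	uint32_t offset = (word >> 16) & 0xfff;
	uint32_t count  = word & 0xffff;

	printf("opcode=%u offset=0x%x count=%u\n", opcode, offset, count);
	return 0;
}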
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+                         u32 words, struct tegra_drm_submit_data *submit,
+                         u32 *job_class)
+{
+       struct tegra_drm_firewall fw = {
+               .submit = submit,
+               .client = client,
+               .data = data,
+               .pos = start,
+               .end = start+words,
+               .class = *job_class,
+       };
+       bool payload_valid = false;
+       u32 payload;
+       int err;
+
+       while (fw.pos != fw.end) {
+               u32 word, opcode, offset, count, mask, class;
+
+               err = fw_next(&fw, &word);
+               if (err)
+                       return err;
+
+               opcode = (word & 0xf0000000) >> 28;
+
+               switch (opcode) {
+               case HOST1X_OPCODE_SETCLASS:
+                       offset = word >> 16 & 0xfff;
+                       mask = word & 0x3f;
+                       class = (word >> 6) & 0x3ff;
+                       err = fw_check_class(&fw, class);
+                       fw.class = class;
+                       *job_class = class;
+                       if (!err)
+                               err = fw_check_regs_mask(&fw, offset, mask);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal SETCLASS(offset=0x%x, mask=0x%x, class=0x%x) at word %u",
+                                        offset, mask, class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_INCR:
+                       offset = (word >> 16) & 0xfff;
+                       count = word & 0xffff;
+                       err = fw_check_regs_seq(&fw, offset, count, true);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal INCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+                                        offset, count, fw.class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_NONINCR:
+                       offset = (word >> 16) & 0xfff;
+                       count = word & 0xffff;
+                       err = fw_check_regs_seq(&fw, offset, count, false);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal NONINCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+                                        offset, count, fw.class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_MASK:
+                       offset = (word >> 16) & 0xfff;
+                       mask = word & 0xffff;
+                       err = fw_check_regs_mask(&fw, offset, mask);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal MASK(offset=0x%x, mask=0x%x) in class 0x%x at word %u",
+                                        offset, mask, fw.class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_IMM:
+                       /* IMM cannot reasonably be used to write a pointer */
+                       offset = (word >> 16) & 0xfff;
+                       err = fw_check_regs_imm(&fw, offset);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal IMM(offset=0x%x) in class 0x%x at word %u",
+                                        offset, fw.class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_SETPYLD:
+                       payload = word & 0xffff;
+                       payload_valid = true;
+                       break;
+               case HOST1X_OPCODE_INCR_W:
+                       if (!payload_valid)
+                               return -EINVAL;
+
+                       offset = word & 0x3fffff;
+                       err = fw_check_regs_seq(&fw, offset, payload, true);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal INCR_W(offset=0x%x) in class 0x%x at word %u",
+                                        offset, fw.class, fw.pos-1);
+                       break;
+               case HOST1X_OPCODE_NONINCR_W:
+                       if (!payload_valid)
+                               return -EINVAL;
+
+                       offset = word & 0x3fffff;
+                       err = fw_check_regs_seq(&fw, offset, payload, false);
+                       if (err)
+                               dev_warn(client->base.dev,
+                                        "illegal NONINCR_W(offset=0x%x) in class 0x%x at word %u",
+                                        offset, fw.class, fw.pos-1);
+                       break;
+               default:
+                       dev_warn(client->base.dev, "illegal opcode at word %u",
+                                fw.pos-1);
+                       return -EINVAL;
+               }
+
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
index 26af8da..6ec598f 100644 (file)
@@ -707,3 +707,16 @@ struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
 
        return &bo->gem;
 }
+
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
+{
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
+
+       gem = drm_gem_object_lookup(file, handle);
+       if (!gem)
+               return NULL;
+
+       bo = to_tegra_bo(gem);
+       return &bo->base;
+}
index c15fd99..cb5146a 100644 (file)
@@ -80,4 +80,6 @@ struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf);
 
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle);
+
 #endif
index 2e65b40..e00ec3f 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/iommu.h>
+#include <linux/interconnect.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -64,6 +65,9 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
        copy->reflect_x = state->reflect_x;
        copy->reflect_y = state->reflect_y;
        copy->opaque = state->opaque;
+       copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
+       copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
+       copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
 
        for (i = 0; i < 2; i++)
                copy->blending[i] = state->blending[i];
@@ -244,6 +248,78 @@ void tegra_plane_cleanup_fb(struct drm_plane *plane,
                tegra_dc_unpin(dc, to_tegra_plane_state(state));
 }
 
+static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
+{
+       struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
+       unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
+       const struct tegra_dc_soc_info *soc;
+       const struct drm_format_info *fmt;
+       struct drm_crtc_state *crtc_state;
+       u64 avg_bandwidth, peak_bandwidth;
+
+       if (!state->visible)
+               return 0;
+
+       crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+       if (!crtc_state)
+               return -EINVAL;
+
+       src_w = drm_rect_width(&state->src) >> 16;
+       src_h = drm_rect_height(&state->src) >> 16;
+       dst_w = drm_rect_width(&state->dst);
+       dst_h = drm_rect_height(&state->dst);
+
+       fmt = state->fb->format;
+       soc = to_tegra_dc(state->crtc)->soc;
+
+       /*
+        * Note that the real memory bandwidth varies depending on the format
+        * and memory layout; we don't take that into account here because a
+        * small estimation error isn't important, since the bandwidth is
+        * rounded up anyway.
+        */
+       for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
+               unsigned int bpp_plane = fmt->cpp[i] * 8;
+
+               /*
+                * Sub-sampling is relevant only for the chroma planes, and
+                * vertical readouts are not cached, hence only horizontal
+                * sub-sampling matters.
+                */
+               if (i > 0)
+                       bpp_plane /= fmt->hsub;
+
+               bpp += bpp_plane;
+       }
+
+       /* average bandwidth in kbytes/sec */
+       avg_bandwidth  = min(src_w, dst_w) * min(src_h, dst_h);
+       avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
+       avg_bandwidth  = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
+       do_div(avg_bandwidth, 1000);
+
+       /* mode.clock in kHz, peak bandwidth in kbytes/sec */
+       peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);
+
+       /*
+        * The Tegra30/114 memory controller can't interleave DC memory
+        * requests for tiled windows because the DC uses a 16-byte atom,
+        * while DDR3 uses a 32-byte atom.  Hence there is a 2x memory
+        * overfetch for tiled framebuffers with DDR3 on these SoCs.
+        */
+       if (soc->plane_tiled_memory_bandwidth_x2 &&
+           tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
+               mul = 2;
+       else
+               mul = 1;
+
+       /* ICC bandwidth in kbytes/sec */
+       tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
+       tegra_state->avg_memory_bandwidth  = kBps_to_icc(avg_bandwidth)  * mul;
+
+       return 0;
+}
+
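As a worked example of the arithmetic above, here is a hypothetical 1920x1080 ARGB8888 plane on a 60 Hz, 148.5 MHz pixel-clock mode, using the same round-up-to-kB/s steps (a standalone userspace sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical full-screen 32 bpp plane; all values are made up. */
	uint64_t w = 1920, h = 1080, refresh = 60, bpp = 32;
	uint64_t clock_khz = 148500;

	/* average bandwidth in kbytes/sec (round up, as the driver does) */
	uint64_t avg = w * h * refresh;
	avg = (avg * bpp + 7) / 8 + 999;
	avg /= 1000;

	/* mode clock is in kHz, so this is already kbytes/sec */
	uint64_t peak = (clock_khz * bpp + 7) / 8;

	printf("avg  = %llu kB/s\n", (unsigned long long)avg);  /* 497664 */
	printf("peak = %llu kB/s\n", (unsigned long long)peak); /* 594000 */
	return 0;
}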
 int tegra_plane_state_add(struct tegra_plane *plane,
                          struct drm_plane_state *state)
 {
@@ -262,6 +338,10 @@ int tegra_plane_state_add(struct tegra_plane *plane,
        if (err < 0)
                return err;
 
+       err = tegra_plane_calculate_memory_bandwidth(state);
+       if (err < 0)
+               return err;
+
        tegra = to_dc_state(crtc_state);
 
        tegra->planes |= WIN_A_ACT_REQ << plane->index;
@@ -646,3 +726,40 @@ int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
 
        return 0;
 }
+
+static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
+       "wina", "winb", "winc", NULL, NULL, NULL, "cursor",
+};
+
+int tegra_plane_interconnect_init(struct tegra_plane *plane)
+{
+       const char *icc_name = tegra_plane_icc_names[plane->index];
+       struct device *dev = plane->dc->dev;
+       struct tegra_dc *dc = plane->dc;
+       int err;
+
+       if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
+           WARN_ON(!tegra_plane_icc_names[plane->index]))
+               return -EINVAL;
+
+       plane->icc_mem = devm_of_icc_get(dev, icc_name);
+       err = PTR_ERR_OR_ZERO(plane->icc_mem);
+       if (err) {
+               dev_err_probe(dev, err, "failed to get %s interconnect\n",
+                             icc_name);
+               return err;
+       }
+
+       /* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
+       if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
+               plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
+               err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
+               if (err) {
+                       dev_err_probe(dev, err, "failed to get %s interconnect\n",
+                                     "winb-vfilter");
+                       return err;
+               }
+       }
+
+       return 0;
+}
index 1785c15..d947078 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <drm/drm_plane.h>
 
+struct icc_path;
 struct tegra_bo;
 struct tegra_dc;
 
@@ -16,6 +17,9 @@ struct tegra_plane {
        struct tegra_dc *dc;
        unsigned int offset;
        unsigned int index;
+
+       struct icc_path *icc_mem;
+       struct icc_path *icc_mem_vfilter;
 };
 
 struct tegra_cursor {
@@ -52,6 +56,11 @@ struct tegra_plane_state {
        /* used for legacy blending support only */
        struct tegra_plane_legacy_blending_state blending[2];
        bool opaque;
+
+       /* bandwidths are in ICC units, i.e. kbytes/sec */
+       u32 total_peak_memory_bandwidth;
+       u32 peak_memory_bandwidth;
+       u32 avg_memory_bandwidth;
 };
 
 static inline struct tegra_plane_state *
@@ -63,6 +72,12 @@ to_tegra_plane_state(struct drm_plane_state *state)
        return NULL;
 }
 
+static inline const struct tegra_plane_state *
+to_const_tegra_plane_state(const struct drm_plane_state *state)
+{
+       return to_tegra_plane_state((struct drm_plane_state *)state);
+}
+
 extern const struct drm_plane_funcs tegra_plane_funcs;
 
 int tegra_plane_prepare_fb(struct drm_plane *plane,
@@ -78,5 +93,6 @@ bool tegra_plane_format_is_indexed(unsigned int format);
 bool tegra_plane_format_is_yuv(unsigned int format, bool *planar, unsigned int *bpc);
 int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
                                   struct tegra_plane_state *state);
+int tegra_plane_interconnect_init(struct tegra_plane *plane);
 
 #endif /* TEGRA_PLANE_H */
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
new file mode 100644 (file)
index 0000000..776f825
--- /dev/null
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/dma-fence-array.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "submit.h"
+#include "uapi.h"
+
+#define SUBMIT_ERR(context, fmt, ...) \
+       dev_err_ratelimited(context->client->base.dev, \
+               "%s: job submission failed: " fmt "\n", \
+               current->comm, ##__VA_ARGS__)
+
+struct gather_bo {
+       struct host1x_bo base;
+
+       struct kref ref;
+
+       struct device *dev;
+       u32 *gather_data;
+       dma_addr_t gather_data_dma;
+       size_t gather_data_words;
+};
+
+static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
+{
+       struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+       kref_get(&bo->ref);
+
+       return host_bo;
+}
+
+static void gather_bo_release(struct kref *ref)
+{
+       struct gather_bo *bo = container_of(ref, struct gather_bo, ref);
+
+       dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
+                      0);
+       kfree(bo);
+}
+
+static void gather_bo_put(struct host1x_bo *host_bo)
+{
+       struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+       kref_put(&bo->ref, gather_bo_release);
+}
+
+static struct sg_table *
+gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
+{
+       struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+       struct sg_table *sgt;
+       int err;
+
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
+
+       err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
+                             bo->gather_data_words * 4);
+       if (err) {
+               kfree(sgt);
+               return ERR_PTR(err);
+       }
+
+       return sgt;
+}
+
+static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
+{
+       if (sgt) {
+               sg_free_table(sgt);
+               kfree(sgt);
+       }
+}
+
+static void *gather_bo_mmap(struct host1x_bo *host_bo)
+{
+       struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+       return bo->gather_data;
+}
+
+static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
+{
+}
+
+const struct host1x_bo_ops gather_bo_ops = {
+       .get = gather_bo_get,
+       .put = gather_bo_put,
+       .pin = gather_bo_pin,
+       .unpin = gather_bo_unpin,
+       .mmap = gather_bo_mmap,
+       .munmap = gather_bo_munmap,
+};
+
+static struct tegra_drm_mapping *
+tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
+{
+       struct tegra_drm_mapping *mapping;
+
+       xa_lock(&context->mappings);
+
+       mapping = xa_load(&context->mappings, id);
+       if (mapping)
+               kref_get(&mapping->ref);
+
+       xa_unlock(&context->mappings);
+
+       return mapping;
+}
+
+static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
+{
+       size_t copy_len;
+       void *data;
+
+       if (check_mul_overflow(count, size, &copy_len))
+               return ERR_PTR(-EINVAL);
+
+       if (copy_len > 0x4000)
+               return ERR_PTR(-E2BIG);
+
+       data = kvmalloc(copy_len, GFP_KERNEL);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
+
+       if (copy_from_user(data, from, copy_len)) {
+               kvfree(data);
+               return ERR_PTR(-EFAULT);
+       }
+
+       return data;
+}
+
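The helper above guards the count * size multiplication and caps the total copy size before touching user memory. A userspace analogue of the same pattern, with memcpy standing in for copy_from_user (the 0x4000 cap is taken from the helper above; everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *copy_array(const void *from, size_t count, size_t size)
{
	size_t len;
	void *data;

	if (__builtin_mul_overflow(count, size, &len))
		return NULL;		/* count * size would wrap */
	if (len > 0x4000)
		return NULL;		/* cap the copy, as the driver does */

	data = malloc(len);
	if (!data)
		return NULL;

	memcpy(data, from, len);	/* stands in for copy_from_user() */
	return data;
}

int main(void)
{
	unsigned int src[4] = { 1, 2, 3, 4 };
	unsigned int *dst = copy_array(src, 4, sizeof(*src));

	printf("%s\n", dst ? "copied" : "rejected");
	free(dst);
	return 0;
}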
+static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
+                                  struct tegra_drm_context *context,
+                                  struct drm_tegra_channel_submit *args)
+{
+       struct gather_bo *bo;
+       size_t copy_len;
+
+       if (args->gather_data_words == 0) {
+               SUBMIT_ERR(context, "gather_data_words cannot be zero");
+               return -EINVAL;
+       }
+
+       if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
+               SUBMIT_ERR(context, "gather_data_words is too large");
+               return -EINVAL;
+       }
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (!bo) {
+               SUBMIT_ERR(context, "failed to allocate memory for bo info");
+               return -ENOMEM;
+       }
+
+       host1x_bo_init(&bo->base, &gather_bo_ops);
+       kref_init(&bo->ref);
+       bo->dev = dev;
+
+       bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
+                                         GFP_KERNEL | __GFP_NOWARN, 0);
+       if (!bo->gather_data) {
+               SUBMIT_ERR(context, "failed to allocate memory for gather data");
+               kfree(bo);
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
+               SUBMIT_ERR(context, "failed to copy gather data from userspace");
+               dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
+               kfree(bo);
+               return -EFAULT;
+       }
+
+       bo->gather_data_words = args->gather_data_words;
+
+       *pbo = bo;
+
+       return 0;
+}
+
+static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
+                             struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
+{
+       /* TODO check that target_offset is within bounds */
+       dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
+       u32 written_ptr;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
+               iova |= BIT_ULL(39);
+#endif
+
+       written_ptr = iova >> buf->reloc.shift;
+
+       if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
+               SUBMIT_ERR(context,
+                          "relocation has too large gather offset (%u vs gather length %zu)",
+                          buf->reloc.gather_offset_words, bo->gather_data_words);
+               return -EINVAL;
+       }
+
+       buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
+                                                           bo->gather_data_words);
+
+       bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;
+
+       return 0;
+}
+
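A relocation therefore amounts to: bounds-check the patch location, then store the shifted IOVA into the gather. A minimal standalone sketch with fabricated addresses (the sector-layout bit handling is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Fake gather and mapping; all values are illustrative only. */
	uint32_t gather[8] = { 0 };
	uint64_t mapping_iova = 0x80000000ull;
	uint32_t target_offset = 0x100, shift = 8, offset_words = 3;

	if (offset_words >= sizeof(gather) / sizeof(gather[0])) {
		fprintf(stderr, "reloc offset out of bounds\n");
		return 1;
	}

	/* Patch the buffer address into the gather, pre-shifted. */
	gather[offset_words] = (uint32_t)((mapping_iova + target_offset) >> shift);

	printf("gather[%u] = 0x%x\n", offset_words, gather[offset_words]);
	return 0;
}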
+static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
+                              struct drm_tegra_channel_submit *args,
+                              struct tegra_drm_submit_data *job_data)
+{
+       struct tegra_drm_used_mapping *mappings;
+       struct drm_tegra_submit_buf *bufs;
+       int err;
+       u32 i;
+
+       bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
+                                    sizeof(*bufs));
+       if (IS_ERR(bufs)) {
+               SUBMIT_ERR(context, "failed to copy bufs array from userspace");
+               return PTR_ERR(bufs);
+       }
+
+       mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
+       if (!mappings) {
+               SUBMIT_ERR(context, "failed to allocate memory for mapping info");
+               err = -ENOMEM;
+               goto done;
+       }
+
+       for (i = 0; i < args->num_bufs; i++) {
+               struct drm_tegra_submit_buf *buf = &bufs[i];
+               struct tegra_drm_mapping *mapping;
+
+               if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
+                       SUBMIT_ERR(context, "invalid flag specified for buffer");
+                       err = -EINVAL;
+                       goto drop_refs;
+               }
+
+               mapping = tegra_drm_mapping_get(context, buf->mapping);
+               if (!mapping) {
+                       SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
+                       err = -EINVAL;
+                       goto drop_refs;
+               }
+
+               err = submit_write_reloc(context, bo, buf, mapping);
+               if (err) {
+                       tegra_drm_mapping_put(mapping);
+                       goto drop_refs;
+               }
+
+               mappings[i].mapping = mapping;
+               mappings[i].flags = buf->flags;
+       }
+
+       job_data->used_mappings = mappings;
+       job_data->num_used_mappings = i;
+
+       err = 0;
+
+       goto done;
+
+drop_refs:
+       while (i--)
+               tegra_drm_mapping_put(mappings[i].mapping);
+
+       kfree(mappings);
+       job_data->used_mappings = NULL;
+
+done:
+       kvfree(bufs);
+
+       return err;
+}
+
+static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
+                            struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
+{
+       struct host1x_syncpt *sp;
+
+       if (args->syncpt.flags) {
+               SUBMIT_ERR(context, "invalid flag specified for syncpt");
+               return -EINVAL;
+       }
+
+       /* Syncpt ref will be dropped on job release */
+       sp = xa_load(syncpoints, args->syncpt.id);
+       if (!sp) {
+               SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
+               return -EINVAL;
+       }
+
+       job->syncpt = host1x_syncpt_get(sp);
+       job->syncpt_incrs = args->syncpt.increments;
+
+       return 0;
+}
+
+static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
+                                struct drm_tegra_submit_cmd_gather_uptr *cmd,
+                                struct gather_bo *bo, u32 *offset,
+                                struct tegra_drm_submit_data *job_data,
+                                u32 *class)
+{
+       u32 next_offset;
+
+       if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
+               SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
+               return -EINVAL;
+       }
+
+       /* Check for maximum gather size */
+       if (cmd->words > 16383) {
+               SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
+               return -EINVAL;
+       }
+
+       if (check_add_overflow(*offset, cmd->words, &next_offset)) {
+               SUBMIT_ERR(context, "too many total words in job");
+               return -EINVAL;
+       }
+
+       if (next_offset > bo->gather_data_words) {
+               SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
+               return -EINVAL;
+       }
+
+       if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
+                                 cmd->words, job_data, class)) {
+               SUBMIT_ERR(context, "job was rejected by firewall");
+               return -EINVAL;
+       }
+
+       host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
+
+       *offset = next_offset;
+
+       return 0;
+}
+
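The offset bookkeeping above rejects both integer wrap-around and running past the end of the copied gather data. A tiny standalone illustration of the same checks (values chosen to trigger the wrap case):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Advance a running gather offset, rejecting overflow and overruns. */
	uint32_t offset = 0xfffffff0u, words = 0x20, total = 0x1000, next;

	if (__builtin_add_overflow(offset, words, &next))
		printf("rejected: offset wraps\n");
	else if (next > total)
		printf("rejected: past end of gather data\n");
	else
		printf("ok: next offset %u\n", next);
	return 0;
}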
+static struct host1x_job *
+submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
+                 struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
+                 struct xarray *syncpoints)
+{
+       struct drm_tegra_submit_cmd *cmds;
+       u32 i, gather_offset = 0, class;
+       struct host1x_job *job;
+       int err;
+
+       /* Set initial class for firewall. */
+       class = context->client->base.class;
+
+       cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
+                                    sizeof(*cmds));
+       if (IS_ERR(cmds)) {
+               SUBMIT_ERR(context, "failed to copy cmds array from userspace");
+               return ERR_CAST(cmds);
+       }
+
+       job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
+       if (!job) {
+               SUBMIT_ERR(context, "failed to allocate memory for job");
+               job = ERR_PTR(-ENOMEM);
+               goto done;
+       }
+
+       err = submit_get_syncpt(context, job, syncpoints, args);
+       if (err < 0)
+               goto free_job;
+
+       job->client = &context->client->base;
+       job->class = context->client->base.class;
+       job->serialize = true;
+
+       for (i = 0; i < args->num_cmds; i++) {
+               struct drm_tegra_submit_cmd *cmd = &cmds[i];
+
+               if (cmd->flags) {
+                       SUBMIT_ERR(context, "unknown flags given for cmd");
+                       err = -EINVAL;
+                       goto free_job;
+               }
+
+               if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
+                       err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
+                                                   &gather_offset, job_data, &class);
+                       if (err)
+                               goto free_job;
+               } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
+                       if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+                               SUBMIT_ERR(context, "non-zero reserved value");
+                               err = -EINVAL;
+                               goto free_job;
+                       }
+
+                       host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+                                           false, class);
+               } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
+                       if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+                               SUBMIT_ERR(context, "non-zero reserved value");
+                               err = -EINVAL;
+                               goto free_job;
+                       }
+
+                       if (cmd->wait_syncpt.id != args->syncpt.id) {
+                               SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
+                               err = -EINVAL;
+                               goto free_job;
+                       }
+
+                       host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+                                           true, class);
+               } else {
+                       SUBMIT_ERR(context, "unknown cmd type");
+                       err = -EINVAL;
+                       goto free_job;
+               }
+       }
+
+       if (gather_offset == 0) {
+               SUBMIT_ERR(context, "job must have at least one gather");
+               err = -EINVAL;
+               goto free_job;
+       }
+
+       goto done;
+
+free_job:
+       host1x_job_put(job);
+       job = ERR_PTR(err);
+
+done:
+       kvfree(cmds);
+
+       return job;
+}
+
+static void release_job(struct host1x_job *job)
+{
+       struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
+       struct tegra_drm_submit_data *job_data = job->user_data;
+       u32 i;
+
+       for (i = 0; i < job_data->num_used_mappings; i++)
+               tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+       kfree(job_data->used_mappings);
+       kfree(job_data);
+
+       if (pm_runtime_enabled(client->base.dev))
+               pm_runtime_put_autosuspend(client->base.dev);
+}
+
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+                                  struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_channel_submit *args = data;
+       struct tegra_drm_submit_data *job_data;
+       struct drm_syncobj *syncobj = NULL;
+       struct tegra_drm_context *context;
+       struct host1x_job *job;
+       struct gather_bo *bo;
+       u32 i;
+       int err;
+
+       mutex_lock(&fpriv->lock);
+
+       context = xa_load(&fpriv->contexts, args->context);
+       if (!context) {
+               mutex_unlock(&fpriv->lock);
+               pr_err_ratelimited("%s: %s: invalid channel context '%#x'\n", __func__,
+                                  current->comm, args->context);
+               return -EINVAL;
+       }
+
+       if (args->syncobj_in) {
+               struct dma_fence *fence;
+
+               err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
+               if (err) {
+                       SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
+                       goto unlock;
+               }
+
+               err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
+               dma_fence_put(fence);
+               if (err <= 0) {
+                       if (err == 0)
+                               err = -ETIMEDOUT;
+
+                       SUBMIT_ERR(context, "wait for syncobj_in failed: %d", err);
+                       goto unlock;
+               }
+       }
+
+       if (args->syncobj_out) {
+               syncobj = drm_syncobj_find(file, args->syncobj_out);
+               if (!syncobj) {
+                       SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
+                       err = -ENOENT;
+                       goto unlock;
+               }
+       }
+
+       /* Allocate gather BO and copy gather words in. */
+       err = submit_copy_gather_data(&bo, drm->dev, context, args);
+       if (err)
+               goto unlock;
+
+       job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
+       if (!job_data) {
+               SUBMIT_ERR(context, "failed to allocate memory for job data");
+               err = -ENOMEM;
+               goto put_bo;
+       }
+
+       /* Get data buffer mappings and do relocation patching. */
+       err = submit_process_bufs(context, bo, args, job_data);
+       if (err)
+               goto free_job_data;
+
+       /* Allocate host1x_job and add gathers and waits to it. */
+       job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
+       if (IS_ERR(job)) {
+               err = PTR_ERR(job);
+               goto free_job_data;
+       }
+
+       /* Map gather data for Host1x. */
+       err = host1x_job_pin(job, context->client->base.dev);
+       if (err) {
+               SUBMIT_ERR(context, "failed to pin job: %d", err);
+               goto put_job;
+       }
+
+       /* Boot engine. */
+       if (pm_runtime_enabled(context->client->base.dev)) {
+               err = pm_runtime_resume_and_get(context->client->base.dev);
+               if (err < 0) {
+                       SUBMIT_ERR(context, "could not power up engine: %d", err);
+                       goto unpin_job;
+               }
+       }
+
+       job->user_data = job_data;
+       job->release = release_job;
+       job->timeout = 10000;
+
+       /*
+        * job_data is now owned by the job and is freed by its release
+        * callback, so don't free it here.
+        */
+       job_data = NULL;
+
+       /* Submit job to hardware. */
+       err = host1x_job_submit(job);
+       if (err) {
+               SUBMIT_ERR(context, "host1x job submission failed: %d", err);
+               goto unpin_job;
+       }
+
+       /* Return postfences to userspace. */
+       args->syncpt.value = job->syncpt_end;
+
+       if (syncobj) {
+               struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
+
+               if (IS_ERR(fence)) {
+                       err = PTR_ERR(fence);
+                       SUBMIT_ERR(context, "failed to create postfence: %d", err);
+                       goto put_job;
+               }
+
+               drm_syncobj_replace_fence(syncobj, fence);
+       }
+
+       goto put_job;
+
+unpin_job:
+       host1x_job_unpin(job);
+put_job:
+       host1x_job_put(job);
+free_job_data:
+       if (job_data && job_data->used_mappings) {
+               for (i = 0; i < job_data->num_used_mappings; i++)
+                       tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+               kfree(job_data->used_mappings);
+       }
+
+       kfree(job_data);
+put_bo:
+       gather_bo_put(&bo->base);
+unlock:
+       if (syncobj)
+               drm_syncobj_put(syncobj);
+
+       mutex_unlock(&fpriv->lock);
+       return err;
+}
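
For reference, the new submit path can be driven from userspace roughly as sketched below. This is a minimal sketch, not part of the patch: the ioctl number and structure names follow the UAPI header added by this series (include/uapi/drm/tegra_drm.h), the gather_data_ptr/gather_data_words fields are assumptions inferred from submit_copy_gather_data(), and all error handling is elided.

/*
 * Hypothetical minimal submit of a single userspace gather.
 * Assumes a DRM fd, a channel context from CHANNEL_OPEN and a
 * syncpoint from SYNCPOINT_ALLOCATE (see uapi.c later in this patch).
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int submit_one_gather(int fd, uint32_t context, uint32_t syncpt_id,
			     uint32_t *opcodes, uint32_t num_words)
{
	struct drm_tegra_submit_cmd cmd;
	struct drm_tegra_channel_submit args;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR;
	cmd.gather_uptr.words = num_words;	/* must be <= 16383 */

	memset(&args, 0, sizeof(args));
	args.context = context;			/* from DRM_IOCTL_TEGRA_CHANNEL_OPEN */
	args.num_cmds = 1;
	args.cmds_ptr = (uint64_t)(uintptr_t)&cmd;
	args.gather_data_ptr = (uint64_t)(uintptr_t)opcodes;	/* assumed field names */
	args.gather_data_words = num_words;
	args.syncpt.id = syncpt_id;		/* from DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE */
	args.syncpt.increments = 1;		/* job increments the syncpoint once */

	if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &args) < 0)
		return -1;

	/* args.syncpt.value is now the threshold to pass to SYNCPOINT_WAIT. */
	return 0;
}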
diff --git a/drivers/gpu/drm/tegra/submit.h b/drivers/gpu/drm/tegra/submit.h
new file mode 100644
index 0000000..cf6a2f0
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_SUBMIT_H
+#define _TEGRA_DRM_UAPI_SUBMIT_H
+
+struct tegra_drm_used_mapping {
+       struct tegra_drm_mapping *mapping;
+       u32 flags;
+};
+
+struct tegra_drm_submit_data {
+       struct tegra_drm_used_mapping *used_mappings;
+       u32 num_used_mappings;
+};
+
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+                         u32 words, struct tegra_drm_submit_data *submit,
+                         u32 *job_class);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
new file mode 100644
index 0000000..dc16a24
--- /dev/null
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "drm.h"
+#include "uapi.h"
+
+static void tegra_drm_mapping_release(struct kref *ref)
+{
+       struct tegra_drm_mapping *mapping =
+               container_of(ref, struct tegra_drm_mapping, ref);
+
+       if (mapping->sgt)
+               dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+                                 DMA_ATTR_SKIP_CPU_SYNC);
+
+       host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+       host1x_bo_put(mapping->bo);
+
+       kfree(mapping);
+}
+
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
+{
+       kref_put(&mapping->ref, tegra_drm_mapping_release);
+}
+
+static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
+{
+       struct tegra_drm_mapping *mapping;
+       unsigned long id;
+
+       xa_for_each(&context->mappings, id, mapping)
+               tegra_drm_mapping_put(mapping);
+
+       xa_destroy(&context->mappings);
+
+       host1x_channel_put(context->channel);
+
+       kfree(context);
+}
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
+{
+       struct tegra_drm_context *context;
+       struct host1x_syncpt *sp;
+       unsigned long id;
+
+       xa_for_each(&file->contexts, id, context)
+               tegra_drm_channel_context_close(context);
+
+       xa_for_each(&file->syncpoints, id, sp)
+               host1x_syncpt_put(sp);
+
+       xa_destroy(&file->contexts);
+       xa_destroy(&file->syncpoints);
+}
+
+static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
+{
+       struct tegra_drm_client *client;
+
+       list_for_each_entry(client, &tegra->clients, list)
+               if (client->base.class == class)
+                       return client;
+
+       return NULL;
+}
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct tegra_drm *tegra = drm->dev_private;
+       struct drm_tegra_channel_open *args = data;
+       struct tegra_drm_client *client = NULL;
+       struct tegra_drm_context *context;
+       int err;
+
+       if (args->flags)
+               return -EINVAL;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return -ENOMEM;
+
+       client = tegra_drm_find_client(tegra, args->host1x_class);
+       if (!client) {
+               err = -ENODEV;
+               goto free;
+       }
+
+       if (client->shared_channel) {
+               context->channel = host1x_channel_get(client->shared_channel);
+       } else {
+               context->channel = host1x_channel_request(&client->base);
+               if (!context->channel) {
+                       err = -EBUSY;
+                       goto free;
+               }
+       }
+
+       err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
+                      GFP_KERNEL);
+       if (err < 0)
+               goto put_channel;
+
+       context->client = client;
+       xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);
+
+       args->version = client->version;
+       args->capabilities = 0;
+
+       if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
+               args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;
+
+       return 0;
+
+put_channel:
+       host1x_channel_put(context->channel);
+free:
+       kfree(context);
+
+       return err;
+}
+
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_channel_close *args = data;
+       struct tegra_drm_context *context;
+
+       mutex_lock(&fpriv->lock);
+
+       context = xa_load(&fpriv->contexts, args->context);
+       if (!context) {
+               mutex_unlock(&fpriv->lock);
+               return -EINVAL;
+       }
+
+       xa_erase(&fpriv->contexts, args->context);
+
+       mutex_unlock(&fpriv->lock);
+
+       tegra_drm_channel_context_close(context);
+
+       return 0;
+}
+
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_channel_map *args = data;
+       struct tegra_drm_mapping *mapping;
+       struct tegra_drm_context *context;
+       int err = 0;
+
+       if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
+               return -EINVAL;
+
+       mutex_lock(&fpriv->lock);
+
+       context = xa_load(&fpriv->contexts, args->context);
+       if (!context) {
+               mutex_unlock(&fpriv->lock);
+               return -EINVAL;
+       }
+
+       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+       if (!mapping) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       kref_init(&mapping->ref);
+
+       mapping->dev = context->client->base.dev;
+       mapping->bo = tegra_gem_lookup(file, args->handle);
+       if (!mapping->bo) {
+               err = -EINVAL;
+               goto free;
+       }
+
+       if (context->client->base.group) {
+               /* IOMMU domain managed directly using IOMMU API */
+               host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
+       } else {
+               switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+               case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+                       mapping->direction = DMA_BIDIRECTIONAL;
+                       break;
+
+               case DRM_TEGRA_CHANNEL_MAP_WRITE:
+                       mapping->direction = DMA_FROM_DEVICE;
+                       break;
+
+               case DRM_TEGRA_CHANNEL_MAP_READ:
+                       mapping->direction = DMA_TO_DEVICE;
+                       break;
+
+               default:
+                       err = -EINVAL;
+                       goto put_gem;
+               }
+
+               mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
+               if (IS_ERR(mapping->sgt)) {
+                       err = PTR_ERR(mapping->sgt);
+                       goto put_gem;
+               }
+
+               err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+                                     DMA_ATTR_SKIP_CPU_SYNC);
+               if (err)
+                       goto unpin;
+
+               mapping->iova = sg_dma_address(mapping->sgt->sgl);
+       }
+
+       mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+
+       err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
+                      GFP_KERNEL);
+       if (err < 0)
+               goto unmap;
+
+       mutex_unlock(&fpriv->lock);
+
+       return 0;
+
+unmap:
+       if (mapping->sgt) {
+               dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
+                                 DMA_ATTR_SKIP_CPU_SYNC);
+       }
+unpin:
+       host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+put_gem:
+       host1x_bo_put(mapping->bo);
+free:
+       kfree(mapping);
+unlock:
+       mutex_unlock(&fpriv->lock);
+       return err;
+}
+
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_channel_unmap *args = data;
+       struct tegra_drm_mapping *mapping;
+       struct tegra_drm_context *context;
+
+       mutex_lock(&fpriv->lock);
+
+       context = xa_load(&fpriv->contexts, args->context);
+       if (!context) {
+               mutex_unlock(&fpriv->lock);
+               return -EINVAL;
+       }
+
+       mapping = xa_erase(&context->mappings, args->mapping);
+
+       mutex_unlock(&fpriv->lock);
+
+       if (!mapping)
+               return -EINVAL;
+
+       tegra_drm_mapping_put(mapping);
+       return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_syncpoint_allocate *args = data;
+       struct host1x_syncpt *sp;
+       int err;
+
+       if (args->id)
+               return -EINVAL;
+
+       sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
+       if (!sp)
+               return -EBUSY;
+
+       args->id = host1x_syncpt_id(sp);
+
+       err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
+       if (err) {
+               host1x_syncpt_put(sp);
+               return err;
+       }
+
+       return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct drm_tegra_syncpoint_allocate *args = data;
+       struct host1x_syncpt *sp;
+
+       mutex_lock(&fpriv->lock);
+       sp = xa_erase(&fpriv->syncpoints, args->id);
+       mutex_unlock(&fpriv->lock);
+
+       if (!sp)
+               return -EINVAL;
+
+       host1x_syncpt_put(sp);
+
+       return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
+{
+       struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+       struct drm_tegra_syncpoint_wait *args = data;
+       signed long timeout_jiffies;
+       struct host1x_syncpt *sp;
+
+       if (args->padding != 0)
+               return -EINVAL;
+
+       sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
+       if (!sp)
+               return -EINVAL;
+
+       timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+       return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
+}
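
Taken together, these ioctls give a process a channel context, a client-managed syncpoint and a device mapping for a GEM buffer. A hedged sketch of the setup sequence follows; structure and ioctl names track the handlers above and the UAPI header added by this series, while the VIC class value is an assumption and error handling is elided.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int setup_channel(int fd, uint32_t gem_handle, uint32_t *context,
			 uint32_t *syncpt, uint32_t *mapping)
{
	struct drm_tegra_channel_open open_args;
	struct drm_tegra_syncpoint_allocate syncpt_args;
	struct drm_tegra_channel_map map_args;

	memset(&open_args, 0, sizeof(open_args));
	open_args.host1x_class = 0x5d;	/* HOST1X_CLASS_VIC (assumed value) */
	if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &open_args) < 0)
		return -1;
	*context = open_args.context;

	memset(&syncpt_args, 0, sizeof(syncpt_args));	/* id must be zero on input */
	if (ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE, &syncpt_args) < 0)
		return -1;
	*syncpt = syncpt_args.id;

	memset(&map_args, 0, sizeof(map_args));
	map_args.context = *context;
	map_args.handle = gem_handle;
	map_args.flags = DRM_TEGRA_CHANNEL_MAP_READ_WRITE;
	if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_MAP, &map_args) < 0)
		return -1;
	*mapping = map_args.mapping;

	return 0;
}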
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
new file mode 100644
index 0000000..12adad7
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_H
+#define _TEGRA_DRM_UAPI_H
+
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/xarray.h>
+
+#include <drm/drm.h>
+
+struct drm_file;
+struct drm_device;
+
+struct tegra_drm_file {
+       /* Legacy UAPI state */
+       struct idr legacy_contexts;
+       struct mutex lock;
+
+       /* New UAPI state */
+       struct xarray contexts;
+       struct xarray syncpoints;
+};
+
+struct tegra_drm_mapping {
+       struct kref ref;
+
+       struct device *dev;
+       struct host1x_bo *bo;
+       struct sg_table *sgt;
+       enum dma_data_direction direction;
+       dma_addr_t iova;
+       dma_addr_t iova_end;
+};
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
+                                struct drm_file *file);
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data,
+                                 struct drm_file *file);
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data,
+                               struct drm_file *file);
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data,
+                                 struct drm_file *file);
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+                                  struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data,
+                                      struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data,
+                                  struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data,
+                                  struct drm_file *file);
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file);
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping);
+
+#endif
index c9d55a9..c02010f 100644
@@ -29,7 +29,6 @@ struct vic_config {
 
 struct vic {
        struct falcon falcon;
-       bool booted;
 
        void __iomem *regs;
        struct tegra_drm_client client;
@@ -52,48 +51,6 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
        writel(value, vic->regs + offset);
 }
 
-static int vic_runtime_resume(struct device *dev)
-{
-       struct vic *vic = dev_get_drvdata(dev);
-       int err;
-
-       err = clk_prepare_enable(vic->clk);
-       if (err < 0)
-               return err;
-
-       usleep_range(10, 20);
-
-       err = reset_control_deassert(vic->rst);
-       if (err < 0)
-               goto disable;
-
-       usleep_range(10, 20);
-
-       return 0;
-
-disable:
-       clk_disable_unprepare(vic->clk);
-       return err;
-}
-
-static int vic_runtime_suspend(struct device *dev)
-{
-       struct vic *vic = dev_get_drvdata(dev);
-       int err;
-
-       err = reset_control_assert(vic->rst);
-       if (err < 0)
-               return err;
-
-       usleep_range(2000, 4000);
-
-       clk_disable_unprepare(vic->clk);
-
-       vic->booted = false;
-
-       return 0;
-}
-
 static int vic_boot(struct vic *vic)
 {
 #ifdef CONFIG_IOMMU_API
@@ -103,9 +60,6 @@ static int vic_boot(struct vic *vic)
        void *hdr;
        int err = 0;
 
-       if (vic->booted)
-               return 0;
-
 #ifdef CONFIG_IOMMU_API
        if (vic->config->supports_sid && spec) {
                u32 value;
@@ -168,8 +122,6 @@ static int vic_boot(struct vic *vic)
                return err;
        }
 
-       vic->booted = true;
-
        return 0;
 }
 
@@ -323,35 +275,74 @@ cleanup:
        return err;
 }
 
-static int vic_open_channel(struct tegra_drm_client *client,
-                           struct tegra_drm_context *context)
+static int vic_runtime_resume(struct device *dev)
 {
-       struct vic *vic = to_vic(client);
+       struct vic *vic = dev_get_drvdata(dev);
        int err;
 
-       err = pm_runtime_resume_and_get(vic->dev);
+       err = clk_prepare_enable(vic->clk);
        if (err < 0)
                return err;
 
+       usleep_range(10, 20);
+
+       err = reset_control_deassert(vic->rst);
+       if (err < 0)
+               goto disable;
+
+       usleep_range(10, 20);
+
        err = vic_load_firmware(vic);
        if (err < 0)
-               goto rpm_put;
+               goto assert;
 
        err = vic_boot(vic);
        if (err < 0)
-               goto rpm_put;
+               goto assert;
+
+       return 0;
+
+assert:
+       reset_control_assert(vic->rst);
+disable:
+       clk_disable_unprepare(vic->clk);
+       return err;
+}
+
+static int vic_runtime_suspend(struct device *dev)
+{
+       struct vic *vic = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(vic->rst);
+       if (err < 0)
+               return err;
+
+       usleep_range(2000, 4000);
+
+       clk_disable_unprepare(vic->clk);
+
+       return 0;
+}
+
+static int vic_open_channel(struct tegra_drm_client *client,
+                           struct tegra_drm_context *context)
+{
+       struct vic *vic = to_vic(client);
+       int err;
+
+       err = pm_runtime_resume_and_get(vic->dev);
+       if (err < 0)
+               return err;
 
        context->channel = host1x_channel_get(vic->channel);
        if (!context->channel) {
-               err = -ENOMEM;
-               goto rpm_put;
+               pm_runtime_put(vic->dev);
+               return -ENOMEM;
        }
 
        return 0;
-
-rpm_put:
-       pm_runtime_put(vic->dev);
-       return err;
 }
 
 static void vic_close_channel(struct tegra_drm_context *context)
@@ -359,7 +350,6 @@ static void vic_close_channel(struct tegra_drm_context *context)
        struct vic *vic = to_vic(context->client);
 
        host1x_channel_put(context->channel);
-
        pm_runtime_put(vic->dev);
 }
 
index 096017b..d2b6f7d 100644
@@ -9,6 +9,7 @@ host1x-y = \
        job.o \
        debug.o \
        mipi.o \
+       fence.o \
        hw/host1x01.o \
        hw/host1x02.o \
        hw/host1x04.o \
index 6e6ca77..765e5aa 100644
@@ -312,10 +312,6 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
        bool signal = false;
        struct host1x_job *job, *n;
 
-       /* If CDMA is stopped, queue is cleared and we can return */
-       if (!cdma->running)
-               return;
-
        /*
         * Walk the sync queue, reading the sync point registers as necessary,
         * to consume as many sync queue entries as possible without blocking
@@ -324,7 +320,8 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
                struct host1x_syncpt *sp = job->syncpt;
 
                /* Check whether this syncpt has completed, and bail if not */
-               if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
+               if (!host1x_syncpt_is_expired(sp, job->syncpt_end) &&
+                   !job->cancelled) {
                        /* Start timer on next pending syncpt */
                        if (job->timeout)
                                cdma_start_timer_locked(cdma, job);
@@ -413,8 +410,11 @@ syncpt_incr:
        else
                restart_addr = cdma->last_pos;
 
+       if (!job)
+               goto resume;
+
        /* do CPU increments for the remaining syncpts */
-       if (job) {
+       if (job->syncpt_recovery) {
                dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
                        __func__);
 
@@ -433,8 +433,44 @@ syncpt_incr:
 
                dev_dbg(dev, "%s: finished sync_queue modification\n",
                        __func__);
+       } else {
+               struct host1x_job *failed_job = job;
+
+               host1x_job_dump(dev, job);
+
+               host1x_syncpt_set_locked(job->syncpt);
+               failed_job->cancelled = true;
+
+               list_for_each_entry_continue(job, &cdma->sync_queue, list) {
+                       unsigned int i;
+
+                       if (job->syncpt != failed_job->syncpt)
+                               continue;
+
+                       for (i = 0; i < job->num_slots; i++) {
+                               unsigned int slot = (job->first_get / 8 + i) %
+                                                   HOST1X_PUSHBUFFER_SLOTS;
+                               u32 *mapped = cdma->push_buffer.mapped;
+
+                               /*
+                                * Overwrite opcodes with zero-word writes
+                                * to offset 0xbad. This does nothing but
+                                * has an easily detected signature in debug
+                                * traces.
+                                */
+                               mapped[2 * slot + 0] = 0x1bad0000;
+                               mapped[2 * slot + 1] = 0x1bad0000;
+                       }
+
+                       job->cancelled = true;
+               }
+
+               wmb();
+
+               update_cdma_locked(cdma);
        }
 
+resume:
        /* roll back DMAGET and start up channel again */
        host1x_hw_cdma_resume(host1x, cdma, restart_addr);
 }
@@ -490,6 +526,16 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
 
        mutex_lock(&cdma->lock);
 
+       /*
+        * Check if syncpoint was locked due to previous job timeout.
+        * This needs to be done within the cdma lock to avoid a race
+        * with the timeout handler.
+        */
+       if (job->syncpt->locked) {
+               mutex_unlock(&cdma->lock);
+               return -EPERM;
+       }
+
        if (job->timeout) {
                /* init state on first submit with timeout value */
                if (!cdma->timeout.initialized) {
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
new file mode 100644
index 0000000..6941add
--- /dev/null
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Syncpoint dma_fence implementation
+ *
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "fence.h"
+#include "intr.h"
+#include "syncpt.h"
+
+static DEFINE_SPINLOCK(lock);
+
+struct host1x_syncpt_fence {
+       struct dma_fence base;
+
+       atomic_t signaling;
+
+       struct host1x_syncpt *sp;
+       u32 threshold;
+
+       struct host1x_waitlist *waiter;
+       void *waiter_ref;
+
+       struct delayed_work timeout_work;
+};
+
+static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
+{
+       return "host1x";
+}
+
+static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
+{
+       return "syncpoint";
+}
+
+static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
+{
+       return container_of(f, struct host1x_syncpt_fence, base);
+}
+
+static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
+{
+       struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+       int err;
+
+       if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
+               return false;
+
+       dma_fence_get(f);
+
+       /*
+        * The dma_fence framework requires the fence driver to keep a
+        * reference to any fences for which 'enable_signaling' has been
+        * called (and that have not been signalled).
+        *
+        * We provide a userspace API to create arbitrary syncpoint fences,
+        * so we cannot normally guarantee that all fences get signalled.
+        * As such, setup a timeout, so that long-lasting fences will get
+        * reaped eventually.
+        */
+       schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+
+       err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
+                                    HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
+                                    sf->waiter, &sf->waiter_ref);
+       if (err) {
+               cancel_delayed_work_sync(&sf->timeout_work);
+               dma_fence_put(f);
+               return false;
+       }
+
+       /* intr framework takes ownership of waiter */
+       sf->waiter = NULL;
+
+       /*
+        * The fence may get signalled at any time after the above call,
+        * so we need to initialize all state used by signalling
+        * before it.
+        */
+
+       return true;
+}
+
+static void host1x_syncpt_fence_release(struct dma_fence *f)
+{
+       struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+       kfree(sf->waiter);
+
+       dma_fence_free(f);
+}
+
+const struct dma_fence_ops host1x_syncpt_fence_ops = {
+       .get_driver_name = host1x_syncpt_fence_get_driver_name,
+       .get_timeline_name = host1x_syncpt_fence_get_timeline_name,
+       .enable_signaling = host1x_syncpt_fence_enable_signaling,
+       .release = host1x_syncpt_fence_release,
+};
+
+void host1x_fence_signal(struct host1x_syncpt_fence *f)
+{
+       if (atomic_xchg(&f->signaling, 1))
+               return;
+
+       /*
+        * Cancel pending timeout work - if it races, it will
+        * not get 'f->signaling' and return.
+        */
+       cancel_delayed_work_sync(&f->timeout_work);
+
+       host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);
+
+       dma_fence_signal(&f->base);
+       dma_fence_put(&f->base);
+}
+
+static void do_fence_timeout(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct host1x_syncpt_fence *f =
+               container_of(dwork, struct host1x_syncpt_fence, timeout_work);
+
+       if (atomic_xchg(&f->signaling, 1))
+               return;
+
+       /*
+        * No need to cancel the timeout work here - we are the timeout
+        * handler. A racing signal path sees 'f->signaling' set above
+        * and returns without signalling the fence twice.
+        */
+       host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);
+
+       dma_fence_set_error(&f->base, -ETIMEDOUT);
+       dma_fence_signal(&f->base);
+       dma_fence_put(&f->base);
+}
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+{
+       struct host1x_syncpt_fence *fence;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return ERR_PTR(-ENOMEM);
+
+       fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
+       if (!fence->waiter) {
+               kfree(fence);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fence->sp = sp;
+       fence->threshold = threshold;
+
+       dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
+                      dma_fence_context_alloc(1), 0);
+
+       INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);
+
+       return &fence->base;
+}
+EXPORT_SYMBOL(host1x_fence_create);
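
Because host1x_fence_create() returns an ordinary dma_fence, in-kernel users can treat a syncpoint threshold like any other fence. A minimal sketch of a blocking wait is shown below (mirroring the postfence path in the Tegra submit code earlier in this series; the 100 ms timeout is an arbitrary example, not part of the patch):

/* Sketch: block until syncpoint 'sp' reaches 'threshold', with a timeout. */
static int wait_syncpt_threshold(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	long remaining;

	fence = host1x_fence_create(sp, threshold);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	remaining = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	dma_fence_put(fence);

	if (remaining == 0)
		return -ETIMEDOUT;
	if (remaining < 0)
		return remaining;	/* e.g. -ERESTARTSYS */

	return 0;
}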
diff --git a/drivers/gpu/host1x/fence.h b/drivers/gpu/host1x/fence.h
new file mode 100644
index 0000000..70c91de
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA Corporation.
+ */
+
+#ifndef HOST1X_FENCE_H
+#define HOST1X_FENCE_H
+
+struct host1x_syncpt_fence;
+
+void host1x_fence_signal(struct host1x_syncpt_fence *fence);
+
+#endif
index d4c28fa..1999780 100644
@@ -47,39 +47,84 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
        }
 }
 
-static void submit_gathers(struct host1x_job *job)
+static void submit_wait(struct host1x_cdma *cdma, u32 id, u32 threshold,
+                       u32 next_class)
+{
+#if HOST1X_HW >= 2
+       host1x_cdma_push_wide(cdma,
+               host1x_opcode_setclass(
+                       HOST1X_CLASS_HOST1X,
+                       HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
+                       /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
+                       BIT(0) | BIT(2)
+               ),
+               threshold,
+               id,
+               host1x_opcode_setclass(next_class, 0, 0)
+       );
+#else
+       /* TODO add waitchk or use waitbases or other mitigation */
+       host1x_cdma_push(cdma,
+               host1x_opcode_setclass(
+                       HOST1X_CLASS_HOST1X,
+                       host1x_uclass_wait_syncpt_r(),
+                       BIT(0)
+               ),
+               host1x_class_host_wait_syncpt(id, threshold)
+       );
+       host1x_cdma_push(cdma,
+               host1x_opcode_setclass(next_class, 0, 0),
+               HOST1X_OPCODE_NOP
+       );
+#endif
+}
+
+static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
 {
        struct host1x_cdma *cdma = &job->channel->cdma;
 #if HOST1X_HW < 6
        struct device *dev = job->channel->dev;
 #endif
        unsigned int i;
+       u32 threshold;
 
-       for (i = 0; i < job->num_gathers; i++) {
-               struct host1x_job_gather *g = &job->gathers[i];
-               dma_addr_t addr = g->base + g->offset;
-               u32 op2, op3;
+       for (i = 0; i < job->num_cmds; i++) {
+               struct host1x_job_cmd *cmd = &job->cmds[i];
 
-               op2 = lower_32_bits(addr);
-               op3 = upper_32_bits(addr);
+               if (cmd->is_wait) {
+                       if (cmd->wait.relative)
+                               threshold = job_syncpt_base + cmd->wait.threshold;
+                       else
+                               threshold = cmd->wait.threshold;
 
-               trace_write_gather(cdma, g->bo, g->offset, g->words);
+                       submit_wait(cdma, cmd->wait.id, threshold, cmd->wait.next_class);
+               } else {
+                       struct host1x_job_gather *g = &cmd->gather;
+
+                       dma_addr_t addr = g->base + g->offset;
+                       u32 op2, op3;
+
+                       op2 = lower_32_bits(addr);
+                       op3 = upper_32_bits(addr);
 
-               if (op3 != 0) {
+                       trace_write_gather(cdma, g->bo, g->offset, g->words);
+
+                       if (op3 != 0) {
 #if HOST1X_HW >= 6
-                       u32 op1 = host1x_opcode_gather_wide(g->words);
-                       u32 op4 = HOST1X_OPCODE_NOP;
+                               u32 op1 = host1x_opcode_gather_wide(g->words);
+                               u32 op4 = HOST1X_OPCODE_NOP;
 
-                       host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
+                               host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
 #else
-                       dev_err(dev, "invalid gather for push buffer %pad\n",
-                               &addr);
-                       continue;
+                               dev_err(dev, "invalid gather for push buffer %pad\n",
+                                       &addr);
+                               continue;
 #endif
-               } else {
-                       u32 op1 = host1x_opcode_gather(g->words);
+                       } else {
+                               u32 op1 = host1x_opcode_gather(g->words);
 
-                       host1x_cdma_push(cdma, op1, op2);
+                               host1x_cdma_push(cdma, op1, op2);
+                       }
                }
        }
 }
@@ -126,7 +171,7 @@ static int channel_submit(struct host1x_job *job)
        struct host1x *host = dev_get_drvdata(ch->dev->parent);
 
        trace_host1x_channel_submit(dev_name(ch->dev),
-                                   job->num_gathers, job->num_relocs,
+                                   job->num_cmds, job->num_relocs,
                                    job->syncpt->id, job->syncpt_incrs);
 
        /* before error checks, return current max */
@@ -181,7 +226,7 @@ static int channel_submit(struct host1x_job *job)
                                 host1x_opcode_setclass(job->class, 0, 0),
                                 HOST1X_OPCODE_NOP);
 
-       submit_gathers(job);
+       submit_gathers(job, syncval - user_syncpt_incrs);
 
        /* end CDMA submit & stash pinned hMems into sync queue */
        host1x_cdma_end(&ch->cdma, job);
@@ -191,7 +236,7 @@ static int channel_submit(struct host1x_job *job)
        /* schedule a submit complete interrupt */
        err = host1x_intr_add_action(host, sp, syncval,
                                     HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
-                                    completed_waiter, NULL);
+                                    completed_waiter, &job->waiter);
        completed_waiter = NULL;
        WARN(err, "Failed to set submit complete interrupt");
 
index ceb4822..54e31d8 100644
@@ -156,9 +156,9 @@ static unsigned int show_channel_command(struct output *o, u32 val,
        }
 }
 
-static void show_gather(struct output *o, phys_addr_t phys_addr,
+static void show_gather(struct output *o, dma_addr_t phys_addr,
                        unsigned int words, struct host1x_cdma *cdma,
-                       phys_addr_t pin_addr, u32 *map_addr)
+                       dma_addr_t pin_addr, u32 *map_addr)
 {
        /* Map dmaget cursor to corresponding mem handle */
        u32 offset = phys_addr - pin_addr;
@@ -176,11 +176,11 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
        }
 
        for (i = 0; i < words; i++) {
-               u32 addr = phys_addr + i * 4;
+               dma_addr_t addr = phys_addr + i * 4;
                u32 val = *(map_addr + offset / 4 + i);
 
                if (!data_count) {
-                       host1x_debug_output(o, "%08x: %08x: ", addr, val);
+                       host1x_debug_output(o, "    %pad: %08x: ", &addr, val);
                        data_count = show_channel_command(o, val, &payload);
                } else {
                        host1x_debug_cont(o, "%08x%s", val,
@@ -195,23 +195,25 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
        struct push_buffer *pb = &cdma->push_buffer;
        struct host1x_job *job;
 
-       host1x_debug_output(o, "PUSHBUF at %pad, %u words\n",
-                           &pb->dma, pb->size / 4);
-
-       show_gather(o, pb->dma, pb->size / 4, cdma, pb->dma, pb->mapped);
-
        list_for_each_entry(job, &cdma->sync_queue, list) {
                unsigned int i;
 
-               host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
-                                   job, job->syncpt->id, job->syncpt_end,
-                                   job->first_get, job->timeout,
+               host1x_debug_output(o, "JOB, syncpt %u: %u timeout: %u num_slots: %u num_handles: %u\n",
+                                   job->syncpt->id, job->syncpt_end, job->timeout,
                                    job->num_slots, job->num_unpins);
 
-               for (i = 0; i < job->num_gathers; i++) {
-                       struct host1x_job_gather *g = &job->gathers[i];
+               show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma,
+                           pb->dma + job->first_get, pb->mapped + job->first_get);
+
+               for (i = 0; i < job->num_cmds; i++) {
+                       struct host1x_job_gather *g;
                        u32 *mapped;
 
+                       if (job->cmds[i].is_wait)
+                               continue;
+
+                       g = &job->cmds[i].gather;
+
                        if (job->gather_copy_mapped)
                                mapped = (u32 *)job->gather_copy_mapped;
                        else
@@ -222,7 +224,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
                                continue;
                        }
 
-                       host1x_debug_output(o, "    GATHER at %pad+%#x, %d words\n",
+                       host1x_debug_output(o, "  GATHER at %pad+%#x, %d words\n",
                                            &g->base, g->offset, g->words);
 
                        show_gather(o, g->base + g->offset, g->words, cdma,
index 02a9330..85242a5 100644
@@ -16,10 +16,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
                                           struct output *o)
 {
        struct host1x_cdma *cdma = &ch->cdma;
+       dma_addr_t dmastart, dmaend;
        u32 dmaput, dmaget, dmactrl;
        u32 cbstat, cbread;
        u32 val, base, baseval;
 
+       dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART);
+       dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND);
        dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
        dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
        dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
@@ -56,9 +59,10 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
                                    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
                                    cbread);
 
-       host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+       host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend);
+       host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n",
                            dmaput, dmaget, dmactrl);
-       host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+       host1x_debug_output(o, "CBREAD %08x CBSTAT %08x\n", cbread, cbstat);
 
        show_channel_gathers(o, cdma);
        host1x_debug_output(o, "\n");
index 6d1b583..9d06678 100644
@@ -16,10 +16,23 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
                                           struct output *o)
 {
        struct host1x_cdma *cdma = &ch->cdma;
+       dma_addr_t dmastart = 0, dmaend = 0;
        u32 dmaput, dmaget, dmactrl;
        u32 offset, class;
        u32 ch_stat;
 
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && HOST1X_HW >= 6
+       dmastart = host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART_HI);
+       dmastart <<= 32;
+       dmaend = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND_HI);
+       dmaend <<= 32;
+#endif
+       dmastart |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMASTART);
+       dmaend |= host1x_ch_readl(ch, HOST1X_CHANNEL_DMAEND);
+
        dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
        dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
        dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
@@ -41,7 +54,8 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
                host1x_debug_output(o, "active class %02x, offset %04x\n",
                                    class, offset);
 
-       host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+       host1x_debug_output(o, "DMASTART %pad, DMAEND %pad\n", &dmastart, &dmaend);
+       host1x_debug_output(o, "DMAPUT %08x DMAGET %08x DMACTL %08x\n",
                            dmaput, dmaget, dmactrl);
        host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat);
 
index 4fc51f7..0a2ab8f 100644
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
 }
 #define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
        host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+       return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+       host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+       return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+       host1x_uclass_wait_syncpt_32_r()
 
 #endif
index 9e84a4a..60c692b 100644
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
 }
 #define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
        host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+       return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+       host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+       return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+       host1x_uclass_wait_syncpt_32_r()
 
 #endif
index aee5a4e..2fcc9a2 100644
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
 }
 #define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
        host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+       return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+       host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+       return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+       host1x_uclass_wait_syncpt_32_r()
 
 #endif
index c4bacdb..5f83143 100644
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
 }
 #define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
        host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+       return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+       host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+       return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+       host1x_uclass_wait_syncpt_32_r()
 
 #endif
index c74070f..8cd2ef0 100644
@@ -165,5 +165,17 @@ static inline u32 host1x_uclass_indoff_rwn_read_v(void)
 }
 #define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
        host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_load_syncpt_payload_32_r(void)
+{
+       return 0x4e;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32 \
+       host1x_uclass_load_syncpt_payload_32_r()
+static inline u32 host1x_uclass_wait_syncpt_32_r(void)
+{
+       return 0x50;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_32 \
+       host1x_uclass_wait_syncpt_32_r()
 
 #endif
index 6d1f3c0..45b6be9 100644
@@ -13,6 +13,7 @@
 #include <trace/events/host1x.h>
 #include "channel.h"
 #include "dev.h"
+#include "fence.h"
 #include "intr.h"
 
 /* Wait list management */
@@ -121,12 +122,20 @@ static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
        wake_up_interruptible(wq);
 }
 
+static void action_signal_fence(struct host1x_waitlist *waiter)
+{
+       struct host1x_syncpt_fence *f = waiter->data;
+
+       host1x_fence_signal(f);
+}
+
 typedef void (*action_handler)(struct host1x_waitlist *waiter);
 
 static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
+       action_signal_fence,
 };
 
 static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
index 6ea55e6..e4c3460 100644
@@ -33,6 +33,8 @@ enum host1x_intr_action {
         */
        HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 
+       HOST1X_INTR_ACTION_SIGNAL_FENCE,
+
        HOST1X_INTR_ACTION_COUNT
 };
 
index adbdc22..0eef6df 100644
 #define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 
 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-                                   u32 num_cmdbufs, u32 num_relocs)
+                                   u32 num_cmdbufs, u32 num_relocs,
+                                   bool skip_firewall)
 {
        struct host1x_job *job = NULL;
        unsigned int num_unpins = num_relocs;
+       bool enable_firewall;
        u64 total;
        void *mem;
 
-       if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+       enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;
+
+       if (!enable_firewall)
                num_unpins += num_cmdbufs;
 
        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
                (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
-               (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
+               (u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
                (u64)num_unpins * sizeof(dma_addr_t) +
                (u64)num_unpins * sizeof(u32 *);
        if (total > ULONG_MAX)
@@ -48,6 +52,8 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
        if (!job)
                return NULL;
 
+       job->enable_firewall = enable_firewall;
+
        kref_init(&job->ref);
        job->channel = ch;
 
@@ -57,8 +63,8 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
        mem += num_relocs * sizeof(struct host1x_reloc);
        job->unpins = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(struct host1x_job_unpin_data);
-       job->gathers = num_cmdbufs ? mem : NULL;
-       mem += num_cmdbufs * sizeof(struct host1x_job_gather);
+       job->cmds = num_cmdbufs ? mem : NULL;
+       mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
        job->addr_phys = num_unpins ? mem : NULL;
 
        job->reloc_addr_phys = job->addr_phys;
@@ -79,6 +85,13 @@ static void job_free(struct kref *ref)
 {
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 
+       if (job->release)
+               job->release(job);
+
+       if (job->waiter)
+               host1x_intr_put_ref(job->syncpt->host, job->syncpt->id,
+                                   job->waiter, false);
+
        if (job->syncpt)
                host1x_syncpt_put(job->syncpt);
 
@@ -94,22 +107,38 @@ EXPORT_SYMBOL(host1x_job_put);
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset)
 {
-       struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
+       struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;
 
        gather->words = words;
        gather->bo = bo;
        gather->offset = offset;
 
-       job->num_gathers++;
+       job->num_cmds++;
 }
 EXPORT_SYMBOL(host1x_job_add_gather);
 
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+                        bool relative, u32 next_class)
+{
+       struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];
+
+       cmd->is_wait = true;
+       cmd->wait.id = id;
+       cmd->wait.threshold = thresh;
+       cmd->wait.next_class = next_class;
+       cmd->wait.relative = relative;
+
+       job->num_cmds++;
+}
+EXPORT_SYMBOL(host1x_job_add_wait);
+
 static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
        struct host1x_job_gather *g;
        struct iommu_domain *domain;
+       struct sg_table *sgt;
        unsigned int i;
        int err;
 
@@ -119,7 +148,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                dma_addr_t phys_addr, *phys;
-               struct sg_table *sgt;
 
                reloc->target.bo = host1x_bo_get(reloc->target.bo);
                if (!reloc->target.bo) {
@@ -192,20 +220,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
         * We will copy gathers BO content later, so there is no need to
         * hold and pin them.
         */
-       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+       if (job->enable_firewall)
                return 0;
 
-       for (i = 0; i < job->num_gathers; i++) {
+       for (i = 0; i < job->num_cmds; i++) {
                size_t gather_size = 0;
                struct scatterlist *sg;
-               struct sg_table *sgt;
                dma_addr_t phys_addr;
                unsigned long shift;
                struct iova *alloc;
                dma_addr_t *phys;
                unsigned int j;
 
-               g = &job->gathers[i];
+               if (job->cmds[i].is_wait)
+                       continue;
+
+               g = &job->cmds[i].gather;
+
                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
@@ -296,7 +327,7 @@ static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
                if (cmdbuf != reloc->cmdbuf.bo)
                        continue;
 
-               if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+               if (job->enable_firewall) {
                        target = (u32 *)job->gather_copy_mapped +
                                        reloc->cmdbuf.offset / sizeof(u32) +
                                                g->offset / sizeof(u32);
@@ -538,8 +569,13 @@ static inline int copy_gathers(struct device *host, struct host1x_job *job,
        fw.num_relocs = job->num_relocs;
        fw.class = job->class;
 
-       for (i = 0; i < job->num_gathers; i++) {
-               struct host1x_job_gather *g = &job->gathers[i];
+       for (i = 0; i < job->num_cmds; i++) {
+               struct host1x_job_gather *g;
+
+               if (job->cmds[i].is_wait)
+                       continue;
+
+               g = &job->cmds[i].gather;
 
                size += g->words * sizeof(u32);
        }
@@ -561,10 +597,14 @@ static inline int copy_gathers(struct device *host, struct host1x_job *job,
 
        job->gather_copy_size = size;
 
-       for (i = 0; i < job->num_gathers; i++) {
-               struct host1x_job_gather *g = &job->gathers[i];
+       for (i = 0; i < job->num_cmds; i++) {
+               struct host1x_job_gather *g;
                void *gather;
 
+               if (job->cmds[i].is_wait)
+                       continue;
+
+               g = &job->cmds[i].gather;
+
                /* Copy the gather */
                gather = host1x_bo_mmap(g->bo);
                memcpy(job->gather_copy_mapped + offset, gather + g->offset,
@@ -600,28 +640,33 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
        if (err)
                goto out;
 
-       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+       if (job->enable_firewall) {
                err = copy_gathers(host->dev, job, dev);
                if (err)
                        goto out;
        }
 
        /* patch gathers */
-       for (i = 0; i < job->num_gathers; i++) {
-               struct host1x_job_gather *g = &job->gathers[i];
+       for (i = 0; i < job->num_cmds; i++) {
+               struct host1x_job_gather *g;
+
+               if (job->cmds[i].is_wait)
+                       continue;
+
+               g = &job->cmds[i].gather;
 
                /* process each gather mem only once */
                if (g->handled)
                        continue;
 
                /* copy_gathers() sets gathers base if firewall is enabled */
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               if (!job->enable_firewall)
                        g->base = job->gather_addr_phys[i];
 
-               for (j = i + 1; j < job->num_gathers; j++) {
-                       if (job->gathers[j].bo == g->bo) {
-                               job->gathers[j].handled = true;
-                               job->gathers[j].base = g->base;
+               for (j = i + 1; j < job->num_cmds; j++) {
+                       if (!job->cmds[j].is_wait &&
+                           job->cmds[j].gather.bo == g->bo) {
+                               job->cmds[j].gather.handled = true;
+                               job->cmds[j].gather.base = g->base;
                        }
                }
 
@@ -649,8 +694,7 @@ void host1x_job_unpin(struct host1x_job *job)
                struct device *dev = unpin->dev ?: host->dev;
                struct sg_table *sgt = unpin->sgt;
 
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
-                   unpin->size && host->domain) {
+               if (!job->enable_firewall && unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
index 94bc2e4..b4428c5 100644
@@ -18,6 +18,22 @@ struct host1x_job_gather {
        bool handled;
 };
 
+struct host1x_job_wait {
+       u32 id;
+       u32 threshold;
+       u32 next_class;
+       bool relative;
+};
+
+struct host1x_job_cmd {
+       bool is_wait;
+
+       union {
+               struct host1x_job_gather gather;
+               struct host1x_job_wait wait;
+       };
+};
+
 struct host1x_job_unpin_data {
        struct host1x_bo *bo;
        struct sg_table *sgt;
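
With gathers and waits folded into one tagged union, every consumer that used to iterate job->gathers now walks job->cmds and skips wait entries, exactly as the hunks above show. A minimal illustrative sketch of that pattern, assuming only the struct fields introduced in this patch (the function name is hypothetical):

    /* Walk the mixed command array, visiting only gather commands. */
    static void walk_gathers(struct host1x_job *job)
    {
            unsigned int i;

            for (i = 0; i < job->num_cmds; i++) {
                    struct host1x_job_gather *g;

                    /* Wait commands carry no buffer object. */
                    if (job->cmds[i].is_wait)
                            continue;

                    g = &job->cmds[i].gather;
                    /* ... operate on g->bo, g->offset, g->words ... */
            }
    }
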
index e648ebb..d198a10 100644
@@ -407,6 +407,8 @@ static void syncpt_release(struct kref *ref)
 
        atomic_set(&sp->max_val, host1x_syncpt_read(sp));
 
+       sp->locked = false;
+
        mutex_lock(&sp->host->syncpt_mutex);
 
        host1x_syncpt_base_free(sp->base);
index a6766f8..95cd29b 100644
@@ -40,6 +40,13 @@ struct host1x_syncpt {
 
        /* interrupt data */
        struct host1x_syncpt_intr intr;
+
+       /*
+        * If a submission incrementing this syncpoint fails, lock it so that
+        * further submissions cannot be made until the application has
+        * handled the failure.
+        */
+       bool locked;
 };
 
 /* Initialize sync point array  */
@@ -115,4 +122,9 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
        return sp->id < host1x_syncpt_nb_pts(sp->host);
 }
 
+static inline void host1x_syncpt_set_locked(struct host1x_syncpt *sp)
+{
+       sp->locked = true;
+}
+
 #endif
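
The new locked flag is set through host1x_syncpt_set_locked() when a job incrementing the syncpoint fails, and cleared again in syncpt_release() above. A hedged sketch of the kind of gate a submission path could then apply (the helper name is hypothetical; the driver's actual rejection point may differ):

    /* Hypothetical submit-time check built on the new flag. */
    static int host1x_syncpt_check_usable(struct host1x_syncpt *sp)
    {
            /* Refuse new work until userspace frees and reallocates
             * the syncpoint. */
            if (sp->locked)
                    return -EIO;

            return 0;
    }
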
index 9b0487c..7bccf58 100644
@@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
 void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
                                              u32 syncpt_id);
 
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
+
 /*
  * host1x channel
  */
@@ -216,8 +218,8 @@ struct host1x_job {
        struct host1x_client *client;
 
        /* Gathers and their memory */
-       struct host1x_job_gather *gathers;
-       unsigned int num_gathers;
+       struct host1x_job_cmd *cmds;
+       unsigned int num_cmds;
 
        /* Array of handles to be pinned & unpinned */
        struct host1x_reloc *relocs;
@@ -234,9 +236,15 @@ struct host1x_job {
        u32 syncpt_incrs;
        u32 syncpt_end;
 
+       /* Completion waiter ref */
+       void *waiter;
+
        /* Maximum time to wait for this job */
        unsigned int timeout;
 
+       /* Job has timed out and should be released */
+       bool cancelled;
+
        /* Index and number of slots used in the push buffer */
        unsigned int first_get;
        unsigned int num_slots;
@@ -257,12 +265,25 @@ struct host1x_job {
 
        /* Add a channel wait for previous ops to complete */
        bool serialize;
+
+       /* Fast-forward syncpoint increments on job timeout */
+       bool syncpt_recovery;
+
+       /* Callback called when job is freed */
+       void (*release)(struct host1x_job *job);
+       void *user_data;
+
+       /* Whether the host1x-side firewall should be run for this job */
+       bool enable_firewall;
 };
 
 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-                                   u32 num_cmdbufs, u32 num_relocs);
+                                   u32 num_cmdbufs, u32 num_relocs,
+                                   bool skip_firewall);
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+                        bool relative, u32 next_class);
 struct host1x_job *host1x_job_get(struct host1x_job *job);
 void host1x_job_put(struct host1x_job *job);
 int host1x_job_pin(struct host1x_job *job, struct device *dev);
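
With the extra skip_firewall parameter and the new host1x_job_add_wait(), building a job that interleaves gathers and syncpoint waits looks roughly like the sketch below. ch, bo, num_words, id, and thresh are placeholders, and HOST1X_CLASS_VIC is assumed to be a valid class define:

    struct host1x_job *job;

    /* One cmdbuf, no relocs; let the driver decide on the firewall. */
    job = host1x_job_alloc(ch, 1, 0, false);
    if (!job)
            return -ENOMEM;

    host1x_job_add_gather(job, bo, num_words, 0);

    /* Absolute wait for syncpoint `id` to reach `thresh` before the
     * next command, then switch back to the engine class. */
    host1x_job_add_wait(job, id, thresh, false, HOST1X_CLASS_VIC);
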
index c4df3c3..94cfc30 100644
@@ -1,24 +1,5 @@
-/*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+/* Copyright (c) 2012-2020 NVIDIA Corporation */
 
 #ifndef _UAPI_TEGRA_DRM_H_
 #define _UAPI_TEGRA_DRM_H_
@@ -29,6 +10,8 @@
 extern "C" {
 #endif
 
+/* Tegra DRM legacy UAPI. Only enabled with STAGING */
+
 #define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
 #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
 
@@ -649,8 +632,8 @@ struct drm_tegra_gem_get_flags {
 #define DRM_TEGRA_SYNCPT_READ          0x02
 #define DRM_TEGRA_SYNCPT_INCR          0x03
 #define DRM_TEGRA_SYNCPT_WAIT          0x04
-#define DRM_TEGRA_OPEN_CHANNEL         0x05
-#define DRM_TEGRA_CLOSE_CHANNEL                0x06
+#define DRM_TEGRA_OPEN_CHANNEL         0x05
+#define DRM_TEGRA_CLOSE_CHANNEL                0x06
 #define DRM_TEGRA_GET_SYNCPT           0x07
 #define DRM_TEGRA_SUBMIT               0x08
 #define DRM_TEGRA_GET_SYNCPT_BASE      0x09
@@ -674,6 +657,402 @@ struct drm_tegra_gem_get_flags {
 #define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
 #define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
 
+/* New Tegra DRM UAPI */
+
+/*
+ * Reported by the driver in the `capabilities` field.
+ *
+ * DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent
+ * with regard to the system memory.
+ */
+#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0)
+
+struct drm_tegra_channel_open {
+       /**
+        * @host1x_class: [in]
+        *
+        * Host1x class of the engine that will be programmed using this
+        * channel.
+        */
+       __u32 host1x_class;
+
+       /**
+        * @flags: [in]
+        *
+        * Flags.
+        */
+       __u32 flags;
+
+       /**
+        * @context: [out]
+        *
+        * Opaque identifier corresponding to the opened channel.
+        */
+       __u32 context;
+
+       /**
+        * @version: [out]
+        *
+        * Version of the engine hardware. This can be used by userspace
+        * to determine how the engine needs to be programmed.
+        */
+       __u32 version;
+
+       /**
+        * @capabilities: [out]
+        *
+        * Flags describing the hardware capabilities.
+        */
+       __u32 capabilities;
+       __u32 padding;
+};
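
A minimal userspace sketch of the open ioctl, assuming an already-open DRM device fd and using VIC's host1x class (0x5d) as an example:

    struct drm_tegra_channel_open args;

    memset(&args, 0, sizeof(args));
    args.host1x_class = 0x5d;       /* e.g. VIC */

    if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &args) < 0)
            return -errno;

    /* args.context identifies the channel from now on; args.version
     * and args.capabilities describe the engine behind it. */
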
+
+struct drm_tegra_channel_close {
+       /**
+        * @context: [in]
+        *
+        * Identifier of the channel to close.
+        */
+       __u32 context;
+       __u32 padding;
+};
+
+/*
+ * Mapping flags that can be used to influence how the mapping is created.
+ *
+ * DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access
+ * DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access
+ */
+#define DRM_TEGRA_CHANNEL_MAP_READ  (1 << 0)
+#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1)
+#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \
+                                         DRM_TEGRA_CHANNEL_MAP_WRITE)
+
+struct drm_tegra_channel_map {
+       /**
+        * @context: [in]
+        *
+        * Identifier of the channel to make the memory available to.
+        */
+       __u32 context;
+
+       /**
+        * @handle: [in]
+        *
+        * GEM handle of the memory to map.
+        */
+       __u32 handle;
+
+       /**
+        * @flags: [in]
+        *
+        * Flags.
+        */
+       __u32 flags;
+
+       /**
+        * @mapping: [out]
+        *
+        * Identifier corresponding to the mapping, to be used for
+        * relocations or unmapping later.
+        */
+       __u32 mapping;
+};
+
+struct drm_tegra_channel_unmap {
+       /**
+        * @context: [in]
+        *
+        * Identifier of the channel to unmap memory from.
+        */
+       __u32 context;
+
+       /**
+        * @mapping: [in]
+        *
+        * Mapping identifier of the memory mapping to unmap.
+        */
+       __u32 mapping;
+};
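
Mapping and unmapping follow the same pattern; a sketch reusing args.context from the open example above, with gem_handle standing in for a GEM handle obtained elsewhere:

    struct drm_tegra_channel_map map = {
            .context = args.context,
            .handle  = gem_handle,
            .flags   = DRM_TEGRA_CHANNEL_MAP_READ_WRITE,
    };

    if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_MAP, &map) < 0)
            return -errno;

    /* ... submit jobs that reference map.mapping ... */

    struct drm_tegra_channel_unmap unmap = {
            .context = args.context,
            .mapping = map.mapping,
    };
    ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_UNMAP, &unmap);
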
+
+/* Submission */
+
+/**
+ * Specify that bit 39 of the patched-in address should be set to switch
+ * swizzling between Tegra and non-Tegra sector layout on systems that store
+ * surfaces in system memory in non-Tegra sector layout.
+ */
+#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0)
+
+struct drm_tegra_submit_buf {
+       /**
+        * @mapping: [in]
+        *
+        * Identifier of the mapping to use in the submission.
+        */
+       __u32 mapping;
+
+       /**
+        * @flags: [in]
+        *
+        * Flags.
+        */
+       __u32 flags;
+
+       /**
+        * Information for relocation patching.
+        */
+       struct {
+               /**
+                * @target_offset: [in]
+                *
+                * Offset from the start of the mapping of the data whose
+                * address is to be patched into the gather.
+                */
+               __u64 target_offset;
+
+               /**
+                * @gather_offset_words: [in]
+                *
+                * Offset, in words, from the start of the gather data to
+                * the location where the address should be patched in.
+                */
+               __u32 gather_offset_words;
+
+               /**
+                * @shift: [in]
+                *
+                * Number of bits the address should be shifted right before
+                * patching in.
+                */
+               __u32 shift;
+       } reloc;
+};
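
Illustratively, the kernel resolves each buf entry to the mapping's DMA address and patches the gather word at gather_offset_words with a value derived roughly as below. This is a sketch of the arithmetic only, not the driver's exact code:

    static __u32 reloc_value(__u64 iova,
                             const struct drm_tegra_submit_buf *buf)
    {
            __u64 addr = iova + buf->reloc.target_offset;

            /* Bit 39 toggles the sector layout, per the flag above. */
            if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
                    addr |= 1ULL << 39;

            return (__u32)(addr >> buf->reloc.shift);
    }
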
+
+/**
+ * Execute `words` words of Host1x opcodes specified in the `gather_data_ptr`
+ * buffer. Each GATHER_UPTR command uses successive words from the buffer.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR               0
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT               1
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands. The threshold is calculated relative to the start of the job.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE      2
+
+struct drm_tegra_submit_cmd_gather_uptr {
+       __u32 words;
+       __u32 reserved[3];
+};
+
+struct drm_tegra_submit_cmd_wait_syncpt {
+       __u32 id;
+       __u32 value;
+       __u32 reserved[2];
+};
+
+struct drm_tegra_submit_cmd {
+       /**
+        * @type: [in]
+        *
+        * Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD*
+        * defines.
+        */
+       __u32 type;
+
+       /**
+        * @flags: [in]
+        *
+        * Flags.
+        */
+       __u32 flags;
+
+       union {
+               struct drm_tegra_submit_cmd_gather_uptr gather_uptr;
+               struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt;
+               __u32 reserved[4];
+       };
+};
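
Putting the command types together, a sketch of a two-entry command stream: wait for a prefence syncpoint, then execute num_words of opcodes supplied through gather_data_ptr (prefence_id, prefence_value, and num_words are placeholders):

    struct drm_tegra_submit_cmd cmds[2];

    memset(cmds, 0, sizeof(cmds));

    cmds[0].type = DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT;
    cmds[0].wait_syncpt.id = prefence_id;
    cmds[0].wait_syncpt.value = prefence_value;

    cmds[1].type = DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR;
    cmds[1].gather_uptr.words = num_words;
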
+
+struct drm_tegra_submit_syncpt {
+       /**
+        * @id: [in]
+        *
+        * ID of the syncpoint that the job will increment.
+        */
+       __u32 id;
+
+       /**
+        * @flags: [in]
+        *
+        * Flags.
+        */
+       __u32 flags;
+
+       /**
+        * @increments: [in]
+        *
+        * Number of times the job will increment this syncpoint.
+        */
+       __u32 increments;
+
+       /**
+        * @value: [out]
+        *
+        * Value the syncpoint will have once the job has completed all
+        * its specified syncpoint increments.
+        *
+        * Note that the kernel may increment the syncpoint before or after
+        * the job. These increments are not reflected in this field.
+        *
+        * If the job hangs or times out, not all of the increments may
+        * get executed.
+        */
+       __u32 value;
+};
+
+struct drm_tegra_channel_submit {
+       /**
+        * @context: [in]
+        *
+        * Identifier of the channel to submit this job to.
+        */
+       __u32 context;
+
+       /**
+        * @num_bufs: [in]
+        *
+        * Number of elements in the `bufs_ptr` array.
+        */
+       __u32 num_bufs;
+
+       /**
+        * @num_cmds: [in]
+        *
+        * Number of elements in the `cmds_ptr` array.
+        */
+       __u32 num_cmds;
+
+       /**
+        * @gather_data_words: [in]
+        *
+        * Number of 32-bit words in the `gather_data_ptr` array.
+        */
+       __u32 gather_data_words;
+
+       /**
+        * @bufs_ptr: [in]
+        *
+        * Pointer to an array of drm_tegra_submit_buf structures.
+        */
+       __u64 bufs_ptr;
+
+       /**
+        * @cmds_ptr: [in]
+        *
+        * Pointer to an array of drm_tegra_submit_cmd structures.
+        */
+       __u64 cmds_ptr;
+
+       /**
+        * @gather_data_ptr: [in]
+        *
+        * Pointer to an array of Host1x opcodes to be used by GATHER_UPTR
+        * commands.
+        */
+       __u64 gather_data_ptr;
+
+       /**
+        * @syncobj_in: [in]
+        *
+        * Handle of a DRM syncobj that will be waited on before
+        * submission. Ignored if zero.
+        */
+       __u32 syncobj_in;
+
+       /**
+        * @syncobj_out: [in]
+        *
+        * Handle for DRM syncobj that will have its fence replaced with
+        * the job's completion fence. Ignored if zero.
+        */
+       __u32 syncobj_out;
+
+       /**
+        * @syncpt: [in,out]
+        *
+        * Information about the syncpoint the job will increment.
+        */
+       struct drm_tegra_submit_syncpt syncpt;
+};
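
Tying the pieces together, a hedged sketch of a complete submission. buf is assumed to be a drm_tegra_submit_buf referencing map.mapping from the earlier example; cmds and num_words come from the command-stream sketch above; opcodes and syncpt_id are placeholders:

    struct drm_tegra_channel_submit submit;

    memset(&submit, 0, sizeof(submit));
    submit.context           = args.context;
    submit.num_bufs          = 1;
    submit.num_cmds          = 2;
    submit.gather_data_words = num_words;
    submit.bufs_ptr          = (__u64)(uintptr_t)&buf;
    submit.cmds_ptr          = (__u64)(uintptr_t)cmds;
    submit.gather_data_ptr   = (__u64)(uintptr_t)opcodes;
    submit.syncpt.id         = syncpt_id;
    submit.syncpt.increments = 1;

    if (ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_SUBMIT, &submit) < 0)
            return -errno;

    /* submit.syncpt.value is the threshold to wait on for completion. */
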
+
+struct drm_tegra_syncpoint_allocate {
+       /**
+        * @id: [out]
+        *
+        * ID of allocated syncpoint.
+        */
+       __u32 id;
+       __u32 padding;
+};
+
+struct drm_tegra_syncpoint_free {
+       /**
+        * @id: [in]
+        *
+        * ID of syncpoint to free.
+        */
+       __u32 id;
+       __u32 padding;
+};
+
+struct drm_tegra_syncpoint_wait {
+       /**
+        * @timeout_ns: [in]
+        *
+        * Absolute timestamp, in nanoseconds, at which the wait will
+        * time out.
+        */
+       __s64 timeout_ns;
+
+       /**
+        * @id: [in]
+        *
+        * ID of syncpoint to wait on.
+        */
+       __u32 id;
+
+       /**
+        * @threshold: [in]
+        *
+        * Threshold to wait for.
+        */
+       __u32 threshold;
+
+       /**
+        * @value: [out]
+        *
+        * Value of the syncpoint upon wait completion.
+        */
+       __u32 value;
+
+       __u32 padding;
+};
+
+#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open)
+#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close)
+#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map)
+#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap)
+#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit)
+
+#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait)
+
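+
Finally, a sketch of the explicit syncpoint lifecycle: allocate, wait on the completion threshold returned by the submit example above, then free. now_ns() is a placeholder for reading the appropriate clock:

    struct drm_tegra_syncpoint_allocate alloc = { 0 };

    if (ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE, &alloc) < 0)
            return -errno;

    /* ... submit a job incrementing alloc.id ... */

    struct drm_tegra_syncpoint_wait wait = {
            .timeout_ns = now_ns() + 1000000000,    /* 1 s from now */
            .id         = alloc.id,
            .threshold  = submit.syncpt.value,
    };

    if (ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_WAIT, &wait) < 0)
            return -errno;

    struct drm_tegra_syncpoint_free sp_free = { .id = alloc.id };
    ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_FREE, &sp_free);
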
 #if defined(__cplusplus)
 }
 #endif