Merge tag 'drm-misc-next-fixes-2020-10-20' of git://anongit.freedesktop.org/drm/drm...
author Dave Airlie <airlied@redhat.com>
Tue, 20 Oct 2020 20:52:50 +0000 (06:52 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 20 Oct 2020 20:58:39 +0000 (06:58 +1000)
Two patches to prevent out-of-bounds accesses on font buffers

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20201020141445.4jisqylfbusdnzge@gilmour
470 files changed:
Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/display/renesas,du.txt
Documentation/gpu/amdgpu.rst
MAINTAINERS
drivers/acpi/acpi_lpss.c
drivers/char/agp/amd-k7-agp.c
drivers/char/agp/nvidia-agp.c
drivers/char/agp/sworks-agp.c
drivers/dma-buf/heaps/heap-helpers.c
drivers/dma-buf/udmabuf.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/include/bios_parser_types.h
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/hwmgr.h
drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h [new file with mode: 0644]
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/cadence/Kconfig [new file with mode: 0644]
drivers/gpu/drm/bridge/cadence/Makefile [new file with mode: 0644]
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c [new file with mode: 0644]
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h [new file with mode: 0644]
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c [new file with mode: 0644]
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h [new file with mode: 0644]
drivers/gpu/drm/bridge/lvds-codec.c
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_atomic.c
drivers/gpu/drm/i915/display/intel_atomic.h
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_cdclk.h
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_ddi.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_hdcp.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_gmbus.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdcp.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_hdmi.h
drivers/gpu/drm/i915/display/intel_hotplug.c
drivers/gpu/drm/i915/display/intel_lvds.c
drivers/gpu/drm/i915/display/intel_panel.c
drivers/gpu/drm/i915/display/intel_panel.h
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/display/intel_tv.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/display/vlv_dsi_pll.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/lima/lima_vm.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/Makefile
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_hdmi.h
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/dp/dp_audio.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_audio.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_aux.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_aux.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_catalog.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_catalog.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_ctrl.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_ctrl.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_debug.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_debug.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_display.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_display.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_drm.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_drm.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_hpd.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_hpd.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_link.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_link.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_panel.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_panel.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_parser.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_parser.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_power.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_power.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_reg.h [new file with mode: 0644]
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c [new file with mode: 0644]
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_trace.h
drivers/gpu/drm/msm/msm_gpummu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/msm/msm_ringbuffer.h
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/radeon/uvd_v2_2.c
drivers/gpu/drm/radeon/uvd_v4_2.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_kms.h
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/v3d/v3d_mmu.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/gpu/host1x/job.c
drivers/media/platform/vsp1/vsp1_drm.c
drivers/phy/cadence/phy-cadence-torrent.c
drivers/phy/mediatek/Kconfig
drivers/phy/mediatek/Makefile
drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c [moved from drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c with 99% similarity]
drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c [moved from drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c with 99% similarity]
drivers/phy/mediatek/phy-mtk-hdmi.c [moved from drivers/gpu/drm/mediatek/mtk_hdmi_phy.c with 96% similarity]
drivers/phy/mediatek/phy-mtk-hdmi.h [moved from drivers/gpu/drm/mediatek/mtk_hdmi_phy.h with 95% similarity]
drivers/pwm/pwm-crc.c
drivers/pwm/pwm-lpss-platform.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-lpss.h
drivers/xen/gntdev-dmabuf.c
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_edid.h
include/drm/drm_prime.h
include/drm/i915_pciids.h
include/linux/adreno-smmu-priv.h [new file with mode: 0644]
include/linux/phy/phy.h
samples/vfio-mdev/mbochs.c

diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
new file mode 100644 (file)
index 0000000..74d675f
--- /dev/null
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/bridge/cdns,mhdp8546.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence MHDP8546 bridge
+
+maintainers:
+  - Swapnil Jakhade <sjakhade@cadence.com>
+  - Yuti Amonkar <yamonkar@cadence.com>
+
+properties:
+  compatible:
+    enum:
+      - cdns,mhdp8546
+      - ti,j721e-mhdp8546
+
+  reg:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description:
+          Register block of mhdptx apb registers up to PHY mapped area (AUX_CONFIG_P).
+          The AUX and PMA registers are not part of this range, they are instead
+          included in the associated PHY.
+      - description:
+          Register block for DSS_EDP0_INTG_CFG_VP registers in case of TI J7 SoCs.
+
+  reg-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: mhdptx
+      - const: j721e-intg
+
+  clocks:
+    maxItems: 1
+    description:
+      DP bridge clock, used by the IP to know how to translate a number of
+      clock cycles into a time (which is used to comply with DP standard timings
+      and delays).
+
+  phys:
+    maxItems: 1
+    description:
+      phandle to the DisplayPort PHY.
+
+  phy-names:
+    items:
+      - const: dpphy
+
+  power-domains:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  ports:
+    type: object
+    description:
+      Ports as described in Documentation/devicetree/bindings/graph.txt.
+
+    properties:
+      '#address-cells':
+        const: 1
+
+      '#size-cells':
+        const: 0
+
+      port@0:
+        type: object
+        description:
+          First input port representing the DP bridge input.
+
+      port@1:
+        type: object
+        description:
+          Second input port representing the DP bridge input.
+
+      port@2:
+        type: object
+        description:
+          Third input port representing the DP bridge input.
+
+      port@3:
+        type: object
+        description:
+          Fourth input port representing the DP bridge input.
+
+      port@4:
+        type: object
+        description:
+          Output port representing the DP bridge output.
+
+    required:
+      - port@0
+      - port@4
+      - '#address-cells'
+      - '#size-cells'
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,j721e-mhdp8546
+    then:
+      properties:
+        reg:
+          minItems: 2
+        reg-names:
+          minItems: 2
+    else:
+      properties:
+        reg:
+          maxItems: 1
+        reg-names:
+          maxItems: 1
+
+required:
+  - compatible
+  - clocks
+  - reg
+  - reg-names
+  - phys
+  - phy-names
+  - interrupts
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    bus {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        mhdp: dp-bridge@f0fb000000 {
+            compatible = "cdns,mhdp8546";
+            reg = <0xf0 0xfb000000 0x0 0x1000000>;
+            reg-names = "mhdptx";
+            clocks = <&mhdp_clock>;
+            phys = <&dp_phy>;
+            phy-names = "dpphy";
+            interrupts = <GIC_SPI 614 IRQ_TYPE_LEVEL_HIGH>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    dp_bridge_input: endpoint {
+                        remote-endpoint = <&xxx_dpi_output>;
+                    };
+                };
+
+                port@4 {
+                    reg = <4>;
+                    dp_bridge_output: endpoint {
+                        remote-endpoint = <&xxx_dp_connector_input>;
+                    };
+                };
+            };
+        };
+    };
+...
index 68951d5..ff3ae25 100644 (file)
@@ -79,6 +79,9 @@ properties:
       The GPIO used to control the power down line of this device.
     maxItems: 1
 
+  power-supply:
+    maxItems: 1
+
 required:
   - compatible
   - ports
index 819f3e3..3f60726 100644 (file)
@@ -14,8 +14,10 @@ Required properties:
 - compatible : Shall contain one or more of
   - "renesas,r8a774a1-hdmi" for R8A774A1 (RZ/G2M) compatible HDMI TX
   - "renesas,r8a774b1-hdmi" for R8A774B1 (RZ/G2N) compatible HDMI TX
+  - "renesas,r8a774e1-hdmi" for R8A774E1 (RZ/G2H) compatible HDMI TX
   - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
   - "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX
+  - "renesas,r8a77961-hdmi" for R8A77961 (R-Car M3-W+) compatible HDMI TX
   - "renesas,r8a77965-hdmi" for R8A77965 (R-Car M3-N) compatible HDMI TX
   - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 and RZ/G2 compatible
                             HDMI TX
@@ -42,7 +44,7 @@ Optional properties:
 Example:
 
        hdmi0: hdmi@fead0000 {
-               compatible = "renesas,r8a7795-dw-hdmi";
+               compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
                reg = <0 0xfead0000 0 0x10000>;
                interrupts = <0 389 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cpg CPG_CORE R8A7795_CLK_S0D4>, <&cpg CPG_MOD 729>;
index baaf2a2..e5b1639 100644 (file)
@@ -16,11 +16,13 @@ description: |
 properties:
   compatible:
     enum:
+      - renesas,r8a7742-lvds # for RZ/G1H compatible LVDS encoders
       - renesas,r8a7743-lvds # for RZ/G1M compatible LVDS encoders
       - renesas,r8a7744-lvds # for RZ/G1N compatible LVDS encoders
       - renesas,r8a774a1-lvds # for RZ/G2M compatible LVDS encoders
       - renesas,r8a774b1-lvds # for RZ/G2N compatible LVDS encoders
       - renesas,r8a774c0-lvds # for RZ/G2E compatible LVDS encoders
+      - renesas,r8a774e1-lvds # for RZ/G2H compatible LVDS encoders
       - renesas,r8a7790-lvds # for R-Car H2 compatible LVDS encoders
       - renesas,r8a7791-lvds # for R-Car M2-W compatible LVDS encoders
       - renesas,r8a7793-lvds # for R-Car M2-N compatible LVDS encoders
index b91e709..1212207 100644 (file)
@@ -43,7 +43,7 @@ Required properties (all function blocks):
        "mediatek,<chip>-dpi"                   - DPI controller, see mediatek,dpi.txt
        "mediatek,<chip>-disp-mutex"            - display mutex
        "mediatek,<chip>-disp-od"               - overdrive
-  the supported chips are mt2701, mt2712 and mt8173.
+  the supported chips are mt2701, mt7623, mt2712 and mt8173.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
   merge and split function blocks).
index 77def44..dc1ebd1 100644 (file)
@@ -7,7 +7,7 @@ output bus.
 
 Required properties:
 - compatible: "mediatek,<chip>-dpi"
-  the supported chips are mt2701 , mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
index 8e4729d..f06f24d 100644 (file)
@@ -7,7 +7,7 @@ channel output.
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
-  the supported chips are mt2701, mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -26,7 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
 
 Required properties:
 - compatible: "mediatek,<chip>-mipi-tx"
-  the supported chips are mt2701, mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - clocks: PLL reference clock
 - clock-output-names: name of the output clock line to the DSI encoder
index 7b12424..6b1c586 100644 (file)
@@ -6,6 +6,7 @@ its parallel input.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-hdmi".
+- the supported chips are mt2701, mt7623 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -32,6 +33,7 @@ The HDMI CEC controller handles hotplug detection and CEC communication.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-cec"
+- the supported chips are mt7623 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clock
@@ -44,6 +46,7 @@ The Mediatek's I2C controller is used to interface with I2C devices.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-hdmi-ddc"
+- the supported chips are mt7623 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - clocks: device clock
 - clock-names: Should be "ddc-i2c".
@@ -56,6 +59,7 @@ output and drives the HDMI pads.
 
 Required properties:
 - compatible: "mediatek,<chip>-hdmi-phy"
+- the supported chips are mt2701, mt7623 and mt8173.
 - reg: Physical base address and length of the module's registers
 - clocks: PLL reference clock
 - clock-names: must contain "pll_ref"
index 7884fd7..b9a64d3 100644 (file)
@@ -90,6 +90,8 @@ Required properties:
   * "qcom,dsi-phy-14nm-660"
   * "qcom,dsi-phy-10nm"
   * "qcom,dsi-phy-10nm-8998"
+  * "qcom,dsi-phy-7nm"
+  * "qcom,dsi-phy-7nm-8150"
 - reg: Physical base address and length of the registers of PLL, PHY. Some
   revisions require the PHY regulator base address, whereas others require the
   PHY lane base address. See below for each PHY revision.
@@ -98,7 +100,7 @@ Required properties:
   * "dsi_pll"
   * "dsi_phy"
   * "dsi_phy_regulator"
-  For DSI 14nm and 10nm PHYs:
+  For DSI 14nm, 10nm and 7nm PHYs:
   * "dsi_pll"
   * "dsi_phy"
   * "dsi_phy_lane"
@@ -116,7 +118,7 @@ Required properties:
 - vcca-supply: phandle to vcca regulator device node
   For 14nm PHY:
 - vcca-supply: phandle to vcca regulator device node
-  For 10nm PHY:
+  For 10nm and 7nm PHY:
 - vdds-supply: phandle to vdds regulator device node
 
 Optional properties:
index 51cd4d1..7d65c24 100644 (file)
@@ -3,6 +3,7 @@
 Required Properties:
 
   - compatible: must be one of the following.
+    - "renesas,du-r8a7742" for R8A7742 (RZ/G1H) compatible DU
     - "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
     - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
     - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
@@ -10,6 +11,7 @@ Required Properties:
     - "renesas,du-r8a774a1" for R8A774A1 (RZ/G2M) compatible DU
     - "renesas,du-r8a774b1" for R8A774B1 (RZ/G2N) compatible DU
     - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
+    - "renesas,du-r8a774e1" for R8A774E1 (RZ/G2H) compatible DU
     - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
     - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -18,6 +20,7 @@ Required Properties:
     - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
     - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
     - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
+    - "renesas,du-r8a77961" for R8A77961 (R-Car M3-W+) compatible DU
     - "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU
     - "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU
     - "renesas,du-r8a77980" for R8A77980 (R-Car V3H) compatible DU
@@ -68,6 +71,7 @@ corresponding to each DU output.
 
                         Port0          Port1          Port2          Port3
 -----------------------------------------------------------------------------
+ R8A7742 (RZ/G1H)       DPAD 0         LVDS 0         LVDS 1         -
  R8A7743 (RZ/G1M)       DPAD 0         LVDS 0         -              -
  R8A7744 (RZ/G1N)       DPAD 0         LVDS 0         -              -
  R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
@@ -75,6 +79,7 @@ corresponding to each DU output.
  R8A774A1 (RZ/G2M)      DPAD 0         HDMI 0         LVDS 0         -
  R8A774B1 (RZ/G2N)      DPAD 0         HDMI 0         LVDS 0         -
  R8A774C0 (RZ/G2E)      DPAD 0         LVDS 0         LVDS 1         -
+ R8A774E1 (RZ/G2H)      DPAD 0         HDMI 0         LVDS 0         -
  R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
  R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
  R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
@@ -83,6 +88,7 @@ corresponding to each DU output.
  R8A7794 (R-Car E2)     DPAD 0         DPAD 1         -              -
  R8A7795 (R-Car H3)     DPAD 0         HDMI 0         HDMI 1         LVDS 0
  R8A7796 (R-Car M3-W)   DPAD 0         HDMI 0         LVDS 0         -
+ R8A77961 (R-Car M3-W+) DPAD 0         HDMI 0         LVDS 0         -
  R8A77965 (R-Car M3-N)  DPAD 0         HDMI 0         LVDS 0         -
  R8A77970 (R-Car V3M)   DPAD 0         LVDS 0         -              -
  R8A77980 (R-Car V3H)   DPAD 0         LVDS 0         -              -
index 29ca5f5..1f9ea82 100644 (file)
@@ -70,6 +70,15 @@ Interrupt Handling
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
    :internal:
 
+IP Blocks
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :doc: IP Blocks
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :identifiers: amd_ip_block_type amd_ip_funcs
+
 AMDGPU XGMI Support
 ===================
 
@@ -197,8 +206,8 @@ pp_power_profile_mode
 .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_power_profile_mode
 
-*_busy_percent
-~~~~~~~~~~~~~~
+\*_busy_percent
+~~~~~~~~~~~~~~~
 
 .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: gpu_busy_percent
index 99db89d..06024a4 100644 (file)
@@ -5828,6 +5828,7 @@ L:        dri-devel@lists.freedesktop.org
 S:     Supported
 F:     Documentation/devicetree/bindings/display/mediatek/
 F:     drivers/gpu/drm/mediatek/
+F:     drivers/phy/mediatek/phy-mtk-hdmi*
 
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
index 5e2bfbc..a8d7d83 100644 (file)
@@ -67,7 +67,15 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER               BIT(2)
 #define LPSS_LTR                       BIT(3)
 #define LPSS_SAVE_CTX                  BIT(4)
-#define LPSS_NO_D3_DELAY               BIT(5)
+/*
+ * On some devices the DSDT AML code for another device turns this device off
+ * before our suspend handler runs, causing us to read and save all-ones
+ * (0xffffffff) as the ctx register values.
+ * Luckily these devices always use the same ctx register values, so we can
+ * work around this by saving the ctx registers once on activation.
+ */
+#define LPSS_SAVE_CTX_ONCE             BIT(5)
+#define LPSS_NO_D3_DELAY               BIT(6)
 
 struct lpss_private_data;
 
@@ -254,9 +262,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static const struct lpss_device_desc bsw_pwm_dev_desc = {
-       .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+       .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
        .prv_offset = 0x800,
        .setup = bsw_pwm_setup,
+       .resume_from_noirq = true,
 };
 
 static const struct lpss_device_desc byt_uart_dev_desc = {
@@ -884,9 +893,14 @@ static int acpi_lpss_activate(struct device *dev)
         * we have to deassert reset line to be sure that ->probe() will
         * recognize the device.
         */
-       if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+       if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
                lpss_deassert_reset(pdata);
 
+#ifdef CONFIG_PM
+       if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
+               acpi_lpss_save_ctx(dev, pdata);
+#endif
+
        return 0;
 }
 
@@ -1030,7 +1044,7 @@ static int acpi_lpss_resume(struct device *dev)
 
        acpi_lpss_d3_to_d0_delay(pdata);
 
-       if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+       if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
                acpi_lpss_restore_ctx(dev, pdata);
 
        return 0;
index 6914e4f..2b20955 100644 (file)
@@ -425,7 +425,7 @@ static int agp_amdk7_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        bridge->driver = &amd_irongate_driver;
-       bridge->dev_private_data = &amd_irongate_private,
+       bridge->dev_private_data = &amd_irongate_private;
        bridge->dev = pdev;
        bridge->capndx = cap_ptr;
 
index 623205b..f78e756 100644 (file)
@@ -382,7 +382,7 @@ static int agp_nvidia_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        bridge->driver = &nvidia_driver;
-       bridge->dev_private_data = &nvidia_private,
+       bridge->dev_private_data = &nvidia_private;
        bridge->dev = pdev;
        bridge->capndx = cap_ptr;
 
index 7729414..f875970 100644 (file)
@@ -513,7 +513,7 @@ static int agp_serverworks_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        bridge->driver = &sworks_driver;
-       bridge->dev_private_data = &serverworks_private,
+       bridge->dev_private_data = &serverworks_private;
        bridge->dev = pci_dev_get(pdev);
 
        pci_set_drvdata(pdev, bridge);
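
The three AGP hunks above replace the same stray comma operator with a
semicolon. The typo was harmless — the comma operator still evaluates both
assignments — but it silently fuses two statements into one expression. A
minimal, hedged illustration (standalone C with hypothetical names, not
kernel code):

    #include <stdio.h>

    struct bridge {
            void *driver;
            void *dev_private_data;
    };

    int main(void)
    {
            static char drv, priv;
            struct bridge b;

            /* The typo: a comma operator chains both assignments into a
             * single expression statement. Both stores still happen, so
             * behavior was unchanged, but the construct hides the statement
             * boundary from readers and from compiler diagnostics. */
            b.driver = &drv,
            b.dev_private_data = &priv;

            printf("%p %p\n", b.driver, b.dev_private_data);
            return 0;
    }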
index 9f964ca..d0696cf 100644 (file)
@@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                      enum dma_data_direction direction)
 {
        struct dma_heaps_attachment *a = attachment->priv;
-       struct sg_table *table;
-
-       table = &a->table;
+       struct sg_table *table = &a->table;
+       int ret;
 
-       if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-                       direction))
-               table = ERR_PTR(-ENOMEM);
+       ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+       if (ret)
+               table = ERR_PTR(ret);
        return table;
 }
 
@@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
 {
-       dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+       dma_unmap_sgtable(attachment->dev, table, direction, 0);
 }
 
 static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
index 5ee1e93..db732f7 100644 (file)
@@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
-       if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
-               ret = -EINVAL;
+       ret = dma_map_sgtable(dev, sg, direction, 0);
+       if (ret < 0)
                goto err;
-       }
        return sg;
 
 err:
@@ -78,7 +77,7 @@ err:
 static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
 {
-       dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+       dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
 }
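
Both dma-buf hunks above convert open-coded dma_map_sg()/dma_unmap_sg()
calls to the sg_table-based helpers. A hedged sketch of the convention they
adopt — dma_map_sgtable() returns 0 or a negative errno and tracks the
mapped-entry count inside the sg_table itself, so the old "zero nents means
failure" check disappears (the *_sketch names are illustrative, not APIs
from this merge):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    static struct sg_table *map_attachment_sketch(struct device *dev,
                                                  struct sg_table *table,
                                                  enum dma_data_direction dir)
    {
            int ret;

            /* 0 on success, negative errno on failure; on success the
             * number of mapped DMA segments is recorded in table->nents. */
            ret = dma_map_sgtable(dev, table, dir, 0);
            if (ret)
                    return ERR_PTR(ret);

            return table;
    }

    static void unmap_attachment_sketch(struct device *dev,
                                        struct sg_table *table,
                                        enum dma_data_direction dir)
    {
            /* Symmetric teardown; the helper walks table->sgl itself. */
            dma_unmap_sgtable(dev, table, dir, 0);
    }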
index 4009d2e..87f095d 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/rbtree.h>
 #include <linux/hashtable.h>
 #include <linux/dma-fence.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
 
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 
 #define MAX_GPU_INSTANCE               16
@@ -879,6 +882,9 @@ struct amdgpu_device {
        /* mmhub */
        struct amdgpu_mmhub             mmhub;
 
+       /* gfxhub */
+       struct amdgpu_gfxhub            gfxhub;
+
        /* gfx */
        struct amdgpu_gfx               gfx;
 
@@ -987,6 +993,9 @@ struct amdgpu_device {
        atomic_t                        throttling_logging_enabled;
        struct ratelimit_state          throttling_logging_rs;
        uint32_t                        ras_features;
+
+       bool                            in_pci_err_recovery;
+       struct pci_saved_state          *pci_state;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1011,18 +1020,32 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+                           uint32_t reg, uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
                        uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+                            uint32_t reg, uint32_t v);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+                               u32 pcie_index, u32 pcie_data,
+                               u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+                                 u32 pcie_index, u32 pcie_data,
+                                 u32 reg_addr);
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+                                u32 pcie_index, u32 pcie_data,
+                                u32 reg_addr, u32 reg_data);
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+                                  u32 pcie_index, u32 pcie_data,
+                                  u32 reg_addr, u64 reg_data);
+
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
@@ -1033,8 +1056,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
  */
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
 
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
@@ -1042,9 +1065,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1090,7 +1113,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
                WREG32_SMC(_Reg, tmp);                          \
        } while (0)
 
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
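
The amdgpu_device_indirect_* prototypes added above expose the classic
index/data register pair for registers that are not directly mapped. A
hedged sketch of the pattern they wrap (the helper name and the omitted
locking are assumptions; the real implementations in amdgpu_device.c also
hold a lock across the sequence):

    #include <linux/io.h>
    #include <linux/types.h>

    static u32 indirect_rreg_sketch(void __iomem *rmmio,
                                    u32 pcie_index, u32 pcie_data,
                                    u32 reg_addr)
    {
            void __iomem *index_reg = rmmio + pcie_index * 4;
            void __iomem *data_reg = rmmio + pcie_data * 4;
            u32 val;

            /* Select the target register by writing its address into the
             * index register; the posting read orders the write before the
             * data access. */
            writel(reg_addr, index_reg);
            readl(index_reg);

            /* The data register now aliases the selected register. */
            val = readl(data_reg);
            return val;
    }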
 
@@ -1260,6 +1283,15 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
 
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
+                                          pci_channel_state_t state);
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
+void amdgpu_pci_resume(struct pci_dev *pdev);
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+
 #include "amdgpu_object.h"
 
 /* used by df_v3_6.c and amdgpu_pmu.c */
index 4a93b88..165b02e 100644 (file)
@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        }
        adev->atif = atif;
 
-       if (atif->notifications.brightness_change) {
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+       if (atif->notifications.brightness_change) {
                if (amdgpu_device_has_dc_support(adev)) {
 #if defined(CONFIG_DRM_AMD_DC)
                        struct amdgpu_display_manager *dm = &adev->dm;
index edff1b7..0544460 100644 (file)
@@ -36,6 +36,8 @@
  */
 uint64_t amdgpu_amdkfd_total_mem_size;
 
+static bool kfd_initialized;
+
 int amdgpu_amdkfd_init(void)
 {
        struct sysinfo si;
@@ -51,19 +53,26 @@ int amdgpu_amdkfd_init(void)
 #else
        ret = -ENOENT;
 #endif
+       kfd_initialized = !ret;
 
        return ret;
 }
 
 void amdgpu_amdkfd_fini(void)
 {
-       kgd2kfd_exit();
+       if (kfd_initialized) {
+               kgd2kfd_exit();
+               kfd_initialized = false;
+       }
 }
 
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 {
        bool vf = amdgpu_sriov_vf(adev);
 
+       if (!kfd_initialized)
+               return;
+
        adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
                                      adev->pdev, adev->asic_type, vf);
 
@@ -572,6 +581,13 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
        return adev->rev_id;
 }
 
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       return adev->gmc.noretry;
+}
+
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
index a10507e..bc9f0e4 100644 (file)
@@ -181,6 +181,7 @@ uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
 
 /* Read user wptr from a specified user address space with page fault
index df0aab0..1529815 100644 (file)
@@ -32,7 +32,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_0.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -753,7 +752,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
        }
 
        /* SDMA is on gfxhub as well for Navi1* series */
-       gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
index 5b38f84..50016bf 100644 (file)
@@ -31,7 +31,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_1.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -657,7 +656,7 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        /* SDMA is on gfxhub as well for Navi1* series */
-       gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 #if 0
index e6aede7..e0d5110 100644 (file)
@@ -36,9 +36,7 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
-#include "mmhub_v1_0.h"
-#include "gfxhub_v1_0.h"
-
+#include "gfx_v9_0.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -703,7 +701,180 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
 
        adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 
-       gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+       mutex_lock(&adev->srbm_mutex);
+       mutex_lock(&adev->grbm_idx_mutex);
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+       mutex_unlock(&adev->grbm_idx_mutex);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
+ * get_wave_count - Read device registers to get the number of waves in flight
+ * for a particular queue. The method also returns the VMID associated with
+ * the queue.
+ *
+ * @adev: Handle of device whose registers are to be read
+ * @queue_idx: Index of queue in the queue-map bit-field
+ * @wave_cnt: Output parameter updated with number of waves in flight
+ * @vmid: Output parameter updated with VMID of queue whose wave count
+ * is being collected
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+               int *wave_cnt, int *vmid)
+{
+       int pipe_idx;
+       int queue_slot;
+       unsigned int reg_val;
+
+       /*
+        * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
+        * parameters to read out waves in flight. Get VMID if there are
+        * non-zero waves in flight.
+        */
+       *vmid = 0xFF;
+       *wave_cnt = 0;
+       pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+       queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+       soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+       reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+                        queue_slot);
+       *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+       if (*wave_cnt != 0)
+               *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+                        CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+}
+
+/**
+ * kgd_gfx_v9_get_cu_occupancy - Reads relevant registers associated with each
+ * shader engine and aggregates the number of waves that are in flight for the
+ * process whose pasid is provided as a parameter. The process could have ZERO
+ * or more queues running and submitting waves to compute units.
+ *
+ * @kgd: Handle of device from which to get number of waves in flight
+ * @pasid: Identifies the process for which this query call is invoked
+ * @pasid_wave_cnt: Output parameter updated with number of waves in flight
+ * that belong to process with given pasid
+ * @max_waves_per_cu: Output parameter updated with maximum number of waves
+ * possible per Compute Unit
+ *
+ * Note: It's possible that the device has too many queues (oversubscription),
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ *    Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ *    Time T2: pasid = getPasid(vmid); vmid is now associated with Pasid P2
+ * In the sequence above the wave count obtained at time T1 will be incorrectly
+ * lost from, or added to, the total wave count.
+ *
+ * The registers that provide the waves in flight are:
+ *
+ *  SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
+ *  queue is slotted, OFF if there is no queue. A process could have ZERO or
+ *  more queues slotted and submitting waves to be run on compute units. Even
+ *  when there is a queue it is possible there could be zero wave fronts; this
+ *  can happen when the queue is waiting on top-of-pipe events - e.g. a
+ *  waitRegMem command
+ *
+ *  For each bit that is ON from above:
+ *
+ *    Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ *    number of waves that are in flight for the queue at specified index. The
+ *    index ranges from 0 to 7.
+ *
+ *    If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
+ *    of the wave(s).
+ *
+ *    Determine if the VMID from the above step maps to the pasid provided as
+ *    a parameter. If it matches, aggregate the wave count. A VMID that does
+ *    not match the pasid is a normal condition, i.e. a device is expected to
+ *    support multiple queues from multiple processes.
+ *
+ *  Reading the registers referenced above involves programming GRBM
+ *  appropriately.
+ */
+static void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+               int *pasid_wave_cnt, int *max_waves_per_cu)
+{
+       int qidx;
+       int vmid;
+       int se_idx;
+       int sh_idx;
+       int se_cnt;
+       int sh_cnt;
+       int wave_cnt;
+       int queue_map;
+       int pasid_tmp;
+       int max_queue_cnt;
+       int vmid_wave_cnt = 0;
+       struct amdgpu_device *adev;
+       DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
+
+       adev = get_amdgpu_device(kgd);
+       lock_spi_csq_mutexes(adev);
+       soc15_grbm_select(adev, 1, 0, 0, 0);
+
+       /*
+        * Iterate through the shader engines and arrays of the device
+        * to get number of waves in flight
+        */
+       bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+                         KGD_MAX_QUEUES);
+       max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
+                       adev->gfx.mec.num_queue_per_pipe;
+       sh_cnt = adev->gfx.config.max_sh_per_se;
+       se_cnt = adev->gfx.config.max_shader_engines;
+       for (se_idx = 0; se_idx < se_cnt; se_idx++) {
+               for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+
+                       gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+                       queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
+                                          mmSPI_CSQ_WF_ACTIVE_STATUS));
+
+                       /*
+                        * Assumption: the queue map encodes the following
+                        * schema: four pipes per micro-engine, with each pipe
+                        * mapping eight queues. This schema is true for GFX9
+                        * devices and must be verified for newer device
+                        * families
+                        */
+                       for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+
+                               /* Skip queues that are not associated with
+                                * compute functions
+                                */
+                               if (!test_bit(qidx, cp_queue_bitmap))
+                                       continue;
+
+                               if (!(queue_map & (1 << qidx)))
+                                       continue;
+
+                               /* Get number of waves in flight and aggregate them */
+                               get_wave_count(adev, qidx, &wave_cnt, &vmid);
+                               if (wave_cnt != 0) {
+                                       pasid_tmp =
+                                         RREG32(SOC15_REG_OFFSET(OSSSYS, 0,
+                                                mmIH_VMID_0_LUT) + vmid);
+                                       if (pasid_tmp == pasid)
+                                               vmid_wave_cnt += wave_cnt;
+                               }
+                       }
+               }
+       }
+
+       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       soc15_grbm_select(adev, 0, 0, 0, 0);
+       unlock_spi_csq_mutexes(adev);
+
+       /* Update the output parameters and return */
+       *pasid_wave_cnt = vmid_wave_cnt;
+       *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+                               adev->gfx.cu_info.max_waves_per_simd;
 }
 
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
@@ -726,4 +897,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                        kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 };
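
A minimal usage sketch (not part of this patch): a KFD caller can turn the two
output parameters into a CU occupancy percentage. The dev/cu_cnt names below
are hypothetical; on GFX9, cu_info typically reports 4 SIMDs per CU and 10
waves per SIMD, i.e. max_waves_per_cu = 40.

	/* Hypothetical caller; assumes a valid kgd_dev and a CU count cu_cnt */
	int pasid_wave_cnt, max_waves_per_cu, occupancy;

	dev->kfd2kgd->get_cu_occupancy(dev->kgd, pasid,
				       &pasid_wave_cnt, &max_waves_per_cu);

	/* Percentage of the device's wave slots this pasid occupies */
	occupancy = pasid_wave_cnt * 100 / (cu_cnt * max_waves_per_cu);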
index 17c010d..b4df646 100644 (file)
@@ -543,6 +543,7 @@ int amdgpu_mem_train_support(struct amdgpu_device *adev)
                case HW_REV(11, 0, 0):
                case HW_REV(11, 0, 5):
                case HW_REV(11, 0, 7):
+               case HW_REV(11, 0, 11):
                        ret = 1;
                        break;
                default:
index 3e35a8f..7abe950 100644 (file)
@@ -616,7 +616,7 @@ static bool amdgpu_atpx_detect(void)
        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
                vga_count++;
 
-               has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+               has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
 
                parent_pdev = pci_upstream_bridge(pdev);
                d3_supported |= parent_pdev && parent_pdev->bridge_d3;
@@ -626,7 +626,7 @@ static bool amdgpu_atpx_detect(void)
        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
                vga_count++;
 
-               has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+               has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
 
                parent_pdev = pci_upstream_bridge(pdev);
                d3_supported |= parent_pdev && parent_pdev->bridge_d3;
index abe0c27..2d125b8 100644 (file)
@@ -267,7 +267,7 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
                } else {
                        r = get_user(value, (uint32_t *)buf);
                        if (!r)
-                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
+                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
                }
                if (r) {
                        result = r;
index f7307af..e8b4175 100644 (file)
@@ -80,8 +80,6 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -303,10 +301,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * MMIO register access helper functions.
+ * register access helper functions.
  */
 /**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a memory mapped IO or indirect register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -314,30 +312,29 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
-                       uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+                           uint32_t reg, uint32_t acc_flags)
 {
        uint32_t ret;
 
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
-           down_read_trylock(&adev->reset_sem)) {
-               ret = amdgpu_kiq_rreg(adev, reg);
-               up_read(&adev->reset_sem);
-               return ret;
-       }
-
-       if ((reg * 4) < adev->rmmio_size)
-               ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
+       if (adev->in_pci_err_recovery)
+               return 0;
 
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+       if ((reg * 4) < adev->rmmio_size) {
+               if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_sem)) {
+                       ret = amdgpu_kiq_rreg(adev, reg);
+                       up_read(&adev->reset_sem);
+               } else {
+                       ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               ret = adev->pcie_rreg(adev, reg * 4);
        }
 
-       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+
        return ret;
 }
 
@@ -355,7 +352,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
  *
  * Returns the 8 bit value from the offset specified.
  */
-uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
@@ -376,33 +377,19 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
+{
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
 }
 
-static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
-                                      uint32_t reg, uint32_t v,
-                                      uint32_t acc_flags)
-{
-       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
-       if ((reg * 4) < adev->rmmio_size)
-               writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-}
-
 /**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a memory mapped IO or indirect register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -411,17 +398,27 @@ static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
+                       uint32_t acc_flags)
 {
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
-           down_read_trylock(&adev->reset_sem)) {
-               amdgpu_kiq_wreg(adev, reg, v);
-               up_read(&adev->reset_sem);
+       if (adev->in_pci_err_recovery)
                return;
+
+       if ((reg * 4) < adev->rmmio_size) {
+               if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_sem)) {
+                       amdgpu_kiq_wreg(adev, reg, v);
+                       up_read(&adev->reset_sem);
+               } else {
+                       writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               adev->pcie_wreg(adev, reg * 4, v);
        }
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 }
 
 /*
@@ -429,18 +426,20 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
  *
  * this function is invoked only for debugfs register access
  * */
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+                            uint32_t reg, uint32_t v)
 {
-       if (amdgpu_sriov_fullaccess(adev) &&
-               adev->gfx.rlc.funcs &&
-               adev->gfx.rlc.funcs->is_rlcg_access_range) {
+       if (adev->in_pci_err_recovery)
+               return;
 
+       if (amdgpu_sriov_fullaccess(adev) &&
+           adev->gfx.rlc.funcs &&
+           adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+       } else {
+               writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
-
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -453,6 +452,9 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
  */
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
@@ -472,6 +474,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
@@ -491,6 +496,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
@@ -511,6 +519,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
@@ -529,6 +540,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
@@ -549,6 +563,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
@@ -557,6 +574,135 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 }
 
 /**
+ * amdgpu_device_indirect_rreg - read an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address to read from
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+                               u32 pcie_index, u32 pcie_data,
+                               u32 reg_addr)
+{
+       unsigned long flags;
+       u32 r;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       r = readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+       return r;
+}
+
+/**
+ * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address to read from
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+                                 u32 pcie_index, u32 pcie_data,
+                                 u32 reg_addr)
+{
+       unsigned long flags;
+       u64 r;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       /* read low 32 bits */
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       r = readl(pcie_data_offset);
+       /* read high 32 bits */
+       writel(reg_addr + 4, pcie_index_offset);
+       readl(pcie_index_offset);
+       r |= ((u64)readl(pcie_data_offset) << 32);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+       return r;
+}
+
+/**
+ * amdgpu_device_indirect_wreg - write to an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address to write to
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+                                u32 pcie_index, u32 pcie_data,
+                                u32 reg_addr, u32 reg_data)
+{
+       unsigned long flags;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel(reg_data, pcie_data_offset);
+       readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_indirect_wreg64 - write to a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address to write to
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+                                  u32 pcie_index, u32 pcie_data,
+                                  u32 reg_addr, u64 reg_data)
+{
+       unsigned long flags;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       /* write low 32 bits */
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+       readl(pcie_data_offset);
+       /* write high 32 bits */
+       writel(reg_addr + 4, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel((u32)(reg_data >> 32), pcie_data_offset);
+       readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
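
As a usage sketch (assumed, not part of this hunk), an ASIC backend can route
its pcie_rreg/pcie_wreg callbacks through these helpers using its own PCIE
index/data register offsets; the NBIO register names below are illustrative:

	/* Hypothetical wiring; mmPCIE_INDEX2/mmPCIE_DATA2 offsets assumed */
	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
	{
		u32 pcie_index = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
		u32 pcie_data = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);

		return amdgpu_device_indirect_rreg(adev, pcie_index,
						   pcie_data, reg);
	}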
+/**
  * amdgpu_invalid_rreg - dummy reg read function
  *
  * @adev: amdgpu device pointer
@@ -1224,11 +1370,15 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        amdgpu_gmc_tmz_set(adev);
 
-       if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
+       if (amdgpu_num_kcq == -1) {
+               amdgpu_num_kcq = 8;
+       } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
                amdgpu_num_kcq = 8;
                dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
        }
 
+       amdgpu_gmc_noretry_set(adev);
+
        return 0;
 }
 
@@ -1256,7 +1406,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
                pci_set_power_state(dev->pdev, PCI_D0);
-               pci_restore_state(dev->pdev);
+               amdgpu_device_load_pci_state(dev->pdev);
                r = pci_enable_device(dev->pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
@@ -1269,7 +1419,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
-               pci_save_state(dev->pdev);
+               amdgpu_device_cache_pci_state(dev->pdev);
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3cold);
@@ -1631,6 +1781,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_VEGA20:
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
        default:
                return 0;
        case CHIP_VEGA10:
@@ -1662,12 +1814,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
-       case CHIP_SIENNA_CICHLID:
-               chip_name = "sienna_cichlid";
-               break;
-       case CHIP_NAVY_FLOUNDER:
-               chip_name = "navy_flounder";
-               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -2999,6 +3145,7 @@ static const struct attribute *amdgpu_dev_attributes[] = {
        NULL
 };
 
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -3170,13 +3317,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
                dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-               return r;
+               goto failed_unmap;
        }
 
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
-               return r;
+               goto failed_unmap;
 
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
@@ -3217,6 +3364,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                }
        }
 
+       pci_enable_pcie_error_reporting(adev->ddev.pdev);
+
        /* Post card if necessary */
        if (amdgpu_device_need_post(adev)) {
                if (!adev->bios) {
@@ -3359,16 +3508,18 @@ fence_driver_init:
                flush_delayed_work(&adev->delayed_init_work);
 
        r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
-       if (r) {
+       if (r)
                dev_err(adev->dev, "Could not create amdgpu device attr\n");
-               return r;
-       }
 
        if (IS_ENABLED(CONFIG_PERF_EVENTS))
                r = amdgpu_pmu_init(adev);
        if (r)
                dev_err(adev->dev, "amdgpu_pmu_init failed\n");
 
+       /* Keep stored PCI config space at hand for restore on sudden PCI error */
+       if (amdgpu_device_cache_pci_state(adev->pdev))
+               pci_restore_state(pdev);
+
        return 0;
 
 failed:
@@ -3376,6 +3527,10 @@ failed:
        if (boco)
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
 
+failed_unmap:
+       iounmap(adev->rmmio);
+       adev->rmmio = NULL;
+
        return r;
 }
 
@@ -3393,11 +3548,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        flush_delayed_work(&adev->delayed_init_work);
        adev->shutdown = true;
 
+       kfree(adev->pci_state);
+
        /* make sure IB test finished before entering exclusive mode
         * to avoid preemption on IB test
         * */
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_request_full_gpu(adev, false);
+               amdgpu_virt_fini_data_exchange(adev);
+       }
 
        /* disable all interrupts */
        amdgpu_irq_disable_all(adev);
@@ -4031,6 +4190,11 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
        amdgpu_debugfs_wait_dump(adev);
 
+       if (amdgpu_sriov_vf(adev)) {
+               /* stop the data exchange thread */
+               amdgpu_virt_fini_data_exchange(adev);
+       }
+
        /* block all schedulers and reset given job's ring */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
@@ -4072,7 +4236,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                               struct list_head *device_list_handle,
-                              bool *need_full_reset_arg)
+                              bool *need_full_reset_arg,
+                              bool skip_hw_reset)
 {
        struct amdgpu_device *tmp_adev = NULL;
        bool need_full_reset = *need_full_reset_arg, vram_lost = false;
@@ -4082,7 +4247,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
         * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper links negotiation in FW (within 1 sec)
         */
-       if (need_full_reset) {
+       if (!skip_hw_reset && need_full_reset) {
                list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                        /* For XGMI run all resets in parallel to speed up the process */
                        if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -4477,7 +4642,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                if (r)
                        adev->asic_reset_res = r;
        } else {
-               r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+               r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
                if (r && r == -EAGAIN)
                        goto retry;
        }
@@ -4705,3 +4870,235 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 
        return 0;
 }
+
+static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (!ring || !ring->sched.thread)
+                       continue;
+
+               cancel_delayed_work_sync(&ring->sched.work_tdr);
+       }
+}
+
+/**
+ * amdgpu_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
+ */
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int i;
+
+       DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+
+       if (adev->gmc.xgmi.num_physical_nodes > 1) {
+               DRM_WARN("No support for XGMI hive yet...");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       switch (state) {
+       case pci_channel_io_normal:
+               return PCI_ERS_RESULT_CAN_RECOVER;
+       /* Fatal error, prepare for slot reset */
+       case pci_channel_io_frozen:
+               /*
+                * Cancel and wait for all TDRs in progress if failing to
+                * set adev->in_gpu_reset in amdgpu_device_lock_adev
+                *
+                * Locking adev->reset_sem will prevent any external access
+                * to GPU during PCI error recovery
+                */
+               while (!amdgpu_device_lock_adev(adev, NULL))
+                       amdgpu_cancel_all_tdr(adev);
+
+               /*
+                * Block any work scheduling as we do for regular GPU reset
+                * for the duration of the recovery
+                */
+               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                       struct amdgpu_ring *ring = adev->rings[i];
+
+                       if (!ring || !ring->sched.thread)
+                               continue;
+
+                       drm_sched_stop(&ring->sched, NULL);
+               }
+               return PCI_ERS_RESULT_NEED_RESET;
+       case pci_channel_io_perm_failure:
+               /* Permanent error, prepare for device removal */
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
+{
+
+       DRM_INFO("PCI error: mmio enabled callback!!\n");
+
+       /* TODO - dump whatever for debugging purposes */
+
+       /* This is called only if amdgpu_pci_error_detected returns
+        * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+        * works, so there is no need to reset the slot.
+        */
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int r, i;
+       bool need_full_reset = true;
+       u32 memsize;
+       struct list_head device_list;
+
+       DRM_INFO("PCI error: slot reset callback!!\n");
+
+       INIT_LIST_HEAD(&device_list);
+       list_add_tail(&adev->gmc.xgmi.head, &device_list);
+
+       /* wait for asic to come out of reset */
+       msleep(500);
+
+       /* Restore PCI confspace */
+       amdgpu_device_load_pci_state(pdev);
+
+       /* confirm ASIC came out of reset */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               memsize = amdgpu_asic_get_config_memsize(adev);
+
+               if (memsize != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+       if (memsize == 0xffffffff) {
+               r = -ETIME;
+               goto out;
+       }
+
+       adev->in_pci_err_recovery = true;       
+       r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+       adev->in_pci_err_recovery = false;
+       if (r)
+               goto out;
+
+       r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+
+out:
+       if (!r) {
+               if (amdgpu_device_cache_pci_state(adev->pdev))
+                       pci_restore_state(adev->pdev);
+
+               DRM_INFO("PCIe error recovery succeeded\n");
+       } else {
+               DRM_ERROR("PCIe error recovery failed, err:%d", r);
+               amdgpu_device_unlock_adev(adev);
+       }
+
+       return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's OK to
+ * resume normal operation. Restart the scheduler rings that were
+ * stopped during error detection and release the adev lock.
+ */
+void amdgpu_pci_resume(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int i;
+
+       DRM_INFO("PCI error: resume callback!!\n");
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (!ring || !ring->sched.thread)
+                       continue;
+
+               drm_sched_resubmit_jobs(&ring->sched);
+               drm_sched_start(&ring->sched, true);
+       }
+
+       amdgpu_device_unlock_adev(adev);
+}
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int r;
+
+       r = pci_save_state(pdev);
+       if (!r) {
+               kfree(adev->pci_state);
+
+               adev->pci_state = pci_store_saved_state(pdev);
+
+               if (!adev->pci_state) {
+                       DRM_ERROR("Failed to store PCI saved state");
+                       return false;
+               }
+       } else {
+               DRM_WARN("Failed to save PCI state, err:%d\n", r);
+               return false;
+       }
+
+       return true;
+}
+
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int r;
+
+       if (!adev->pci_state)
+               return false;
+
+       r = pci_load_saved_state(pdev, adev->pci_state);
+
+       if (!r) {
+               pci_restore_state(pdev);
+       } else {
+               DRM_WARN("Failed to load PCI state, err:%d\n", r);
+               return false;
+       }
+
+       return true;
+}
index 61a26c1..373cdeb 100644 (file)
@@ -44,9 +44,9 @@ struct amdgpu_df_funcs {
        void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
                                            bool enable);
        int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
-                                        int is_enable);
+                                        int is_add);
        int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
-                                        int is_disable);
+                                        int is_remove);
        void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
                                         uint64_t *count);
        uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
index c81206e..7cc7af2 100644 (file)
@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
           take the current one */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;
-               goto out;
+               return ret;
        }
        /* if we have no active crtcs, then drop the power ref
           we got before */
index 3ded6f4..c241317 100644 (file)
@@ -32,7 +32,6 @@
 #include <drm/drm_pciids.h>
 #include <linux/console.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_probe_helper.h>
@@ -148,7 +147,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry;
+int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = 0;
 int amdgpu_reset_method = -1; /* auto */
@@ -597,8 +596,13 @@ MODULE_PARM_DESC(mes,
        "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
 module_param_named(mes, amdgpu_mes, int, 0444);
 
+/**
+ * DOC: noretry (int)
+ * Disable retry faults in the GPU memory controller.
+ * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
+ */
 MODULE_PARM_DESC(noretry,
-       "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+       "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
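
For example, booting with amdgpu.noretry=1 on the kernel command line (or
setting noretry=1 at module load) forces retry faults off on every ASIC, while
the default of -1 lets the driver pick a per-ASIC value via
amdgpu_gmc_noretry_set() further down in this series.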
 
 /**
@@ -1073,8 +1077,16 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
-       {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+       {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+
+       /* Sienna_Cichlid */
+       {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
        {0, 0, 0}
 };
@@ -1102,6 +1114,16 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                return -ENODEV;
        }
 
+       /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping;
+        * however, SME requires an indirect IOMMU mapping because the encryption
+        * bit is beyond the DMA mask of the chip.
+        */
+       if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+               dev_info(&pdev->dev,
+                        "SME is not compatible with RAVEN\n");
+               return -ENOTSUPP;
+       }
+
 #ifdef CONFIG_DRM_AMDGPU_SI
        if (!amdgpu_si_support) {
                switch (flags & AMD_ASIC_MASK) {
@@ -1308,7 +1330,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                if (amdgpu_is_atpx_hybrid()) {
                        pci_ignore_hotplug(pdev);
                } else {
-                       pci_save_state(pdev);
+                       amdgpu_device_cache_pci_state(pdev);
                        pci_disable_device(pdev);
                        pci_ignore_hotplug(pdev);
                        pci_set_power_state(pdev, PCI_D3cold);
@@ -1341,7 +1363,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
                        pci_set_master(pdev);
                } else {
                        pci_set_power_state(pdev, PCI_D0);
-                       pci_restore_state(pdev);
+                       amdgpu_device_load_pci_state(pdev);
                        ret = pci_enable_device(pdev);
                        if (ret)
                                return ret;
@@ -1520,6 +1542,13 @@ static struct drm_driver kms_driver = {
        .patchlevel = KMS_DRIVER_PATCHLEVEL,
 };
 
+static struct pci_error_handlers amdgpu_pci_err_handler = {
+       .error_detected = amdgpu_pci_error_detected,
+       .mmio_enabled   = amdgpu_pci_mmio_enabled,
+       .slot_reset     = amdgpu_pci_slot_reset,
+       .resume         = amdgpu_pci_resume,
+};
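
For reference (standard PCI error recovery flow, not specific to this patch):
on an uncorrectable error the PCI core calls error_detected() first, then
mmio_enabled() or slot_reset() depending on the returned pci_ers_result_t, and
finally resume() once recovery has succeeded.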
+
 static struct pci_driver amdgpu_kms_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
@@ -1527,6 +1556,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
        .remove = amdgpu_pci_remove,
        .shutdown = amdgpu_pci_shutdown,
        .driver.pm = &amdgpu_pm_ops,
+       .err_handler = &amdgpu_pci_err_handler,
 };
 
 static int __init amdgpu_init(void)
index e811fec..8f4a8f8 100644 (file)
 
 static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 {
-       /* TODO: Gaming SKUs don't have the FRU EEPROM.
-        * Use this hack to address hangs on modprobe on gaming SKUs
-        * until a proper solution can be implemented by only supporting
-        * the explicit chip IDs for VG20 Server cards
-        *
-        * TODO: Add list of supported Arcturus DIDs once confirmed
+       /* Only server cards have the FRU EEPROM
+        * TODO: See if we can figure this out dynamically instead of
+        * having to parse VBIOS versions.
         */
-       if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
-           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
-           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
-               return true;
-       return false;
+       struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+       /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+        * we can use just the "DXXX" portion. If there were more models, we
+        * could convert the 3 characters to a hex integer and use a switch
+        * for ease/speed/readability. For now, 2 string comparisons are
+        * reasonable and not too expensive
+        */
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+               /* D161 and D163 are the VG20 server SKUs */
+               if (strnstr(atom_ctx->vbios_version, "D161",
+                           sizeof(atom_ctx->vbios_version)) ||
+                   strnstr(atom_ctx->vbios_version, "D163",
+                           sizeof(atom_ctx->vbios_version)))
+                       return true;
+               else
+                       return false;
+       default:
+               return false;
+       }
 }
 
 static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
index f29a861..1308d97 100644 (file)
@@ -26,4 +26,4 @@
 
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
 
-#endif  // __AMDGPU_PRODINFO_H__
+#endif  // __AMDGPU_FRU_EEPROM_H__
index d698142..8c9bacf 100644 (file)
@@ -693,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -757,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        BUG_ON(!ring->funcs->emit_wreg);
 
+       if (adev->in_pci_err_recovery)
+               return;
+
        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
index a611e78..258498c 100644 (file)
@@ -217,6 +217,7 @@ struct amdgpu_gfx_funcs {
        int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
        void (*reset_ras_error_count) (struct amdgpu_device *adev);
        void (*init_spm_golden)(struct amdgpu_device *adev);
+       void (*query_ras_error_status) (struct amdgpu_device *adev);
 };
 
 struct sq_work {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644 (file)
index 0000000..66ebc2e
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GFXHUB_H__
+#define __AMDGPU_GFXHUB_H__
+
+struct amdgpu_gfxhub_funcs {
+       u64 (*get_fb_location)(struct amdgpu_device *adev);
+       u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+       void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+                       uint64_t page_table_base);
+       int (*gart_enable)(struct amdgpu_device *adev);
+
+       void (*gart_disable)(struct amdgpu_device *adev);
+       void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
+       void (*init)(struct amdgpu_device *adev);
+       int (*get_xgmi_info)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_gfxhub {
+       const struct amdgpu_gfxhub_funcs *funcs;
+};
+
+#endif
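
A usage sketch (assumed, not shown in this hunk): a GMC block selects the hub
implementation for its ASIC during early init and generic code then calls
through the ops table; gfxhub_v1_0_funcs here stands in for whichever table
the ASIC provides:

	/* Hypothetical selection in a gmc_v9-style early_init path */
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;

	/* Later, hub-agnostic code needs no per-ASIC branches */
	base = adev->gfxhub.funcs->get_fb_location(adev);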
index 213ef09..36604d7 100644 (file)
@@ -413,6 +413,44 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        }
 }
 
+/**
+ * amdgpu_gmc_noretry_set -- set per asic noretry defaults
+ * @adev: amdgpu_device pointer
+ *
+ * Set a per asic default for the no-retry parameter.
+ *
+ */
+void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+{
+       struct amdgpu_gmc *gmc = &adev->gmc;
+
+       switch (adev->asic_type) {
+       case CHIP_RAVEN:
+               /* Raven currently has issues with noretry
+                * regardless of what we decide for other
+                * asics; we should leave raven with
+                * noretry = 0 until we root cause the
+                * issues.
+                */
+               if (amdgpu_noretry == -1)
+                       gmc->noretry = 0;
+               else
+                       gmc->noretry = amdgpu_noretry;
+               break;
+       default:
+               /* default this to 0 for now, but we may want
+                * to change this in the future for certain
+                * GPUs as it can increase performance in
+                * certain cases.
+                */
+               if (amdgpu_noretry == -1)
+                       gmc->noretry = 0;
+               else
+                       gmc->noretry = amdgpu_noretry;
+               break;
+       }
+}
+
 void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
                                   bool enable)
 {
index d61bbde..aa0c837 100644 (file)
@@ -239,6 +239,7 @@ struct amdgpu_gmc {
 
        struct amdgpu_xgmi xgmi;
        struct amdgpu_irq_src   ecc_irq;
+       int noretry;
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -300,6 +301,7 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
 extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
 
 extern void
 amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
index 456a4a9..a5aaff1 100644 (file)
@@ -177,7 +177,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                        break;
                case CHIP_VEGA10:
                        /* turn runpm on if noretry=0 */
-                       if (!amdgpu_noretry)
+                       if (!adev->gmc.noretry)
                                adev->runpm = true;
                        break;
                default:
@@ -282,14 +282,25 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_TA:
-               if (query_fw->index > 1)
-                       return -EINVAL;
-               if (query_fw->index == 0) {
+               switch (query_fw->index) {
+               case 0:
                        fw_info->ver = adev->psp.ta_fw_version;
                        fw_info->feature = adev->psp.ta_xgmi_ucode_version;
-               } else {
+                       break;
+               case 1:
                        fw_info->ver = adev->psp.ta_fw_version;
                        fw_info->feature = adev->psp.ta_ras_ucode_version;
+                       break;
+               case 2:
+                       fw_info->ver = adev->psp.ta_fw_version;
+                       fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+                       break;
+               case 3:
+                       fw_info->ver = adev->psp.ta_fw_version;
+                       fw_info->feature = adev->psp.ta_dtm_ucode_version;
+                       break;
+               default:
+                       return -EINVAL;
                }
                break;
        case AMDGPU_INFO_FW_SDMA:
@@ -1385,13 +1396,31 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
                   fw_info.feature, fw_info.ver);
 
        query_fw.fw_type = AMDGPU_INFO_FW_TA;
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < 4; i++) {
                query_fw.index = i;
                ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
                if (ret)
                        continue;
-               seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
-                               i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+               switch (query_fw.index) {
+               case 0:
+                       seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                                       "RAS", fw_info.feature, fw_info.ver);
+                       break;
+               case 1:
+                       seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                                       "XGMI", fw_info.feature, fw_info.ver);
+                       break;
+               case 2:
+                       seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                                       "HDCP", fw_info.feature, fw_info.ver);
+                       break;
+               case 3:
+                       seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                                       "DTM", fw_info.feature, fw_info.ver);
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        /* SMC */
index 0c43d7f..1ae9bda 100644 (file)
@@ -40,6 +40,7 @@ struct amdgpu_mmhub_funcs {
                                uint64_t page_table_base);
        void (*update_power_gating)(struct amdgpu_device *adev,
                                 bool enable);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_mmhub {
index 04a430e..a04decb 100644 (file)
@@ -46,6 +46,7 @@
 
 #include <drm/drm_dp_mst_helper.h>
 #include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"
 
 struct amdgpu_bo;
 struct amdgpu_device;
@@ -404,7 +405,8 @@ struct amdgpu_crtc {
        struct amdgpu_flip_work *pflip_works;
        enum amdgpu_flip_status pflip_status;
        int deferred_flip_completion;
-       u32 last_flip_vblank;
+       /* parameters accessed from the DM IRQ handler */
+       struct dm_irq_params dm_irq_params;
        /* pll sharing */
        struct amdgpu_atom_ss ss;
        bool ss_enabled;
index d6c38e2..18be544 100644 (file)
@@ -161,10 +161,12 @@ static int psp_sw_init(void *handle)
        struct psp_context *psp = &adev->psp;
        int ret;
 
-       ret = psp_init_microcode(psp);
-       if (ret) {
-               DRM_ERROR("Failed to load psp firmware!\n");
-               return ret;
+       if (!amdgpu_sriov_vf(adev)) {
+               ret = psp_init_microcode(psp);
+               if (ret) {
+                       DRM_ERROR("Failed to load psp firmware!\n");
+                       return ret;
+               }
        }
 
        ret = psp_memory_training_init(psp);
@@ -178,7 +180,7 @@ static int psp_sw_init(void *handle)
                return ret;
        }
 
-       if (adev->asic_type == CHIP_NAVI10) {
+       if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
                ret= psp_sysfs_init(adev);
                if (ret) {
                        return ret;
@@ -219,6 +221,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
        int i;
        struct amdgpu_device *adev = psp->adev;
 
+       if (psp->adev->in_pci_err_recovery)
+               return 0;
+
        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32(reg_index);
                if (check_changed) {
@@ -245,6 +250,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
        bool ras_intr = false;
        bool skip_unsupport = false;
 
+       if (psp->adev->in_pci_err_recovery)
+               return 0;
+
        mutex_lock(&psp->mutex);
 
        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -929,6 +937,7 @@ static int psp_ras_load(struct psp_context *psp)
 {
        int ret;
        struct psp_gfx_cmd_resp *cmd;
+       struct ta_ras_shared_memory *ras_cmd;
 
        /*
         * TODO: bypass the loading in sriov for now
@@ -952,11 +961,20 @@ static int psp_ras_load(struct psp_context *psp)
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
 
+       ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf;
+
        if (!ret) {
-               psp->ras.ras_initialized = true;
                psp->ras.session_id = cmd->resp.session_id;
+
+               if (!ras_cmd->ras_status)
+                       psp->ras.ras_initialized = true;
+               else
+                       dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
        }
 
+       if (ret || ras_cmd->ras_status)
+               amdgpu_ras_fini(psp->adev);
+
        kfree(cmd);
 
        return ret;
index e5ea147..8bf6a7c 100644 (file)
@@ -1027,58 +1027,6 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
        return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
 }
 
-static void amdgpu_ras_sysfs_add_bad_page_node(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       struct attribute_group group;
-       struct bin_attribute *bin_attrs[] = {
-               &con->badpages_attr,
-               NULL,
-       };
-
-       con->badpages_attr = (struct bin_attribute) {
-               .attr = {
-                       .name = "gpu_vram_bad_pages",
-                       .mode = S_IRUGO,
-               },
-               .size = 0,
-               .private = NULL,
-               .read = amdgpu_ras_sysfs_badpages_read,
-       };
-
-       group.name = RAS_FS_NAME;
-       group.bin_attrs = bin_attrs;
-
-       sysfs_bin_attr_init(bin_attrs[0]);
-
-       sysfs_update_group(&adev->dev->kobj, &group);
-}
-
-static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       struct attribute *attrs[] = {
-               &con->features_attr.attr,
-               NULL
-       };
-       struct attribute_group group = {
-               .name = RAS_FS_NAME,
-               .attrs = attrs,
-       };
-
-       con->features_attr = (struct device_attribute) {
-               .attr = {
-                       .name = "features",
-                       .mode = S_IRUGO,
-               },
-                       .show = amdgpu_ras_sysfs_features_read,
-       };
-
-       sysfs_attr_init(attrs[0]);
-
-       return sysfs_create_group(&adev->dev->kobj, &group);
-}
-
 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1300,13 +1248,43 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 /* debugfs end */
 
 /* ras fs */
-
+static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+               amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static DEVICE_ATTR(features, S_IRUGO,
+               amdgpu_ras_sysfs_features_read, NULL);
 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
 {
-       amdgpu_ras_sysfs_create_feature_node(adev);
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       struct attribute_group group = {
+               .name = RAS_FS_NAME,
+       };
+       struct attribute *attrs[] = {
+               &con->features_attr.attr,
+               NULL
+       };
+       struct bin_attribute *bin_attrs[] = {
+               NULL,
+               NULL,
+       };
+       int r;
 
-       if (amdgpu_bad_page_threshold != 0)
-               amdgpu_ras_sysfs_add_bad_page_node(adev);
+       /* add features entry */
+       con->features_attr = dev_attr_features;
+       group.attrs = attrs;
+       sysfs_attr_init(attrs[0]);
+
+       if (amdgpu_bad_page_threshold != 0) {
+               /* add bad_page_features entry */
+               bin_attr_gpu_vram_bad_pages.private = NULL;
+               con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+               bin_attrs[0] = &con->badpages_attr;
+               group.bin_attrs = bin_attrs;
+               sysfs_bin_attr_init(bin_attrs[0]);
+       }
+
+       r = sysfs_create_group(&adev->dev->kobj, &group);
+       if (r)
+               dev_err(adev->dev, "Failed to create RAS sysfs group!");
 
        return 0;
 }
@@ -1498,6 +1476,45 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
        }
 }
 
+/* Parse RdRspStatus and WrRspStatus */
+void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
+               struct ras_query_if *info)
+{
+       /*
+        * Only two blocks need to query read/write
+        * RspStatus at the current state
+        */
+       switch (info->head.block) {
+       case AMDGPU_RAS_BLOCK__GFX:
+               if (adev->gfx.funcs->query_ras_error_status)
+                       adev->gfx.funcs->query_ras_error_status(adev);
+               break;
+       case AMDGPU_RAS_BLOCK__MMHUB:
+               if (adev->mmhub.funcs->query_ras_error_status)
+                       adev->mmhub.funcs->query_ras_error_status(adev);
+               break;
+       default:
+               break;
+       }
+}
+
+static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
+{
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       struct ras_manager *obj;
+
+       if (!con)
+               return;
+
+       list_for_each_entry(obj, &con->head, node) {
+               struct ras_query_if info = {
+                       .head = obj->head,
+               };
+
+               amdgpu_ras_error_status_query(adev, &info);
+       }
+}
+
 /* recovery begin */
 
 /* return 0 on success.
@@ -1568,8 +1585,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
                }
 
                list_for_each_entry(remote_adev,
-                               device_list_handle, gmc.xgmi.head)
+                               device_list_handle, gmc.xgmi.head) {
+                       amdgpu_ras_query_err_status(remote_adev);
                        amdgpu_ras_log_on_err_counter(remote_adev);
+               }
 
                amdgpu_put_xgmi_hive(hive);
        }
@@ -1967,8 +1986,7 @@ static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
 {
        if (adev->asic_type != CHIP_VEGA10 &&
                adev->asic_type != CHIP_VEGA20 &&
-               adev->asic_type != CHIP_ARCTURUS &&
-               adev->asic_type != CHIP_SIENNA_CICHLID)
+               adev->asic_type != CHIP_ARCTURUS)
                return 1;
        else
                return 0;
@@ -2012,6 +2030,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 
        *supported = amdgpu_ras_enable == 0 ?
                        0 : *hw_supported & amdgpu_ras_mask;
+
        adev->ras_features = *supported;
 }
 
index 63e734a..ee9480d 100644 (file)
@@ -35,7 +35,7 @@
 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
         job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
 
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
                      (unsigned long)__entry->value)
 );
 
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -321,6 +321,49 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
            TP_ARGS(mapping)
 );
 
+TRACE_EVENT(amdgpu_vm_update_ptes,
+           TP_PROTO(struct amdgpu_vm_update_params *p,
+                    uint64_t start, uint64_t end,
+                    unsigned int nptes, uint64_t dst,
+                    uint64_t incr, uint64_t flags,
+                    pid_t pid, uint64_t vm_ctx),
+       TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
+       TP_STRUCT__entry(
+                        __field(u64, start)
+                        __field(u64, end)
+                        __field(u64, flags)
+                        __field(unsigned int, nptes)
+                        __field(u64, incr)
+                        __field(pid_t, pid)
+                        __field(u64, vm_ctx)
+                        __dynamic_array(u64, dst, nptes)
+       ),
+
+       TP_fast_assign(
+                       unsigned int i;
+
+                       __entry->start = start;
+                       __entry->end = end;
+                       __entry->flags = flags;
+                       __entry->incr = incr;
+                       __entry->nptes = nptes;
+                       __entry->pid = pid;
+                       __entry->vm_ctx = vm_ctx;
+                       for (i = 0; i < nptes; ++i) {
+                               u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
+                                       p->pages_addr, dst) : dst;
+
+                               ((u64 *)__get_dynamic_array(dst))[i] = addr;
+                               dst += incr;
+                       }
+       ),
+       TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
+                 " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+                 __entry->vm_ctx, __entry->start, __entry->end,
+                 __entry->flags, __entry->incr,  __print_array(
+                 __get_dynamic_array(dst), __entry->nptes, 8))
+);
+
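The tracepoint above records a variable number of PTE destinations per event via the ftrace __dynamic_array idiom: declare the per-event size in TP_STRUCT__entry, fill the buffer through __get_dynamic_array() in TP_fast_assign, and render it with __print_array(). A stripped-down sketch of the same construct, with a hypothetical event name:

TRACE_EVENT(demo_batch,
	    TP_PROTO(const u64 *vals, unsigned int n),
	    TP_ARGS(vals, n),
	    TP_STRUCT__entry(
			     __field(unsigned int, n)
			     __dynamic_array(u64, vals, n)	/* sized per event */
	    ),
	    TP_fast_assign(
			   __entry->n = n;
			   memcpy(__get_dynamic_array(vals), vals,
				  n * sizeof(u64));
	    ),
	    TP_printk("n=%u vals=%s", __entry->n,
		      __print_array(__get_dynamic_array(vals),
				    __entry->n, sizeof(u64)))
);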
 TRACE_EVENT(amdgpu_vm_set_ptes,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint64_t flags, bool direct),
index e7b67dc..8039d23 100644 (file)
@@ -1016,6 +1016,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 
 release_sg:
        kfree(ttm->sg);
+       ttm->sg = NULL;
        return r;
 }
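The added assignment is the usual stale-pointer guard on an error path: clearing the pointer right after kfree() keeps a later unpopulate/teardown from freeing or walking the same scatterlist again. The general shape, with a hypothetical obj:

	kfree(obj->sg);
	obj->sg = NULL;	/* kfree(NULL) is a no-op, so repeated teardown is safe */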
 
@@ -1159,7 +1160,12 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 }
 
 /**
- * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
+ * through AGP or GART aperture.
+ *
+ * If bo is accessible through AGP aperture, then use AGP aperture
+ * to access bo; otherwise allocate logical space in GART aperture
+ * and map bo to GART aperture.
  */
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
index 495c3d7..f3b7287 100644 (file)
@@ -68,6 +68,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
+       mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
        for (i = 0; i < adev->vcn.num_vcn_inst; i++)
                atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -237,6 +238,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        }
 
        release_firmware(adev->vcn.fw);
+       mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);
 
        return 0;
index 7a9b804..1769115 100644 (file)
@@ -220,6 +220,7 @@ struct amdgpu_vcn {
        struct amdgpu_vcn_inst   inst[AMDGPU_MAX_VCN_INSTANCES];
        struct amdgpu_vcn_reg    internal;
        struct mutex             vcn_pg_lock;
+       struct mutex            vcn1_jpeg1_workaround;
        atomic_t                 total_submission_cnt;
 
        unsigned        harvest_config;
index f76961d..d0aea5e 100644 (file)
 #include "soc15.h"
 #include "nv.h"
 
+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+       do { \
+               vf2pf_info->ucode_info[ucode].id = ucode; \
+               vf2pf_info->ucode_info[ucode].version = ver; \
+       } while (0)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
        /* By now all MMIO pages except mailbox are blocked */
@@ -239,10 +245,10 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
 }
 
 
-int amdgpu_virt_fw_reserve_get_checksum(void *obj,
-                                       unsigned long obj_size,
-                                       unsigned int key,
-                                       unsigned int chksum)
+unsigned int amd_sriov_msg_checksum(void *obj,
+                               unsigned long obj_size,
+                               unsigned int key,
+                               unsigned int checksum)
 {
        unsigned int ret = key;
        unsigned long i = 0;
@@ -252,9 +258,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
-       /* minus the chksum itself */
-       pos = (char *)&chksum;
-       for (i = 0; i < sizeof(chksum); ++i)
+       /* subtract the stored checksum's own bytes */
+       pos = (char *)&checksum;
+       for (i = 0; i < sizeof(checksum); ++i)
                ret -= *(pos + i);
        return ret;
 }
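The renamed helper is a plain byte sum: seed the sum with the key, add every byte of the object, then subtract the bytes of the stored checksum so the result equals a sum taken with the checksum field zeroed. A standalone userspace sketch of the round trip (the message layout here is hypothetical):

#include <stdio.h>

static unsigned int msg_checksum(const void *obj, unsigned long obj_size,
				 unsigned int key, unsigned int checksum)
{
	const unsigned char *pos = obj;
	unsigned int ret = key;
	unsigned long i;

	for (i = 0; i < obj_size; ++i)
		ret += pos[i];
	/* cancel the stored checksum's own contribution */
	pos = (const unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= pos[i];
	return ret;
}

int main(void)
{
	struct { unsigned int payload; unsigned int checksum; } msg = { 42, 0 };
	unsigned int key = 0x1234;

	/* writer side: the field is still zero while the sum is taken */
	msg.checksum = msg_checksum(&msg, sizeof(msg), key, 0);
	/* reader side: recompute and compare against the stored value */
	printf("valid: %d\n",
	       msg_checksum(&msg, sizeof(msg), key, msg.checksum) == msg.checksum);
	return 0;
}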
@@ -415,33 +421,188 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
        }
 }
 
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
 {
-       uint32_t pf2vf_size = 0;
-       uint32_t checksum = 0;
+       struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
+       uint32_t checksum;
        uint32_t checkval;
-       char *str;
+
+       if (adev->virt.fw_reserve.p_pf2vf == NULL)
+               return -EINVAL;
+
+       if (pf2vf_info->size > 1024) {
+               DRM_ERROR("invalid pf2vf message size\n");
+               return -EINVAL;
+       }
+
+       switch (pf2vf_info->version) {
+       case 1:
+               checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+               checkval = amd_sriov_msg_checksum(
+                       adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+                       adev->virt.fw_reserve.checksum_key, checksum);
+               if (checksum != checkval) {
+                       DRM_ERROR("invalid pf2vf message\n");
+                       return -EINVAL;
+               }
+
+               adev->virt.gim_feature =
+                       ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+               break;
+       case 2:
+               /* TODO: missing key, need to add it later */
+               checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+               checkval = amd_sriov_msg_checksum(
+                       adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+                       0, checksum);
+               if (checksum != checkval) {
+                       DRM_ERROR("invalid pf2vf message\n");
+                       return -EINVAL;
+               }
+
+               adev->virt.vf2pf_update_interval_ms =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+               adev->virt.gim_feature =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+
+               break;
+       default:
+               DRM_ERROR("invalid pf2vf version\n");
+               return -EINVAL;
+       }
+
+       /* clamp interval values that are too large or too small */
+       if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+               adev->virt.vf2pf_update_interval_ms = 2000;
+
+       return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
+{
+       struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+       vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+       if (adev->virt.fw_reserve.p_vf2pf == NULL)
+               return;
+
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ta_ras_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.ta_xgmi_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
+}
+
+static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+{
+       struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+       struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+       vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+       if (adev->virt.fw_reserve.p_vf2pf == NULL)
+               return -EINVAL;
+
+       memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
+
+       vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
+       vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
+
+#ifdef MODULE
+       if (THIS_MODULE->version != NULL)
+               strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
+       else
+#endif
+               strcpy(vf2pf_info->driver_version, "N/A");
+
+       vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
+       vf2pf_info->driver_cert = 0;
+       vf2pf_info->os_info.all = 0;
+
+       vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
+       vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+       vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
+       vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
+
+       amdgpu_virt_populate_vf2pf_ucode_info(adev);
+
+       /* TODO: read dynamic info */
+       vf2pf_info->gfx_usage = 0;
+       vf2pf_info->compute_usage = 0;
+       vf2pf_info->encode_usage = 0;
+       vf2pf_info->decode_usage = 0;
+
+       vf2pf_info->checksum =
+               amd_sriov_msg_checksum(
+               vf2pf_info, vf2pf_info->header.size, 0, 0);
+
+       return 0;
+}
+
+void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+{
+       struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+
+       amdgpu_virt_read_pf2vf_data(adev);
+       amdgpu_virt_write_vf2pf_data(adev);
+
+       schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
+}
+
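The exchange now runs as a self-rearming delayed work item. Note that schedule_delayed_work() takes its delay in jiffies, so an interval kept in milliseconds would normally pass through msecs_to_jiffies() first, as in this sketch (demo_* names are hypothetical):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work demo_work;
static unsigned int demo_interval_ms = 2000;

static void demo_work_fn(struct work_struct *work)
{
	/* read pf2vf, write vf2pf, then rearm */
	schedule_delayed_work(&demo_work, msecs_to_jiffies(demo_interval_ms));
}

static void demo_start(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	schedule_delayed_work(&demo_work, msecs_to_jiffies(demo_interval_ms));
}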
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+{
+       if (adev->virt.vf2pf_update_interval_ms != 0) {
+               DRM_INFO("clean up the vf2pf work item\n");
+               flush_delayed_work(&adev->virt.vf2pf_work);
+               cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+       }
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
        uint64_t bp_block_offset = 0;
        uint32_t bp_block_size = 0;
-       struct amdgim_pf2vf_info_v2 *pf2vf_v2 = NULL;
+       struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
 
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
+       adev->virt.vf2pf_update_interval_ms = 0;
 
        if (adev->mman.fw_vram_usage_va != NULL) {
+               adev->virt.vf2pf_update_interval_ms = 2000;
+
                adev->virt.fw_reserve.p_pf2vf =
-                       (struct amd_sriov_msg_pf2vf_info_header *)(
-                       adev->mman.fw_vram_usage_va + AMDGIM_DATAEXCHANGE_OFFSET);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
-
-               /* pf2vf message must be in 4K */
-               if (pf2vf_size > 0 && pf2vf_size < 4096) {
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
-                               pf2vf_v2 = (struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf;
-                               bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_L & 0xFFFFFFFF) |
-                                               ((((uint64_t)pf2vf_v2->bp_block_offset_H) << 32) & 0xFFFFFFFF00000000);
+                       (struct amd_sriov_msg_pf2vf_info_header *)
+                       (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+               adev->virt.fw_reserve.p_vf2pf =
+                       (struct amd_sriov_msg_vf2pf_info_header *)
+                       (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+
+               amdgpu_virt_read_pf2vf_data(adev);
+               amdgpu_virt_write_vf2pf_data(adev);
+
+               /* bad page handling for version 2 */
+               if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
+                               pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+
+                               bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+                                               ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
                                bp_block_size = pf2vf_v2->bp_block_size;
 
                                if (bp_block_size && !adev->virt.ras_init_done)
@@ -450,37 +611,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                                if (adev->virt.ras_init_done)
                                        amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
                        }
+       }
 
-                       checkval = amdgpu_virt_fw_reserve_get_checksum(
-                               adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
-                               adev->virt.fw_reserve.checksum_key, checksum);
-                       if (checkval == checksum) {
-                               adev->virt.fw_reserve.p_vf2pf =
-                                       ((void *)adev->virt.fw_reserve.p_pf2vf +
-                                       pf2vf_size);
-                               memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
-                                       sizeof(amdgim_vf2pf_info));
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
-                                       AMDGPU_FW_VRAM_VF2PF_VER);
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
-                                       sizeof(amdgim_vf2pf_info));
-                               AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
-                                       &str);
-#ifdef MODULE
-                               if (THIS_MODULE->version != NULL)
-                                       strcpy(str, THIS_MODULE->version);
-                               else
-#endif
-                                       strcpy(str, "N/A");
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
-                                       0);
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
-                                       amdgpu_virt_fw_reserve_get_checksum(
-                                       adev->virt.fw_reserve.p_vf2pf,
-                                       pf2vf_size,
-                                       adev->virt.fw_reserve.checksum_key, 0));
-                       }
-               }
+       if (adev->virt.vf2pf_update_interval_ms != 0) {
+               INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+               schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
        }
 }
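The bad-page block offset arrives split into two 32-bit words; the code above ORs the low word with the high word shifted up by 32 (the 0xFFFFFFFF masks are redundant once the operands have been widened to 64 bits). The same reassembly as a small helper, for illustration:

#include <stdint.h>

static inline uint64_t bp_block_offset(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}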
 
index b2046c3..8dd624c 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef AMDGPU_VIRT_H
 #define AMDGPU_VIRT_H
 
+#include "amdgv_sriovmsg.h"
+
 #define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
 #define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
 #define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
@@ -79,7 +81,10 @@ struct amdgpu_virt_fw_reserve {
        struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
        unsigned int checksum_key;
 };
+
 /*
+ * Legacy GIM header
+ *
  * Definitions shared between PF and VF
  * Structures forcibly aligned to 4 to keep the same style as PF.
  */
@@ -101,15 +106,7 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
 
-struct amd_sriov_msg_pf2vf_info_header {
-       /* the total structure size in byte. */
-       uint32_t size;
-       /* version of this structure, written by the GIM */
-       uint32_t version;
-       /* reserved */
-       uint32_t reserved[2];
-} __aligned(4);
-struct  amdgim_pf2vf_info_v1 {
+struct amdgim_pf2vf_info_v1 {
        /* header contains size and version */
        struct amd_sriov_msg_pf2vf_info_header header;
        /* max_width * max_height */
@@ -128,54 +125,6 @@ struct  amdgim_pf2vf_info_v1 {
        unsigned int checksum;
 } __aligned(4);
 
-struct  amdgim_pf2vf_info_v2 {
-       /* header contains size and version */
-       struct amd_sriov_msg_pf2vf_info_header header;
-       /* use private key from mailbox 2 to create chueksum */
-       uint32_t checksum;
-       /* The features flags of the GIM driver supports. */
-       uint32_t feature_flags;
-       /* max_width * max_height */
-       uint32_t uvd_enc_max_pixels_count;
-       /* 16x16 pixels/sec, codec independent */
-       uint32_t uvd_enc_max_bandwidth;
-       /* max_width * max_height */
-       uint32_t vce_enc_max_pixels_count;
-       /* 16x16 pixels/sec, codec independent */
-       uint32_t vce_enc_max_bandwidth;
-       /* Bad pages block position in BYTE */
-       uint32_t bp_block_offset_L;
-       uint32_t bp_block_offset_H;
-       /* Bad pages block size in BYTE */
-       uint32_t bp_block_size;
-       /* MEC FW position in kb from the start of VF visible frame buffer */
-       uint32_t mecfw_kboffset_L;
-       uint32_t mecfw_kboffset_H;
-       /* MEC FW size in KB */
-       uint32_t mecfw_ksize;
-       /* UVD FW position in kb from the start of VF visible frame buffer */
-       uint32_t uvdfw_kboffset_L;
-       uint32_t uvdfw_kboffset_H;
-       /* UVD FW size in KB */
-       uint32_t uvdfw_ksize;
-       /* VCE FW position in kb from the start of VF visible frame buffer */
-       uint32_t vcefw_kboffset_L;
-       uint32_t vcefw_kboffset_H;
-       /* VCE FW size in KB */
-       uint32_t vcefw_ksize;
-       uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (18 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 0)];
-} __aligned(4);
-
-
-struct amd_sriov_msg_vf2pf_info_header {
-       /* the total structure size in byte. */
-       uint32_t size;
-       /*version of this structure, written by the guest */
-       uint32_t version;
-       /* reserved */
-       uint32_t reserved[2];
-} __aligned(4);
-
 struct amdgim_vf2pf_info_v1 {
        /* header contains size and version */
        struct amd_sriov_msg_vf2pf_info_header header;
@@ -237,31 +186,6 @@ struct amdgim_vf2pf_info_v2 {
        uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
 } __aligned(4);
 
-#define AMDGPU_FW_VRAM_VF2PF_VER 2
-typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
-
-#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
-       do { \
-               ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
-       } while (0)
-
-#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
-       do { \
-               (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
-       } while (0)
-
-#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
-       do { \
-               if (!adev->virt.fw_reserve.p_pf2vf) \
-                       *(val) = 0; \
-               else { \
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
-                               *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
-                               *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-               } \
-       } while (0)
-
 struct amdgpu_virt_ras_err_handler_data {
        /* point to bad page records array */
        struct eeprom_table_record *bps;
@@ -285,7 +209,7 @@ struct amdgpu_virt {
        struct work_struct              flr_work;
        struct amdgpu_mm_table          mm_table;
        const struct amdgpu_virt_ops    *ops;
-       struct amdgpu_vf_error_buffer   vf_errors;
+       struct amdgpu_vf_error_buffer   vf_errors;
        struct amdgpu_virt_fw_reserve   fw_reserve;
        uint32_t gim_feature;
        uint32_t reg_access_mode;
@@ -293,6 +217,10 @@ struct amdgpu_virt {
        bool tdr_debug;
        struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
        bool ras_init_done;
+
+       /* vf2pf message */
+       struct delayed_work vf2pf_work;
+       uint32_t vf2pf_update_interval_ms;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -341,11 +269,9 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
-int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
-                                       unsigned int key,
-                                       unsigned int chksum);
 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_detect_virtualization(struct amdgpu_device *adev);
 
 bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
index 420931d..2b65e83 100644 (file)
@@ -1502,6 +1502,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 
                        pt = cursor.entry->base.bo;
                        shift = parent_shift;
+                       frag_end = max(frag_end, ALIGN(frag_start + 1,
+                                  1ULL << shift));
                }
 
                /* Looks good so far, calculate parameters for the update */
@@ -1513,19 +1515,26 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                entry_end = min(entry_end, end);
 
                do {
+                       struct amdgpu_vm *vm = params->vm;
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;
+                       uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
 
                        /* This can happen when we set higher level PDs to
                         * silent to stop fault floods.
                         */
                        nptes = max(nptes, 1u);
+
+                       trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+                                                   nptes, dst, incr, upd_flags,
+                                                   vm->task_info.pid,
+                                                   vm->immediate.fence_context);
                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
-                                              flags | AMDGPU_PTE_FRAG(frag));
+                                              upd_flags);
 
                        pe_start += nptes * 8;
-                       dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+                       dst += nptes * incr;
 
                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
index 770025a..7c46937 100644 (file)
@@ -98,7 +98,7 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
 #define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)
 
-/* How to programm VM fault handling */
+/* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER     0
 #define AMDGPU_VM_FAULT_STOP_FIRST     1
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
index 39c704a..0786e75 100644 (file)
@@ -59,7 +59,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: kmap addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
index 189d46e..db79057 100644 (file)
@@ -155,7 +155,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
@@ -187,7 +187,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
new file mode 100644 (file)
index 0000000..5355827
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018-2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGV_SRIOV_MSG__H_
+#define AMDGV_SRIOV_MSG__H_
+
+/* unit in kilobytes */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET              0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB             64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB    AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB      4
+
+/*
+ * layout
+ * 0           64KB        65KB        66KB
+ * |   VBIOS   |   PF2VF   |   VF2PF   |   Bad Page   | ...
+ * |   64KB    |   1KB     |   1KB     |
+ */
+#define AMD_SRIOV_MSG_SIZE_KB                   1
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB           AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB           (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB        (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+
+/*
+ * PF2VF history log:
+ * v1 defined in amdgim
+ * v2 current
+ *
+ * VF2PF history log:
+ * v1 defined in amdgim
+ * v2 defined in amdgim
+ * v3 current
+ */
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER                        2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER                        3
+
+#define AMD_SRIOV_MSG_RESERVE_UCODE            24
+
+enum amd_sriov_ucode_engine_id {
+       AMD_SRIOV_UCODE_ID_VCE = 0,
+       AMD_SRIOV_UCODE_ID_UVD,
+       AMD_SRIOV_UCODE_ID_MC,
+       AMD_SRIOV_UCODE_ID_ME,
+       AMD_SRIOV_UCODE_ID_PFP,
+       AMD_SRIOV_UCODE_ID_CE,
+       AMD_SRIOV_UCODE_ID_RLC,
+       AMD_SRIOV_UCODE_ID_RLC_SRLC,
+       AMD_SRIOV_UCODE_ID_RLC_SRLG,
+       AMD_SRIOV_UCODE_ID_RLC_SRLS,
+       AMD_SRIOV_UCODE_ID_MEC,
+       AMD_SRIOV_UCODE_ID_MEC2,
+       AMD_SRIOV_UCODE_ID_SOS,
+       AMD_SRIOV_UCODE_ID_ASD,
+       AMD_SRIOV_UCODE_ID_TA_RAS,
+       AMD_SRIOV_UCODE_ID_TA_XGMI,
+       AMD_SRIOV_UCODE_ID_SMC,
+       AMD_SRIOV_UCODE_ID_SDMA,
+       AMD_SRIOV_UCODE_ID_SDMA2,
+       AMD_SRIOV_UCODE_ID_VCN,
+       AMD_SRIOV_UCODE_ID_DMCU,
+       AMD_SRIOV_UCODE_ID__MAX
+};
+
+#pragma pack(push, 1)  // PF2VF / VF2PF data areas are byte packed
+
+union amd_sriov_msg_feature_flags {
+       struct {
+               uint32_t  error_log_collect  : 1;
+               uint32_t  host_load_ucodes   : 1;
+               uint32_t  host_flr_vramlost  : 1;
+               uint32_t  mm_bw_management   : 1;
+               uint32_t  pp_one_vf_mode     : 1;
+               uint32_t  reserved           : 27;
+       } flags;
+       uint32_t      all;
+};
+
+union amd_sriov_msg_os_info {
+       struct {
+               uint32_t  windows            : 1;
+               uint32_t  reserved           : 31;
+       } info;
+       uint32_t      all;
+};
+
+struct amd_sriov_msg_pf2vf_info_header {
+       /* the total structure size in bytes */
+       uint32_t size;
+       /* version of this structure, written by the HOST */
+       uint32_t version;
+       /* reserved */
+       uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_pf2vf_info {
+       /* header contains size and version */
+       struct amd_sriov_msg_pf2vf_info_header header;
+       /* use private key from mailbox 2 to create checksum */
+       uint32_t checksum;
+       /* The feature flags that the HOST driver supports */
+       union amd_sriov_msg_feature_flags feature_flags;
+       /* (max_width * max_height * fps) / (16 * 16) */
+       uint32_t hevc_enc_max_mb_per_second;
+       /* (max_width * max_height) / (16 * 16) */
+       uint32_t hevc_enc_max_mb_per_frame;
+       /* (max_width * max_height * fps) / (16 * 16) */
+       uint32_t avc_enc_max_mb_per_second;
+       /* (max_width * max_height) / (16 * 16) */
+       uint32_t avc_enc_max_mb_per_frame;
+       /* MEC FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t mecfw_offset;
+       /* MEC FW size in BYTE */
+       uint32_t mecfw_size;
+       /* UVD FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t uvdfw_offset;
+       /* UVD FW size in BYTE */
+       uint32_t uvdfw_size;
+       /* VCE FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t vcefw_offset;
+       /* VCE FW size in BYTE */
+       uint32_t vcefw_size;
+       /* Bad pages block position in BYTE */
+       uint32_t bp_block_offset_low;
+       uint32_t bp_block_offset_high;
+       /* Bad pages block size in BYTE */
+       uint32_t bp_block_size;
+       /* frequency for VF to update the VF2PF area in msec, 0 = manual */
+       uint32_t vf2pf_update_interval_ms;
+       /* identification in ROCm SMI */
+       uint64_t uuid;
+       uint32_t fcn_idx;
+       /* reserved */
+       uint32_t reserved[256-26];
+};
+
+struct amd_sriov_msg_vf2pf_info_header {
+       /* the total structure size in bytes */
+       uint32_t size;
+       /* version of this structure, written by the guest */
+       uint32_t version;
+       /* reserved */
+       uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_vf2pf_info {
+       /* header contains size and version */
+       struct amd_sriov_msg_vf2pf_info_header header;
+       uint32_t checksum;
+       /* driver version */
+       uint8_t  driver_version[64];
+       /* driver certification, 1=WHQL, 0=None */
+       uint32_t driver_cert;
+       /* guest OS type and version */
+       union amd_sriov_msg_os_info os_info;
+       /* guest fb usage in MB */
+       uint32_t fb_usage;
+       /* guest gfx engine usage percentage */
+       uint32_t gfx_usage;
+       /* guest gfx engine health percentage */
+       uint32_t gfx_health;
+       /* guest compute engine usage percentage */
+       uint32_t compute_usage;
+       /* guest compute engine health percentage */
+       uint32_t compute_health;
+       /* guest avc engine usage percentage. 0xffff means N/A */
+       uint32_t avc_enc_usage;
+       /* guest avc engine health percentage. 0xffff means N/A */
+       uint32_t avc_enc_health;
+       /* guest hevc engine usage percentage. 0xffff means N/A */
+       uint32_t hevc_enc_usage;
+       /* guest hevc engine health percentage. 0xffff means N/A */
+       uint32_t hevc_enc_health;
+       /* combined encode/decode usage */
+       uint32_t encode_usage;
+       uint32_t decode_usage;
+       /* Version of PF2VF that VF understands */
+       uint32_t pf2vf_version_required;
+       /* additional FB usage */
+       uint32_t fb_vis_usage;
+       uint32_t fb_vis_size;
+       uint32_t fb_size;
+       /* guest ucode data, each one is 1.25 Dword */
+       struct {
+               uint8_t  id;
+               uint32_t version;
+       } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+
+       /* reserved */
+       uint32_t reserved[256-68];
+};
+
+/* mailbox message sent from guest to host */
+enum amd_sriov_mailbox_request_message {
+       MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1,
+       MB_REQ_MSG_REL_GPU_INIT_ACCESS,
+       MB_REQ_MSG_REQ_GPU_FINI_ACCESS,
+       MB_REQ_MSG_REL_GPU_FINI_ACCESS,
+       MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
+       MB_REQ_MSG_REQ_GPU_INIT_DATA,
+
+       MB_REQ_MSG_LOG_VF_ERROR       = 200,
+};
+
+/* mailbox message sent from host to guest */
+enum amd_sriov_mailbox_response_message {
+       MB_RES_MSG_CLR_MSG_BUF = 0,
+       MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+       MB_RES_MSG_FLR_NOTIFICATION,
+       MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
+       MB_RES_MSG_SUCCESS,
+       MB_RES_MSG_FAIL,
+       MB_RES_MSG_QUERY_ALIVE,
+       MB_RES_MSG_GPU_INIT_DATA_READY,
+
+       MB_RES_MSG_TEXT_MESSAGE = 255
+};
+
+/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
+enum amd_sriov_gpu_init_data_version {
+       GPU_INIT_DATA_READY_V1 = 1,
+};
+
+#pragma pack(pop)      // Restore previous packing option
+
+/* checksum function between host and guest */
+unsigned int amd_sriov_msg_checksum(void *obj,
+                               unsigned long obj_size,
+                               unsigned int key,
+                               unsigned int checksum);
+
+/* assertion at compile time */
+#ifdef __linux__
+#define stringification(s) _stringification(s)
+#define _stringification(s) #s
+
+_Static_assert(
+       sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+       "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+       sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+       "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+       AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+       "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+
+_Static_assert(
+       AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+       "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+
+#undef _stringification
+#undef stringification
+#endif
+
+#endif /* AMDGV_SRIOV_MSG__H_ */
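The offsets in this header stay in kilobytes and are shifted left by 10 at the point of use (see amdgpu_virt_init_data_exchange above). A standalone check of the layout arithmetic, with base standing in for fw_vram_usage_va:

#include <stdint.h>
#include <stdio.h>

#define VBIOS_SIZE_KB      64
#define MSG_SIZE_KB        1
#define PF2VF_OFFSET_KB    VBIOS_SIZE_KB			/* 64 KB */
#define VF2PF_OFFSET_KB    (PF2VF_OFFSET_KB + MSG_SIZE_KB)	/* 65 KB */
#define BAD_PAGE_OFFSET_KB (VF2PF_OFFSET_KB + MSG_SIZE_KB)	/* 66 KB */

int main(void)
{
	uintptr_t base = 0;	/* stands in for fw_vram_usage_va */

	printf("pf2vf    at base + 0x%05lx\n",
	       (unsigned long)(base + (PF2VF_OFFSET_KB << 10)));
	printf("vf2pf    at base + 0x%05lx\n",
	       (unsigned long)(base + (VF2PF_OFFSET_KB << 10)));
	printf("bad page at base + 0x%05lx\n",
	       (unsigned long)(base + (BAD_PAGE_OFFSET_KB << 10)));
	return 0;	/* prints 0x10000, 0x10400, 0x10800 */
}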
index 401c99f..db953e9 100644 (file)
@@ -316,14 +316,9 @@ static int cik_ih_sw_fini(void *handle)
 
 static int cik_ih_hw_init(void *handle)
 {
-       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = cik_ih_irq_init(adev);
-       if (r)
-               return r;
-
-       return 0;
+       return cik_ih_irq_init(adev);
 }
 
 static int cik_ih_hw_fini(void *handle)
index cc93577..b4d4b76 100644 (file)
@@ -47,6 +47,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
 static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
                                              int index);
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+                               unsigned crtc_id);
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
 static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
                                                        int crtc,
                                                        enum amdgpu_interrupt_state state);
@@ -171,8 +174,10 @@ static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
 static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
 
-       drm_crtc_vblank_off(crtc);
+       if (dev->num_crtcs)
+               drm_crtc_vblank_off(crtc);
 
        amdgpu_crtc->enabled = false;
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -247,6 +252,11 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
        amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
        drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
 
+       hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
+       amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
+       hrtimer_start(&amdgpu_crtc->vblank_timer,
+                     DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
        return 0;
 }
 
@@ -476,7 +486,7 @@ static int dce_virtual_hw_fini(void *handle)
 
        for (i = 0; i<adev->mode_info.num_crtc; i++)
                if (adev->mode_info.crtcs[i])
-                       dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
 
        return 0;
 }
@@ -698,9 +708,15 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
                                                       struct amdgpu_crtc, vblank_timer);
        struct drm_device *ddev = amdgpu_crtc->base.dev;
        struct amdgpu_device *adev = drm_to_adev(ddev);
+       struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
+               [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
+       int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+                                               amdgpu_crtc->crtc_id);
 
-       drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
-       dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+       if (amdgpu_irq_enabled(adev, source, irq_type)) {
+               drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+               dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+       }
        hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
                      HRTIMER_MODE_REL);
 
@@ -716,21 +732,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
                return;
        }
 
-       if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
-               DRM_DEBUG("Enable software vsync timer\n");
-               hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                                   DCE_VIRTUAL_VBLANK_PERIOD);
-               adev->mode_info.crtcs[crtc]->vblank_timer.function =
-                       dce_virtual_vblank_timer_handle;
-               hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                             DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
-       } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
-               DRM_DEBUG("Disable software vsync timer\n");
-               hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
-       }
-
        adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
        DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
 }
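After this rework the per-CRTC vblank hrtimer is started once at CRTC init and cancelled at hw_fini; the interrupt state only gates what the handler does, so there is no start/stop race against drm_crtc_vblank_off(). The always-running-timer pattern in isolation, with hypothetical demo_* names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define DEMO_PERIOD ns_to_ktime(16666666)	/* roughly 60 Hz */

static struct hrtimer demo_timer;
static bool demo_events_enabled;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	if (demo_events_enabled) {
		/* deliver the vblank event here */
	}
	/* rearm unconditionally; only the payload is gated */
	hrtimer_start(t, DEMO_PERIOD, HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, DEMO_PERIOD, HRTIMER_MODE_REL);
}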
index 2eab808..7b89fd2 100644 (file)
@@ -455,7 +455,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr,
                                          uint32_t *lo_val,
-                                         uint32_t *hi_val)
+                                         uint32_t *hi_val,
+                                         bool is_enable)
 {
 
        uint32_t eventsel, instance, unitmask;
@@ -477,7 +478,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
        instance_5432 = (instance >> 2) & 0xf;
        instance_76 = (instance >> 6) & 0x3;
 
-       *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
+       *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
+       *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
        *hi_val = (instance_76 << 29) | instance_5432;
 
        DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
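The refactor threads an is_enable flag down so one control-word builder can either set or clear the counter-enable bit (bit 22 of the low word) instead of always setting it. The bit manipulation in isolation, as a sketch:

#include <stdint.h>

static inline uint32_t demo_cntr_ctrl(uint32_t lo_val, int is_enable)
{
	/* bit 22 of the low control word gates the perfmon counter */
	return is_enable ? lo_val | (1u << 22) : lo_val & ~(1u << 22);
}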
@@ -572,14 +574,14 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
 }
 
 static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
-                            int is_enable)
+                            int is_add)
 {
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int err = 0, ret = 0;
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
-               if (is_enable)
+               if (is_add)
                        return df_v3_6_pmc_add_cntr(adev, config);
 
                df_v3_6_reset_perfmon_cntr(adev, config);
@@ -589,7 +591,8 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                                        &lo_base_addr,
                                        &hi_base_addr,
                                        &lo_val,
-                                       &hi_val);
+                                       &hi_val,
+                                       true);
 
                if (ret)
                        return ret;
@@ -612,7 +615,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 }
 
 static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
-                           int is_disable)
+                           int is_remove)
 {
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;
@@ -624,15 +627,17 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                        &lo_base_addr,
                        &hi_base_addr,
                        &lo_val,
-                       &hi_val);
+                       &hi_val,
+                       false);
 
                if (ret)
                        return ret;
 
-               df_v3_6_reset_perfmon_cntr(adev, config);
 
-               if (is_disable)
+               if (is_remove) {
+                       df_v3_6_reset_perfmon_cntr(adev, config);
                        df_v3_6_pmc_release_cntr(adev, config);
+               }
 
                break;
        default:
index d502e30..9792ec7 100644 (file)
@@ -3560,7 +3560,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
                break;
        }
 
-       if (adev->gfx.cp_fw_write_wait == false)
+       if (!adev->gfx.cp_fw_write_wait)
                DRM_WARN_ONCE("CP firmware version too old, please update!");
 }
 
@@ -3610,6 +3610,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
                if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                break;
+       case CHIP_NAVY_FLOUNDER:
+               adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+               break;
        default:
                break;
        }
@@ -6980,15 +6983,19 @@ static int gfx_v10_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+       if (!adev->in_pci_err_recovery) {
 #ifndef BRING_UP_DEBUG
-       if (amdgpu_async_gfx_ring) {
-               r = gfx_v10_0_kiq_disable_kgq(adev);
-               if (r)
-                       DRM_ERROR("KGQ disable failed\n");
-       }
+               if (amdgpu_async_gfx_ring) {
+                       r = gfx_v10_0_kiq_disable_kgq(adev);
+                       if (r)
+                               DRM_ERROR("KGQ disable failed\n");
+               }
 #endif
-       if (amdgpu_gfx_disable_kcq(adev))
-               DRM_ERROR("KCQ disable failed\n");
+               if (amdgpu_gfx_disable_kcq(adev))
+                       DRM_ERROR("KCQ disable failed\n");
+       }
+
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
index 93c63ff..6959aeb 100644 (file)
@@ -49,6 +49,7 @@
 #include "amdgpu_ras.h"
 
 #include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
 
 #include "asic_reg/pwr/pwr_10_0_offset.h"
 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -788,7 +789,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                  struct amdgpu_cu_info *cu_info);
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
@@ -2075,6 +2075,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
        .ras_error_inject = &gfx_v9_4_ras_error_inject,
        .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
        .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
 };
 
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2196,7 +2197,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                      int mec, int pipe, int queue)
 {
-       int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
        unsigned int hw_prio;
@@ -2221,13 +2221,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
-       if (r)
-               return r;
-
-
-       return 0;
+       return amdgpu_ring_init(adev, ring, 1024,
+                               &adev->gfx.eop_irq, irq_type, hw_prio);
 }
 
 static int gfx_v9_0_sw_init(void *handle)
@@ -2402,7 +2397,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
        /* TODO */
 }
 
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+                          u32 instance)
 {
        u32 data;
 
@@ -2560,14 +2556,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
-                                           !!amdgpu_noretry);
+                                           !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
-                                           !!amdgpu_noretry);
+                                           !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >> 48));
@@ -2800,7 +2796,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
        uint32_t default_data = 0;
 
        default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
-       if (enable == true) {
+       if (enable) {
                /* enable GFXIP control over CGPG */
                data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
                if(default_data != data)
index fa5a3fb..dfe8d48 100644 (file)
@@ -26,9 +26,7 @@
 
 extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
 
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
-uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+                          u32 instance);
 
 #endif
index bd85aed..bc699d6 100644 (file)
@@ -992,3 +992,32 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 
        return ret;
 }
+
+static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+       { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+       uint32_t i, j;
+       uint32_t reg_value;
+
+       if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+               return;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+
+       for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
+               for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+                    j++) {
+                       gfx_v9_4_select_se_sh(adev, i, 0, j);
+                       reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+                               gfx_v9_4_rdrsp_status_regs));
+                       if (reg_value)
+                               dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+                                               j, reg_value);
+               }
+       }
+
+       gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
index 1ffecc5..875f184 100644 (file)
@@ -34,4 +34,6 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
 
 void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
 
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+
 #endif /* __GFX_V9_4_H__ */
index 529e463..fad887a 100644 (file)
@@ -245,7 +245,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -403,3 +403,13 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
        hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
                mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
 }
+
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
+       .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v1_0_gart_enable,
+       .gart_disable = gfxhub_v1_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+       .init = gfxhub_v1_0_init,
+};
index 92d3a70..0c46672 100644 (file)
@@ -33,4 +33,5 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
 void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base);
 
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs;
 #endif
index c0ab71d..1e24b6d 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 #include "amdgpu.h"
+#include "gfxhub_v1_0.h"
 #include "gfxhub_v1_1.h"
 
 #include "gc/gc_9_2_1_offset.h"
@@ -28,7 +29,7 @@
 
 #include "soc15_common.h"
 
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 {
        u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
        u32 max_region =
@@ -66,3 +67,13 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 
        return 0;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs = {
+       .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v1_0_gart_enable,
+       .gart_disable = gfxhub_v1_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+       .init = gfxhub_v1_0_init,
+       .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+};
index d753cf2..ae5759f 100644 (file)
@@ -24,6 +24,6 @@
 #ifndef __GFXHUB_V1_1_H__
 #define __GFXHUB_V1_1_H__
 
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs;
 
 #endif
index b882ac5..456360b 100644 (file)
@@ -102,7 +102,7 @@ gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
 }
 
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
 {
        u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
 
@@ -112,12 +112,12 @@ u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
        return base;
 }
 
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
 {
        return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
 }
 
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -153,11 +153,6 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
        uint64_t value;
 
        if (!amdgpu_sriov_vf(adev)) {
-               /*
-                * the new L1 policy will block SRIOV guest from writing
-                * these regs, and they will be programed at host.
-                * so skip programing these regs.
-                */
                /* Disable AGP. */
                WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
                WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
@@ -318,7 +313,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -347,7 +342,7 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
        }
 }
 
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
        /* GART Enable. */
        gfxhub_v2_0_init_gart_aperture_regs(adev);
@@ -363,7 +358,7 @@ int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        u32 tmp;
@@ -394,7 +389,7 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
@@ -436,7 +431,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
        .get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
 };
 
-void gfxhub_v2_0_init(struct amdgpu_device *adev)
+static void gfxhub_v2_0_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
 
@@ -477,3 +472,13 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
 
        hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
+       .get_fb_location = gfxhub_v2_0_get_fb_location,
+       .get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v2_0_gart_enable,
+       .gart_disable = gfxhub_v2_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
+       .init = gfxhub_v2_0_init,
+};
index 392b8cd..9ddc35c 100644 (file)
 #ifndef __GFXHUB_V2_0_H__
 #define __GFXHUB_V2_0_H__
 
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
-                                         bool value);
-void gfxhub_v2_0_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs;
 
 #endif
index 237a9ff..724bb29 100644 (file)
@@ -102,7 +102,7 @@ gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
 }
 
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
 {
        u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
 
@@ -112,12 +112,12 @@ u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
        return base;
 }
 
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
 {
        return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
 }
 
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -319,7 +319,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -348,7 +348,7 @@ static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
        }
 }
 
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
 {
        if (amdgpu_sriov_vf(adev)) {
                /*
@@ -376,7 +376,7 @@ int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        u32 tmp;
@@ -405,7 +405,7 @@ void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
@@ -454,7 +454,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
        .get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
 };
 
-void gfxhub_v2_1_init(struct amdgpu_device *adev)
+static void gfxhub_v2_1_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
 
@@ -496,7 +496,7 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
        hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
 }
 
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
 {
        u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
        u32 max_region =
@@ -531,3 +531,14 @@ int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
 
        return 0;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
+       .get_fb_location = gfxhub_v2_1_get_fb_location,
+       .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v2_1_gart_enable,
+       .gart_disable = gfxhub_v2_1_gart_disable,
+       .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
+       .init = gfxhub_v2_1_init,
+       .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
+};
index 3452a4e..f75c2ec 100644 (file)
 #ifndef __GFXHUB_V2_1_H__
 #define __GFXHUB_V2_1_H__
 
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
-                                         bool value);
-void gfxhub_v2_1_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
-
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs;
 
 #endif
index 31359e5..dbc8b76 100644 (file)
@@ -634,11 +634,25 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
        adev->mmhub.funcs = &mmhub_v2_0_funcs;
 }
 
+static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
+               adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
+               break;
+       default:
+               adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
+               break;
+       }
+}
+
 static int gmc_v10_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gmc_v10_0_set_mmhub_funcs(adev);
+       gmc_v10_0_set_gfxhub_funcs(adev);
        gmc_v10_0_set_gmc_funcs(adev);
        gmc_v10_0_set_irq_funcs(adev);
        gmc_v10_0_set_umc_funcs(adev);
@@ -676,11 +691,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 {
        u64 base = 0;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               base = gfxhub_v2_1_get_fb_location(adev);
-       else
-               base = gfxhub_v2_0_get_fb_location(adev);
+       base = adev->gfxhub.funcs->get_fb_location(adev);
 
        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -689,11 +700,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
        amdgpu_gmc_gart_location(adev, mc);
 
        /* base offset of vram pages */
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
-       else
-               adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
+       adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
 
        /* add the xgmi offset of the physical node */
        adev->vm_manager.vram_base_offset +=
@@ -777,11 +784,7 @@ static int gmc_v10_0_sw_init(void *handle)
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_init(adev);
-       else
-               gfxhub_v2_0_init(adev);
+       adev->gfxhub.funcs->init(adev);
 
        adev->mmhub.funcs->init(adev);
 
@@ -852,7 +855,7 @@ static int gmc_v10_0_sw_init(void *handle)
        }
 
        if (adev->gmc.xgmi.supported) {
-               r = gfxhub_v2_1_get_xgmi_info(adev);
+               r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
                        return r;
        }
@@ -944,11 +947,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               r = gfxhub_v2_1_gart_enable(adev);
-       else
-               r = gfxhub_v2_0_gart_enable(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
@@ -969,11 +968,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_set_fault_enable_default(adev, value);
-       else
-               gfxhub_v2_0_set_fault_enable_default(adev, value);
+       adev->gfxhub.funcs->set_fault_enable_default(adev, value);
        adev->mmhub.funcs->set_fault_enable_default(adev, value);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
@@ -1014,11 +1009,7 @@ static int gmc_v10_0_hw_init(void *handle)
  */
 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 {
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_gart_disable(adev);
-       else
-               gfxhub_v2_0_gart_disable(adev);
+       adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
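
The gmc_v10_0 conversion shows the point of the table: the repeated Sienna
Cichlid/Navy Flounder special-casing collapses into a single indirect call,
with the ASIC decision made once in gmc_v10_0_set_gfxhub_funcs(). Schematically:

        /* before: every call site re-derived the ASIC family */
        if (adev->asic_type == CHIP_SIENNA_CICHLID ||
            adev->asic_type == CHIP_NAVY_FLOUNDER)
                r = gfxhub_v2_1_gart_enable(adev);
        else
                r = gfxhub_v2_0_gart_enable(adev);

        /* after: one dispatch through the table chosen at early init */
        r = adev->gfxhub.funcs->gart_enable(adev);

get_xgmi_info is absent from gfxhub_v2_0_funcs, but its only caller is guarded
by adev->gmc.xgmi.supported; as long as that flag is only set on ASICs that
were given the v2.1 table, the NULL callback is never reached.
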
index 91629c2..3ebbddb 100644 (file)
@@ -269,7 +269,6 @@ static const char *mmhub_client_ids_arcturus[][2] = {
        [14][1] = "HDP",
        [15][1] = "SDMA0",
        [32+15][1] = "SDMA1",
-       [32+15][1] = "SDMA1",
        [64+15][1] = "SDMA2",
        [96+15][1] = "SDMA3",
        [128+15][1] = "SDMA4",
@@ -1165,6 +1164,19 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
        }
 }
 
+static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_ARCTURUS:
+       case CHIP_VEGA20:
+               adev->gfxhub.funcs = &gfxhub_v1_1_funcs;
+               break;
+       default:
+               adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
+               break;
+       }
+}
+
 static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1173,6 +1185,7 @@ static int gmc_v9_0_early_init(void *handle)
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
+       gmc_v9_0_set_gfxhub_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
@@ -1194,21 +1207,16 @@ static int gmc_v9_0_late_init(void *handle)
        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;
-       /* Check if ecc is available */
+
+       /*
+        * Workaround for the performance drop seen when the VBIOS
+        * enables partial writes while disabling HBM ECC on Vega10.
+        */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
-               r = amdgpu_atomfirmware_mem_ecc_supported(adev);
-               if (!r) {
-                       DRM_INFO("ECC is not present.\n");
+               if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
-               } else
-                       DRM_INFO("ECC is active.\n");
-
-               r = amdgpu_atomfirmware_sram_ecc_supported(adev);
-               if (!r)
-                       DRM_INFO("SRAM ECC is not present.\n");
-               else
-                       DRM_INFO("SRAM ECC is active.\n");
+               }
        }
 
        if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
@@ -1235,7 +1243,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);
        /* base offset of vram pages */
-       adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+       adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
 
        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
@@ -1270,7 +1278,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 
 #ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
-               adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+               adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
 #endif
@@ -1340,7 +1348,7 @@ static int gmc_v9_0_sw_init(void *handle)
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gfxhub_v1_0_init(adev);
+       adev->gfxhub.funcs->init(adev);
 
        adev->mmhub.funcs->init(adev);
 
@@ -1454,7 +1462,7 @@ static int gmc_v9_0_sw_init(void *handle)
        adev->need_swiotlb = drm_need_swiotlb(44);
 
        if (adev->gmc.xgmi.supported) {
-               r = gfxhub_v1_1_get_xgmi_info(adev);
+               r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
                        return r;
        }
@@ -1546,8 +1554,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  */
 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
 {
-       if (adev->asic_type == CHIP_RAVEN)
+       if (adev->asic_type == CHIP_RAVEN) {
                WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+               WARN_ON(adev->gmc.sdpif_register !=
+                       RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
+       }
 }
 
 /**
@@ -1567,7 +1578,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       r = gfxhub_v1_0_gart_enable(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
@@ -1634,7 +1645,7 @@ static int gmc_v9_0_hw_init(void *handle)
                value = true;
 
        if (!amdgpu_sriov_vf(adev)) {
-               gfxhub_v1_0_set_fault_enable_default(adev, value);
+               adev->gfxhub.funcs->set_fault_enable_default(adev, value);
                adev->mmhub.funcs->set_fault_enable_default(adev, value);
        }
        for (i = 0; i < adev->num_vmhubs; ++i)
@@ -1657,7 +1668,7 @@ static int gmc_v9_0_hw_init(void *handle)
  */
 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
-       gfxhub_v1_0_gart_disable(adev);
+       adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
@@ -1681,14 +1692,9 @@ static int gmc_v9_0_hw_fini(void *handle)
 
 static int gmc_v9_0_suspend(void *handle)
 {
-       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = gmc_v9_0_hw_fini(adev);
-       if (r)
-               return r;
-
-       return 0;
+       return gmc_v9_0_hw_fini(adev);
 }
 
 static int gmc_v9_0_resume(void *handle)
index bc30028..c600b61 100644 (file)
@@ -33,6 +33,7 @@
 
 static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
 
 static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
 {
@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
        .insert_start = jpeg_v1_0_decode_ring_insert_start,
        .insert_end = jpeg_v1_0_decode_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
-       .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .begin_use = jpeg_v1_0_ring_begin_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
        .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
 }
+
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+       int cnt;
+
+       mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+       if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
+               DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
+
+       for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+               if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
+                       DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
+       }
+
+       vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
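
The VCN1/JPEG1 workaround serializes the two engines: each begin_use cancels
the idle worker, takes the shared vcn1_jpeg1_workaround mutex, drains the other
engine's rings and then runs the common power-gating path, while the shared
end_use (added in vcn_v1_0.c below) re-arms the idle worker and drops the
mutex. The pairing, as a sketch:

        /*
         * jpeg_v1_0_ring_begin_use()        vcn_v1_0_ring_begin_use()
         *   cancel idle work                  cancel idle work
         *   mutex_lock(workaround)            mutex_lock(workaround)
         *   wait for VCN dec/enc idle         wait for JPEG dec idle
         *   vcn_v1_0_set_pg_for_begin_use()   vcn_v1_0_set_pg_for_begin_use()
         *   ... ring submission ...           ... ring submission ...
         *   vcn_v1_0_ring_end_use():          (same, shared by both engines)
         *     schedule idle work after VCN_IDLE_TIMEOUT;
         *     mutex_unlock(workaround);
         */
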
index 4b74658..1c22d83 100644 (file)
@@ -832,7 +832,6 @@ static int mes_v10_1_queue_init(struct amdgpu_device *adev)
 static int mes_v10_1_ring_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
-       int r;
 
        ring = &adev->mes.ring;
 
@@ -849,11 +848,7 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
        ring->no_scheduler = true;
        sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
-       r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
-       if (r)
-               return r;
-
-       return 0;
+       return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
 }
 
 static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
index 45a902b..f84701c 100644 (file)
@@ -268,7 +268,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
index 2d88278..2063700 100644 (file)
@@ -201,11 +201,6 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
 
        if (!amdgpu_sriov_vf(adev)) {
-               /*
-                * the new L1 policy will block SRIOV guest from writing
-                * these regs, and they will be programed at host.
-                * so skip programing these regs.
-                */
                /* Program the system aperture low logical page number. */
                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                             adev->gmc.vram_start >> 18);
@@ -374,7 +369,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
index 6c6ad52..66748bb 100644 (file)
@@ -330,7 +330,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
                                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
                                    i * hub->ctx_distance, tmp);
@@ -1624,6 +1624,34 @@ static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
+static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = {
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t reg_value;
+
+       if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
+               reg_value =
+                       RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
+               if (reg_value)
+                       dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+                                       i, reg_value);
+       }
+}
+
 const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
@@ -1636,4 +1664,5 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .set_clockgating = mmhub_v9_4_set_clockgating,
        .get_clockgating = mmhub_v9_4_get_clockgating,
        .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
+       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
 };
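
Unlike the GFX walk above, the eight MMHUB EA instances are distinct registers
(MMEA0..MMEA7_ERR_STATUS), so no GRBM-style selector is needed and a flat
ARRAY_SIZE() loop suffices. SOC15_REG_ENTRY_OFFSET() presumably resolves each
entry through the per-IP register base table, along these lines (sketch; the
real macro lives in soc15_common.h):

        #define SOC15_REG_ENTRY_OFFSET(entry)                                  \
                (adev->reg_offset[(entry).hwip][(entry).inst][(entry).seg] +   \
                 (entry).reg_offset)
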
index 9c07014..f5ce9a9 100644 (file)
@@ -262,7 +262,8 @@ flr_done:
 
        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
-               && (amdgpu_device_has_job_running(adev) || adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
+               && (!amdgpu_device_has_job_running(adev) ||
+               adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
                amdgpu_device_gpu_recover(adev, NULL);
 }
 
index 9c23abf..666ed99 100644 (file)
@@ -283,7 +283,7 @@ flr_done:
 
        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
-               && (amdgpu_device_has_job_running(adev) ||
+               && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
index 4d14023..1ce741a 100644 (file)
@@ -69,75 +69,40 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
  */
 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u32 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, v);
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg(adev, address, data, reg, v);
 }
 
 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u64 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* read low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-
-       /* read high 32 bit*/
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       r |= ((u64)RREG32(data) << 32);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* write low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v & 0xffffffffULL));
-       (void)RREG32(data);
-
-       /* write high 32 bit */
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v >> 32));
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
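
The four open-coded PCIE index/data sequences in nv.c (and their twins in
soc15.c further down) collapse into the new amdgpu_device_indirect_* helpers.
Assuming the helpers keep the removed logic verbatim, the 32-bit read would
look roughly like this sketch; the real bodies live in amdgpu_device.c:

        static u32 amdgpu_device_indirect_rreg_sketch(struct amdgpu_device *adev,
                                                      unsigned long address,
                                                      unsigned long data,
                                                      u32 reg)
        {
                unsigned long flags;
                u32 r;

                spin_lock_irqsave(&adev->pcie_idx_lock, flags);
                WREG32(address, reg);   /* select the indirect register */
                (void)RREG32(address);  /* read back to post the write */
                r = RREG32(data);       /* fetch the value */
                spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

                return r;
        }

The 64-bit variants perform the same dance twice, for reg and reg + 4.
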
@@ -311,7 +276,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
        /* disable BM */
        pci_clear_master(adev->pdev);
 
-       pci_save_state(adev->pdev);
+       amdgpu_device_cache_pci_state(adev->pdev);
 
        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
                dev_info(adev->dev, "GPU smu mode1 reset\n");
@@ -323,7 +288,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");
-       pci_restore_state(adev->pdev);
+       amdgpu_device_load_pci_state(adev->pdev);
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
@@ -621,7 +586,7 @@ static void nv_invalidate_hdp(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
-               WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+               WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        } else {
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
index cbc04a5..1ef2f5b 100644 (file)
@@ -83,19 +83,6 @@ struct psp_gfx_ctrl
 */
 #define GFX_FLAG_RESPONSE               0x80000000
 
-/* Gbr IH registers ID */
-enum ih_reg_id {
-       IH_RB           = 0,            // IH_RB_CNTL
-       IH_RB_RNG1      = 1,            // IH_RB_CNTL_RING1
-       IH_RB_RNG2      = 2,            // IH_RB_CNTL_RING2
-};
-
-/* Command to setup Gibraltar IH register */
-struct psp_gfx_cmd_gbr_ih_reg {
-       uint32_t                reg_value;      /* Value to be set to the IH_RB_CNTL... register*/
-       enum ih_reg_id          reg_id;         /* ID of the register */
-};
-
 /* TEE Gfx Command IDs for the ring buffer interface. */
 enum psp_gfx_cmd_id
 {
index e16874f..6c5d961 100644 (file)
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS          0x3010024
index 856c503..86fb1ed 100644 (file)
@@ -592,6 +592,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
@@ -1000,7 +1003,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
                sdma[i] = &adev->sdma.instance[i].page;
 
                if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-                       (unset == false)) {
+                       (!unset)) {
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
                        unset = true;
                }
@@ -1063,6 +1066,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
                        WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
                }
                WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
+
+               /*
+                * Enable SDMA utilization. This is currently only
+                * supported on Arcturus, and requires SDMA firmware
+                * version 14 and above.
+                */
+               if (adev->asic_type == CHIP_ARCTURUS &&
+                   adev->sdma.instance[i].fw_version >= 14)
+                       WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
        }
 
 }
@@ -1080,7 +1092,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                sdma_v4_0_gfx_stop(adev);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
index e2232dd..9c72b95 100644 (file)
@@ -203,6 +203,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
@@ -616,7 +619,7 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                sdma_v5_0_gfx_stop(adev);
                sdma_v5_0_rlc_stop(adev);
        }
index 46a9617..9f39527 100644 (file)
@@ -148,6 +148,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
@@ -559,7 +562,7 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       if (enable == false) {
+       if (!enable) {
                sdma_v5_2_gfx_stop(adev);
                sdma_v5_2_rlc_stop(adev);
        }
index 455d5e3..e5e336f 100644 (file)
@@ -1339,7 +1339,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
        uint32_t temp;
 
        temp = RREG32(CONFIG_CNTL);
-       if (state == false) {
+       if (!state) {
                temp &= ~(1<<0);
                temp |= (1<<1);
        } else {
index 2f93c47..afcccc6 100644 (file)
  */
 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u32 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg(adev, address, data, reg);
 }
 
 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, v);
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg(adev, address, data, reg, v);
 }
 
 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u64 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* read low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-
-       /* read high 32 bit*/
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       r |= ((u64)RREG32(data) << 32);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* write low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v & 0xffffffffULL));
-       (void)RREG32(data);
-
-       /* write high 32 bit */
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v >> 32));
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
@@ -484,13 +449,13 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
        /* disable BM */
        pci_clear_master(adev->pdev);
 
-       pci_save_state(adev->pdev);
+       amdgpu_device_cache_pci_state(adev->pdev);
 
        ret = psp_gpu_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");
 
-       pci_restore_state(adev->pdev);
+       amdgpu_device_load_pci_state(adev->pdev);
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
@@ -697,12 +662,12 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
                 * it doesn't support SRIOV. */
                if (amdgpu_discovery) {
                        r = amdgpu_discovery_reg_base_init(adev);
-                       if (r) {
-                               DRM_WARN("failed to init reg base from ip discovery table, "
-                                        "fallback to legacy init method\n");
-                               vega10_reg_base_init(adev);
-                       }
+                       if (r == 0)
+                               break;
+                       DRM_WARN("failed to init reg base from ip discovery table, "
+                                "falling back to legacy init method\n");
                }
+               vega10_reg_base_init(adev);
                break;
        case CHIP_VEGA20:
                vega20_reg_base_init(adev);
index 3cafba7..b0c0c43 100644 (file)
@@ -348,7 +348,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        /* Set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
 
-       /* programm the 4GB memory segment for rptr and ring buffer */
+       /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
                                   (0x7 << 16) | (0x1 << 31));
 
@@ -541,7 +541,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
        uint64_t addr;
        uint32_t size;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
index a566ff9..6e57001 100644 (file)
@@ -253,7 +253,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
        uint64_t offset;
        uint32_t size;
 
-       /* programm memory controller bits 0-27 */
+       /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -404,7 +404,7 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 0a880bc..666bfa4 100644 (file)
@@ -583,7 +583,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
        uint64_t offset;
        uint32_t size;
 
-       /* programm memory controller bits 0-27 */
+       /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -825,7 +825,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1240,8 +1240,8 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                break;
        }
 
-       if (false == int_handled)
-                       DRM_ERROR("Unhandled interrupt: %d %d\n",
+       if (!int_handled)
+               DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
 
        return 0;
index e07e3fa..b44c867 100644 (file)
@@ -1073,7 +1073,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
                                (upper_32_bits(ring->gpu_addr) >> 2));
 
-               /* programm the RB_BASE for ring buffer */
+               /* program the RB_BASE for ring buffer */
                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                                lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 927c330..86e1ef7 100644 (file)
@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
                                int inst_idx, struct dpg_pause_state *new_state);
 
 static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -910,7 +911,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                        (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1068,7 +1069,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                                                                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                                                                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1804,11 +1805,23 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
        }
 }
 
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
+       mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+       if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
+               DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
+
+       vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
+
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
+{
+       struct amdgpu_device *adev = ring->adev;
+
        if (set_clocks) {
                amdgpu_gfx_off_ctrl(adev, false);
                if (adev->pm.dpm_enabled)
@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
        }
 }
 
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
+{
+       schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+       mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+}
+
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .name = "vcn_v1_0",
        .early_init = vcn_v1_0_early_init,
@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .insert_end = vcn_v1_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .insert_end = vcn_v1_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
index f67d739..1f1cc7f 100644 (file)
@@ -24,7 +24,8 @@
 #ifndef __VCN_V1_0_H__
 #define __VCN_V1_0_H__
 
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
 
 extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
 
index 23a9eb5..e5d29de 100644 (file)
@@ -900,7 +900,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1060,7 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
        fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 139fac0..0f1d3ef 100644 (file)
@@ -882,7 +882,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1062,7 +1062,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
 
                fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-               /* programm the RB_BASE for ring buffer */
+               /* program the RB_BASE for ring buffer */
                WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 589d6cd..e074f7e 100644 (file)
@@ -746,18 +746,18 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
                | UVD_SUVD_CGC_GATE__IME_HEVC_MASK
                | UVD_SUVD_CGC_GATE__EFC_MASK
                | UVD_SUVD_CGC_GATE__SAOE_MASK
-               | 0x08000000
+               | UVD_SUVD_CGC_GATE__SRE_AV1_MASK
                | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
                | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
-               | 0x40000000
+               | UVD_SUVD_CGC_GATE__SCM_AV1_MASK
                | UVD_SUVD_CGC_GATE__SMPA_MASK);
        WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);
 
        data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
        data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
                | UVD_SUVD_CGC_GATE2__MPBE1_MASK
-               | 0x00000004
-               | 0x00000008
+               | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
+               | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
                | UVD_SUVD_CGC_GATE2__MPC1_MASK);
        WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);
 
@@ -776,8 +776,8 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
                | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-               | 0x00008000
-               | 0x00010000
+               | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+               | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
                | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
                | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -892,8 +892,8 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
                | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-               | 0x00008000
-               | 0x00010000
+               | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+               | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
                | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
                | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
                | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
index b7b16ad..222f1df 100644 (file)
@@ -97,6 +97,7 @@ void kfd_chardev_exit(void)
        device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
        class_destroy(kfd_class);
        unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+       kfd_device = NULL;
 }
 
 struct device *kfd_chardev(void)
@@ -1290,18 +1291,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                return -EINVAL;
        }
 
-       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
-               if (args->size != kfd_doorbell_process_slice(dev))
-                       return -EINVAL;
-               offset = kfd_get_process_doorbells(dev, p);
-       } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
-               if (args->size != PAGE_SIZE)
-                       return -EINVAL;
-               offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
-               if (!offset)
-                       return -ENOMEM;
-       }
-
        mutex_lock(&p->mutex);
 
        pdd = kfd_bind_process_to_device(dev, p);
@@ -1310,6 +1299,24 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+               if (args->size != kfd_doorbell_process_slice(dev)) {
+                       err = -EINVAL;
+                       goto err_unlock;
+               }
+               offset = kfd_get_process_doorbells(pdd);
+       } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+               if (args->size != PAGE_SIZE) {
+                       err = -EINVAL;
+                       goto err_unlock;
+               }
+               offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+               if (!offset) {
+                       err = -ENOMEM;
+                       goto err_unlock;
+               }
+       }
+
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                dev->kgd, args->va_addr, args->size,
                pdd->vm, (struct kgd_mem **) &mem, &offset,
index 3fac06b..5e2254b 100644 (file)
@@ -797,7 +797,8 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
                return -ENODATA;
        }
 
-       pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
+       pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
        if (!pcrat_image)
                return -ENOMEM;
+       memcpy(pcrat_image, crat_table, crat_table->length);
 
@@ -809,11 +810,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 
 /* Memory required to create Virtual CRAT.
  * Since there is no easy way to predict the amount of memory required, the
- * following amount are allocated for CPU and GPU Virtual CRAT. This is
+ * following amount is allocated for GPU Virtual CRAT. This is
  * expected to cover all known conditions. But to be safe additional check
  * is put in the code to ensure we don't overwrite.
  */
-#define VCRAT_SIZE_FOR_CPU     (2 * PAGE_SIZE)
 #define VCRAT_SIZE_FOR_GPU     (4 * PAGE_SIZE)
 
 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
@@ -964,7 +964,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 #endif
        int ret = 0;
 
-       if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+       if (!pcrat_image)
                return -EINVAL;
 
        /* Fill in CRAT Header.
@@ -1364,30 +1364,37 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
                                  uint32_t proximity_domain)
 {
        void *pcrat_image = NULL;
-       int ret = 0;
+       int ret = 0, num_nodes;
+       size_t dyn_size;
 
        if (!crat_image)
                return -EINVAL;
 
        *crat_image = NULL;
 
-       /* Allocate one VCRAT_SIZE_FOR_CPU for CPU virtual CRAT image and
-        * VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image. This should cover
-        * all the current conditions. A check is put not to overwrite beyond
-        * allocated size
+       /* Allocate the CPU Virtual CRAT size based on the number of online
+        * nodes. Allocate VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image.
+        * This should cover all the current conditions. A check is put not
+        * to overwrite beyond allocated size for GPUs
         */
        switch (flags) {
        case COMPUTE_UNIT_CPU:
-               pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
+               num_nodes = num_online_nodes();
+               dyn_size = sizeof(struct crat_header) +
+                       num_nodes * (sizeof(struct crat_subtype_computeunit) +
+                       sizeof(struct crat_subtype_memory) +
+                       (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
+               pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
-               *size = VCRAT_SIZE_FOR_CPU;
+               *size = dyn_size;
+               pr_debug("CRAT size is %zu\n", dyn_size);
                ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
                break;
        case COMPUTE_UNIT_GPU:
                if (!kdev)
                        return -EINVAL;
-               pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
+               pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = VCRAT_SIZE_FOR_GPU;
@@ -1406,7 +1413,7 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
        if (!ret)
                *crat_image = pcrat_image;
        else
-               kfree(pcrat_image);
+               kvfree(pcrat_image);
 
        return ret;
 }
@@ -1419,5 +1426,5 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
  */
 void kfd_destroy_crat_image(void *crat_image)
 {
-       kfree(crat_image);
+       kvfree(crat_image);
 }
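
The dynamic sizing above replaces the fixed VCRAT_SIZE_FOR_CPU with an amount
derived from the online NUMA nodes: each node contributes one compute-unit
subtype, one memory subtype, and an IO link to each of the other nodes. A
minimal sketch of the same arithmetic (the struct sizes are placeholders, not
the real CRAT layout):

    /* Sketch only: sizes below stand in for the real CRAT struct sizes. */
    #include <stddef.h>
    #include <stdio.h>

    #define CRAT_HEADER_SIZE     40 /* assumed sizeof(struct crat_header) */
    #define CU_SUBTYPE_SIZE      40 /* assumed sizeof(struct crat_subtype_computeunit) */
    #define MEM_SUBTYPE_SIZE     40 /* assumed sizeof(struct crat_subtype_memory) */
    #define IOLINK_SUBTYPE_SIZE  40 /* assumed sizeof(struct crat_subtype_iolink) */

    static size_t vcrat_cpu_size(size_t num_nodes)
    {
            /* One CU and one memory entry per node, plus an IO link to
             * each of the other (num_nodes - 1) nodes.
             */
            return CRAT_HEADER_SIZE +
                   num_nodes * (CU_SUBTYPE_SIZE + MEM_SUBTYPE_SIZE +
                   (num_nodes - 1) * IOLINK_SUBTYPE_SIZE);
    }

    int main(void)
    {
            printf("2 nodes -> %zu bytes\n", vcrat_cpu_size(2));
            return 0;
    }
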
index 0e71a05..903170e 100644 (file)
@@ -503,8 +503,8 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
 #ifdef KFD_SUPPORT_IOMMU_V2
        [CHIP_KAVERI] = {&kaveri_device_info, NULL},
        [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
-       [CHIP_RAVEN] = {&raven_device_info, NULL},
 #endif
+       [CHIP_RAVEN] = {&raven_device_info, NULL},
        [CHIP_HAWAII] = {&hawaii_device_info, NULL},
        [CHIP_TONGA] = {&tonga_device_info, NULL},
        [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
@@ -583,6 +583,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 
        atomic_set(&kfd->sram_ecc_flag, 0);
 
+       ida_init(&kfd->doorbell_ida);
+
        return kfd;
 }
 
@@ -716,6 +718,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
        kfd->unique_id = amdgpu_amdkfd_get_unique_id(kfd->kgd);
 
+       kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);
+
        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device, "Error initializing interrupts\n");
                goto kfd_interrupt_error;
@@ -798,6 +802,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
+               ida_destroy(&kfd->doorbell_ida);
                kfd_gtt_sa_fini(kfd);
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
                if (kfd->gws)
index 560adc5..62504d5 100644 (file)
@@ -191,9 +191,8 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
        }
 
        q->properties.doorbell_off =
-               kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
+               kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
                                          q->doorbell_id);
-
        return 0;
 }
 
@@ -650,9 +649,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                goto out;
 
        pdd = qpd_to_pdd(qpd);
-       pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+       pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
                            pdd->process->pasid);
 
+       pdd->last_evict_timestamp = get_jiffies_64();
        /* Mark all queues as evicted. Deactivate all active queues on
         * the qpd.
         */
@@ -700,7 +700,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                goto out;
 
        pdd = qpd_to_pdd(qpd);
-       pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+       pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
                            pdd->process->pasid);
 
        /* Mark all queues as evicted. Deactivate all active queues on
@@ -714,6 +714,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                q->properties.is_active = false;
                decrement_queue_count(dqm, q->properties.type);
        }
+       pdd->last_evict_timestamp = get_jiffies_64();
        retval = execute_queues_cpsch(dqm,
                                qpd->is_debug ?
                                KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
@@ -732,6 +733,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        uint64_t pd_base;
+       uint64_t eviction_duration;
        int retval, ret = 0;
 
        pdd = qpd_to_pdd(qpd);
@@ -746,7 +748,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                goto out;
        }
 
-       pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+       pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
                            pdd->process->pasid);
 
        /* Update PD Base in QPD */
@@ -799,6 +801,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                        ret = retval;
        }
        qpd->evicted = 0;
+       eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+       atomic64_add(eviction_duration, &pdd->evict_duration_counter);
 out:
        if (mm)
                mmput(mm);
@@ -812,6 +816,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
        struct queue *q;
        struct kfd_process_device *pdd;
        uint64_t pd_base;
+       uint64_t eviction_duration;
        int retval = 0;
 
        pdd = qpd_to_pdd(qpd);
@@ -826,7 +831,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
                goto out;
        }
 
-       pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+       pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
                            pdd->process->pasid);
 
        /* Update PD Base in QPD */
@@ -845,6 +850,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
        retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        qpd->evicted = 0;
+       eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+       atomic64_add(eviction_duration, &pdd->evict_duration_counter);
 out:
        dqm_unlock(dqm);
        return retval;
@@ -1192,6 +1199,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
        dqm->sched_running = false;
        dqm_unlock(dqm);
 
+       pm_release_ib(&dqm->packets);
+
        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets, hanging);
 
@@ -1302,7 +1311,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (q->properties.is_active) {
                increment_queue_count(dqm, q->properties.type);
 
-               retval = execute_queues_cpsch(dqm,
+               execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        }
 
@@ -1964,6 +1973,7 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
 
        if (!p)
                return -EINVAL;
+       WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
        pdd = kfd_get_process_device_data(dqm->dev, p);
        if (pdd)
                ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
index 309f63a..eca6331 100644 (file)
@@ -61,7 +61,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
                qpd->sh_mem_config =
                                SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
-               if (amdgpu_noretry &&
+               if (dqm->dev->noretry &&
                    !dqm->dev->use_iommu_v2)
                        qpd->sh_mem_config |=
                                1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
index 8e0c00b..768d153 100644 (file)
@@ -31,9 +31,6 @@
  * kernel queues using the first doorbell page reserved for the kernel.
  */
 
-static DEFINE_IDA(doorbell_ida);
-static unsigned int max_doorbell_slices;
-
 /*
  * Each device exposes a doorbell aperture, a PCI MMIO aperture that
  * receives 32-bit writes that are passed to queues as wptr values.
@@ -84,9 +81,9 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
        else
                return -ENOSPC;
 
-       if (!max_doorbell_slices ||
-           doorbell_process_limit < max_doorbell_slices)
-               max_doorbell_slices = doorbell_process_limit;
+       if (!kfd->max_doorbell_slices ||
+           doorbell_process_limit < kfd->max_doorbell_slices)
+               kfd->max_doorbell_slices = doorbell_process_limit;
 
        kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
                                doorbell_start_offset;
@@ -130,6 +127,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
                      struct vm_area_struct *vma)
 {
        phys_addr_t address;
+       struct kfd_process_device *pdd;
 
        /*
         * For simplicitly we only allow mapping of the entire doorbell
@@ -138,9 +136,12 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
        if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
                return -EINVAL;
 
-       /* Calculate physical address of doorbell */
-       address = kfd_get_process_doorbells(dev, process);
+       pdd = kfd_get_process_device_data(dev, process);
+       if (!pdd)
+               return -EINVAL;
 
+       /* Calculate physical address of doorbell */
+       address = kfd_get_process_doorbells(pdd);
        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
                                VM_DONTDUMP | VM_PFNMAP;
 
@@ -226,7 +227,7 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
 }
 
 unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
-                                       struct kfd_process *process,
+                                       struct kfd_process_device *pdd,
                                        unsigned int doorbell_id)
 {
        /*
@@ -236,7 +237,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
         * units regardless of the ASIC-dependent doorbell size.
         */
        return kfd->doorbell_base_dw_offset +
-               process->doorbell_index
+               pdd->doorbell_index
                * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
                doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
 }
@@ -251,25 +252,24 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
 
 }
 
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
-                                       struct kfd_process *process)
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
 {
-       return dev->doorbell_base +
-               process->doorbell_index * kfd_doorbell_process_slice(dev);
+       return pdd->dev->doorbell_base +
+               pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
 }
 
-int kfd_alloc_process_doorbells(struct kfd_process *process)
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index)
 {
-       int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+       int r = ida_simple_get(&kfd->doorbell_ida, 1, kfd->max_doorbell_slices,
                                GFP_KERNEL);
        if (r > 0)
-               process->doorbell_index = r;
+               *doorbell_index = r;
 
        return r;
 }
 
-void kfd_free_process_doorbells(struct kfd_process *process)
+void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index)
 {
-       if (process->doorbell_index)
-               ida_simple_remove(&doorbell_ida, process->doorbell_index);
+       if (doorbell_index)
+               ida_simple_remove(&kfd->doorbell_ida, doorbell_index);
 }
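
With the IDA now per device, a process gets a doorbell_index on each KFD
device it opens, and doorbell addresses are plain base-plus-slice arithmetic:
the slice is kfd_doorbell_process_slice() bytes, and individual doorbells are
addressed within the slice by doorbell_id, all kept in dword units. A hedged
sketch of that arithmetic with made-up constants:

    /* Illustrative constants; the driver reads these from the device. */
    #include <stdint.h>
    #include <stdio.h>

    #define DOORBELL_SLICE_BYTES 4096u /* assumed kfd_doorbell_process_slice() */
    #define DOORBELL_SIZE_BYTES     8u /* assumed device_info->doorbell_size */

    static uint32_t doorbell_dw_offset(uint32_t base_dw_offset,
                                       unsigned int doorbell_index,
                                       unsigned int doorbell_id)
    {
            /* Offsets stay in 4-byte dword units, as in the driver. */
            return base_dw_offset +
                   doorbell_index * DOORBELL_SLICE_BYTES / sizeof(uint32_t) +
                   doorbell_id * DOORBELL_SIZE_BYTES / sizeof(uint32_t);
    }

    int main(void)
    {
            /* Process slice 2, doorbell 3 -> 2 * 1024 + 3 * 2 dwords. */
            printf("dw offset = %u\n", doorbell_dw_offset(0, 2, 3));
            return 0;
    }
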
index f4b7f7e..5e90fe6 100644 (file)
@@ -70,6 +70,7 @@ err_create_wq:
 err_topology:
        kfd_chardev_exit();
 err_ioctl:
+       pr_err("KFD is disabled due to module initialization failure\n");
        return err;
 }
 
index 023629f..b7be5c5 100644 (file)
@@ -314,6 +314,11 @@ struct kfd_dev {
        spinlock_t smi_lock;
 
        uint32_t reset_seq_num;
+
+       struct ida doorbell_ida;
+       unsigned int max_doorbell_slices;
+
+       int noretry;
 };
 
 enum kfd_mempool {
@@ -631,7 +636,7 @@ enum kfd_pdd_bound {
        PDD_BOUND_SUSPENDED,
 };
 
-#define MAX_SYSFS_FILENAME_LEN 11
+#define MAX_SYSFS_FILENAME_LEN 15
 
 /*
  * SDMA counter runs at 100MHz frequency.
@@ -692,6 +697,39 @@ struct kfd_process_device {
        uint64_t sdma_past_activity_counter;
        struct attribute attr_sdma;
        char sdma_filename[MAX_SYSFS_FILENAME_LEN];
+
+       /* Eviction activity tracking */
+       uint64_t last_evict_timestamp;
+       atomic64_t evict_duration_counter;
+       struct attribute attr_evict;
+
+       struct kobject *kobj_stats;
+       unsigned int doorbell_index;
+
+       /*
+        * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
+        * that is associated with the device encoded by "this" struct instance.
+        * The value reflects CU usage by all of the waves launched by this
+        * process on this device. A very important property of the occupancy
+        * parameter is that its value is a snapshot of current use.
+        *
+        * The following is to be noted regarding how this parameter is reported:
+        *
+        *  The number of waves that a CU can launch is limited by a couple of
+        *  parameters. These are encoded by the struct amdgpu_cu_info instance
+        *  that is part of every device definition. For GFX9 devices this
+        *  translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
+        *  do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
+        *  when they do use scratch memory. This could change for future
+        *  devices and therefore this example should be considered as a guide.
+        *
+        *  All CUs of a device are available to the process. This may not be
+        *  true under certain conditions - e.g. CU masking.
+        *
+        *  Finally, the number of CUs occupied by a process depends on both
+        *  the number of CUs the device has and on other competing processes.
+        */
+       struct attribute attr_cu_occupancy;
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -729,7 +767,6 @@ struct kfd_process {
        struct mmu_notifier mmu_notifier;
 
        uint16_t pasid;
-       unsigned int doorbell_index;
 
        /*
         * List of kfd_process_device structures,
@@ -862,13 +899,13 @@ u32 read_kernel_doorbell(u32 __iomem *db);
 void write_kernel_doorbell(void __iomem *db, u32 value);
 void write_kernel_doorbell64(void __iomem *db, u64 value);
 unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
-                                       struct kfd_process *process,
+                                       struct kfd_process_device *pdd,
                                        unsigned int doorbell_id);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
-                                       struct kfd_process *process);
-int kfd_alloc_process_doorbells(struct kfd_process *process);
-void kfd_free_process_doorbells(struct kfd_process *process);
-
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
+                               unsigned int *doorbell_index);
+void kfd_free_process_doorbells(struct kfd_dev *kfd,
+                               unsigned int doorbell_index);
 /* GTT Sub-Allocator */
 
 int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
index a0e12a7..2807e1c 100644 (file)
@@ -249,6 +249,52 @@ cleanup:
        }
 }
 
+/**
+ * kfd_get_cu_occupancy - Collect number of waves in flight on this device
+ * by current process. Translates acquired wave count into number of compute
+ * units that are occupied.
+ *
+ * @attr: Handle of attribute that allows reporting of wave count. The
+ * attribute handle encapsulates the GPU device it is associated with,
+ * thereby allowing collection of waves in flight, etc.
+ *
+ * @buffer: Handle of user provided buffer updated with wave count
+ *
+ * Return: Number of bytes written to user buffer or an error value
+ */
+static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+{
+       int cu_cnt;
+       int wave_cnt;
+       int max_waves_per_cu;
+       struct kfd_dev *dev = NULL;
+       struct kfd_process *proc = NULL;
+       struct kfd_process_device *pdd = NULL;
+
+       pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+       dev = pdd->dev;
+       if (dev->kfd2kgd->get_cu_occupancy == NULL)
+               return -EINVAL;
+
+       cu_cnt = 0;
+       proc = pdd->process;
+       if (pdd->qpd.queue_count == 0) {
+               pr_debug("Gpu-Id: %d has no active queues for process %d\n",
+                        dev->id, proc->pasid);
+               return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+       }
+
+       /* Collect wave count from device if it supports */
+       wave_cnt = 0;
+       max_waves_per_cu = 0;
+       dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
+                       &max_waves_per_cu);
+
+       /* Translate wave count to number of compute units */
+       cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
+       return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+}
+
 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
 {
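
The translation at the end of kfd_get_cu_occupancy() is a ceiling division:
any compute unit with at least one wave in flight counts as occupied. A small
sketch of that step (the values are examples; per the comment above, GFX9
without scratch reports max_waves_per_cu = 40):

    #include <stdio.h>

    /* Round-up division: a partially used CU still counts as occupied. */
    static int waves_to_cus(int wave_cnt, int max_waves_per_cu)
    {
            return (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
    }

    int main(void)
    {
            printf("%d\n", waves_to_cus(75, 40)); /* 75 waves -> 2 CUs */
            return 0;
    }
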
@@ -345,6 +391,32 @@ static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
        return 0;
 }
 
+static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
+                                    struct attribute *attr, char *buffer)
+{
+       if (strcmp(attr->name, "evicted_ms") == 0) {
+               struct kfd_process_device *pdd = container_of(attr,
+                               struct kfd_process_device,
+                               attr_evict);
+               uint64_t evict_jiffies;
+
+               evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
+
+               return snprintf(buffer,
+                               PAGE_SIZE,
+                               "%llu\n",
+                               jiffies64_to_msecs(evict_jiffies));
+
+       /* Sysfs handle that gets CU occupancy is per device */
+       } else if (strcmp(attr->name, "cu_occupancy") == 0) {
+               return kfd_get_cu_occupancy(attr, buffer);
+       } else {
+               pr_err("Invalid attribute\n");
+       }
+
+       return 0;
+}
+
 static struct attribute attr_queue_size = {
        .name = "size",
        .mode = KFD_SYSFS_FILE_MODE
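
The evicted_ms attribute above is plain delta accounting: last_evict_timestamp
is taken when queues are evicted, the difference is accumulated into
evict_duration_counter on restore, and the sysfs read converts the running
jiffies total to milliseconds. A user-space sketch of the same bookkeeping
(plain integers stand in for the kernel's atomic64 and jiffies helpers):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250 /* assumed tick rate; the driver uses jiffies64_to_msecs() */

    struct pdd_stats {
            uint64_t last_evict_timestamp;   /* "jiffies" at eviction */
            uint64_t evict_duration_counter; /* accumulated time evicted */
    };

    static void on_evict(struct pdd_stats *s, uint64_t now)
    {
            s->last_evict_timestamp = now;
    }

    static void on_restore(struct pdd_stats *s, uint64_t now)
    {
            s->evict_duration_counter += now - s->last_evict_timestamp;
    }

    int main(void)
    {
            struct pdd_stats s = {0, 0};

            on_evict(&s, 1000);
            on_restore(&s, 1500); /* 500 ticks evicted */
            printf("evicted_ms: %llu\n",
                   (unsigned long long)(s.evict_duration_counter * 1000 / HZ));
            return 0;
    }
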
@@ -376,6 +448,19 @@ static struct kobj_type procfs_queue_type = {
        .default_attrs = procfs_queue_attrs,
 };
 
+static const struct sysfs_ops procfs_stats_ops = {
+       .show = kfd_procfs_stats_show,
+};
+
+static struct attribute *procfs_stats_attrs[] = {
+       NULL
+};
+
+static struct kobj_type procfs_stats_type = {
+       .sysfs_ops = &procfs_stats_ops,
+       .default_attrs = procfs_stats_attrs,
+};
+
 int kfd_procfs_add_queue(struct queue *q)
 {
        struct kfd_process *proc;
@@ -417,6 +502,72 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
        return ret;
 }
 
+static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+{
+       int ret = 0;
+       struct kfd_process_device *pdd;
+       char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
+
+       if (!p)
+               return -EINVAL;
+
+       if (!p->kobj)
+               return -EFAULT;
+
+       /*
+        * Create sysfs files for each GPU:
+        * - proc/<pid>/stats_<gpuid>/
+        * - proc/<pid>/stats_<gpuid>/evicted_ms
+        * - proc/<pid>/stats_<gpuid>/cu_occupancy
+        */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               struct kobject *kobj_stats;
+
+               snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
+                               "stats_%u", pdd->dev->id);
+               kobj_stats = kfd_alloc_struct(kobj_stats);
+               if (!kobj_stats)
+                       return -ENOMEM;
+
+               ret = kobject_init_and_add(kobj_stats,
+                                               &procfs_stats_type,
+                                               p->kobj,
+                                               stats_dir_filename);
+
+               if (ret) {
+                       pr_warn("Creating KFD proc/%s folder failed\n",
+                                       stats_dir_filename);
+                       kobject_put(kobj_stats);
+                       goto err;
+               }
+
+               pdd->kobj_stats = kobj_stats;
+               pdd->attr_evict.name = "evicted_ms";
+               pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
+               sysfs_attr_init(&pdd->attr_evict);
+               ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
+               if (ret)
+                       pr_warn("Creating eviction stats for gpuid %d failed",
+                                       (int)pdd->dev->id);
+
+               /* Add sysfs file to report compute unit occupancy */
+               if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
+                       pdd->attr_cu_occupancy.name = "cu_occupancy";
+                       pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
+                       sysfs_attr_init(&pdd->attr_cu_occupancy);
+                       ret = sysfs_create_file(kobj_stats,
+                                               &pdd->attr_cu_occupancy);
+                       if (ret)
+                               pr_warn("Creating %s failed for gpuid: %d",
+                                       pdd->attr_cu_occupancy.name,
+                                       (int)pdd->dev->id);
+               }
+       }
+err:
+       return ret;
+}
+
+
 static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
 {
        int ret = 0;
@@ -452,7 +603,6 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
        return ret;
 }
 
-
 void kfd_procfs_del_queue(struct queue *q)
 {
        if (!q)
@@ -660,6 +810,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
                if (!process->kobj_queues)
                        pr_warn("Creating KFD proc/queues folder failed");
 
+               ret = kfd_procfs_add_sysfs_stats(process);
+               if (ret)
+                       pr_warn("Creating sysfs stats dir for pid %d failed",
+                               (int)process->lead_thread->pid);
+
                ret = kfd_procfs_add_sysfs_files(process);
                if (ret)
                        pr_warn("Creating sysfs usage file for pid %d failed",
@@ -781,6 +936,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);
 
+               kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+
                /*
                 * before destroying pdd, make sure to report availability
                 * for auto suspend
@@ -816,6 +973,12 @@ static void kfd_process_wq_release(struct work_struct *work)
                list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                        sysfs_remove_file(p->kobj, &pdd->attr_vram);
                        sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+                       sysfs_remove_file(p->kobj, &pdd->attr_evict);
+                       if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
+                               sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
+                       kobject_del(pdd->kobj_stats);
+                       kobject_put(pdd->kobj_stats);
+                       pdd->kobj_stats = NULL;
                }
 
                kobject_del(p->kobj);
@@ -833,8 +996,6 @@ static void kfd_process_wq_release(struct work_struct *work)
        kfd_event_free_process(p);
 
        kfd_pasid_free(p->pasid);
-       kfd_free_process_doorbells(p);
-
        mutex_destroy(&p->mutex);
 
        put_task_struct(p->lead_thread);
@@ -1012,9 +1173,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        if (process->pasid == 0)
                goto err_alloc_pasid;
 
-       if (kfd_alloc_process_doorbells(process) < 0)
-               goto err_alloc_doorbells;
-
        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;
@@ -1042,8 +1200,6 @@ err_register_notifier:
 err_init_apertures:
        pqm_uninit(&process->pqm);
 err_process_pqm_init:
-       kfd_free_process_doorbells(process);
-err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
 err_alloc_pasid:
        mutex_destroy(&process->mutex);
@@ -1106,10 +1262,14 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        if (!pdd)
                return NULL;
 
+       if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+               pr_err("Failed to alloc doorbell for pdd\n");
+               goto err_free_pdd;
+       }
+
        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
-               kfree(pdd);
-               return NULL;
+               goto err_free_pdd;
        }
 
        pdd->dev = dev;
@@ -1125,12 +1285,17 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->runtime_inuse = false;
        pdd->vram_usage = 0;
        pdd->sdma_past_activity_counter = 0;
+       atomic64_set(&pdd->evict_duration_counter, 0);
        list_add(&pdd->per_device_list, &p->per_device_data);
 
        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);
 
        return pdd;
+
+err_free_pdd:
+       kfree(pdd);
+       return NULL;
 }
 
 /**
@@ -1488,6 +1653,7 @@ void kfd_suspend_all_processes(void)
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);
 
+       WARN(debug_evictions, "Evicting all processes");
        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);
index b24c14b..bb1bc7f 100644 (file)
@@ -228,17 +228,14 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-               struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-                               acrtc->base.state);
 
-
-               if (acrtc_state->stream == NULL) {
+               if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }
 
-               return dc_stream_get_vblank_counter(acrtc_state->stream);
+               return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
 }
 
@@ -251,10 +248,8 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-               struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-                                               acrtc->base.state);
 
-               if (acrtc_state->stream ==  NULL) {
+               if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
@@ -264,7 +259,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
-               dc_stream_get_scanoutpos(acrtc_state->stream,
+               dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
@@ -323,6 +318,14 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
        return NULL;
 }
 
+static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+{
+       return acrtc->dm_irq_params.freesync_config.state ==
+                      VRR_STATE_ACTIVE_VARIABLE ||
+              acrtc->dm_irq_params.freesync_config.state ==
+                      VRR_STATE_ACTIVE_FIXED;
+}
+
 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
 {
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
@@ -343,7 +346,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
-       struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;
 
@@ -375,12 +377,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
        if (!e)
                WARN_ON(1);
 
-       acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
-       vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+       vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
 
        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
-           !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+           !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
@@ -425,7 +426,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
-       amdgpu_crtc->last_flip_vblank =
+       amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
 
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
@@ -441,17 +442,17 @@ static void dm_vupdate_high_irq(void *interrupt_params)
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
-       struct dm_crtc_state *acrtc_state;
        unsigned long flags;
+       int vrr_active;
 
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
 
        if (acrtc) {
-               acrtc_state = to_dm_crtc_state(acrtc->base.state);
+               vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
 
                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
-                             amdgpu_dm_vrr_active(acrtc_state));
+                             vrr_active);
 
                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
@@ -459,22 +460,22 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
-               if (amdgpu_dm_vrr_active(acrtc_state)) {
+               if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);
 
                        /* BTR processing for pre-DCE12 ASICs */
-                       if (acrtc_state->stream &&
+                       if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
-                                   acrtc_state->stream,
-                                   &acrtc_state->vrr_params);
+                                   acrtc->dm_irq_params.stream,
+                                   &acrtc->dm_irq_params.vrr_params);
 
                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
-                                   acrtc_state->stream,
-                                   &acrtc_state->vrr_params.adjust);
+                                   acrtc->dm_irq_params.stream,
+                                   &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
@@ -493,18 +494,17 @@ static void dm_crtc_high_irq(void *interrupt_params)
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
-       struct dm_crtc_state *acrtc_state;
        unsigned long flags;
+       int vrr_active;
 
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;
 
-       acrtc_state = to_dm_crtc_state(acrtc->base.state);
+       vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
 
        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
-                        amdgpu_dm_vrr_active(acrtc_state),
-                        acrtc_state->active_planes);
+                     vrr_active, acrtc->dm_irq_params.active_planes);
 
        /**
         * Core vblank handling at start of front-porch is only possible
@@ -512,7 +512,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
-       if (!amdgpu_dm_vrr_active(acrtc_state))
+       if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);
 
        /**
@@ -527,14 +527,16 @@ static void dm_crtc_high_irq(void *interrupt_params)
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-       if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
-           acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+       if (acrtc->dm_irq_params.stream &&
+           acrtc->dm_irq_params.vrr_params.supported &&
+           acrtc->dm_irq_params.freesync_config.state ==
+                   VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
-                                            acrtc_state->stream,
-                                            &acrtc_state->vrr_params);
+                                            acrtc->dm_irq_params.stream,
+                                            &acrtc->dm_irq_params.vrr_params);
 
-               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
-                                          &acrtc_state->vrr_params.adjust);
+               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+                                          &acrtc->dm_irq_params.vrr_params.adjust);
        }
 
        /*
@@ -549,7 +551,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
-           acrtc_state->active_planes == 0) {
+           acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
@@ -878,6 +880,47 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        return 0;
 }
 
+static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
+                                                          struct drm_atomic_state *state)
+{
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct amdgpu_dm_connector *amdgpu_dm_connector;
+       struct drm_connector_state *conn_state;
+       struct dm_crtc_state *acrtc_state;
+       struct drm_crtc_state *crtc_state;
+       struct dc_stream_state *stream;
+       struct drm_device *dev = adev_to_drm(adev);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+               amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+               conn_state = connector->state;
+
+               if (!(conn_state && conn_state->crtc))
+                       continue;
+
+               crtc = conn_state->crtc;
+               acrtc_state = to_dm_crtc_state(crtc->state);
+
+               if (!(acrtc_state && acrtc_state->stream))
+                       continue;
+
+               stream = acrtc_state->stream;
+
+               if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
+                   amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
+                   amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
+                   amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
+                       conn_state = drm_atomic_get_connector_state(state, connector);
+                       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+                       if (IS_ERR(conn_state) || IS_ERR(crtc_state))
+                               continue;
+                       crtc_state->mode_changed = true;
+               }
+       }
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1425,9 +1466,6 @@ static int dm_late_init(void *handle)
        struct dmcu *dmcu = NULL;
        bool ret = true;
 
-       if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
-               return detect_mst_link_for_all_connectors(adev_to_drm(adev));
-
        dmcu = adev->dm.dc->res_pool->dmcu;
 
        for (i = 0; i < 16; i++)
@@ -3373,9 +3411,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                goto fail;
        }
 
-       /* No userspace support. */
-       dm->dc->debug.disable_tri_buf = true;
-
        return 0;
 fail:
        kfree(aencoder);
@@ -4689,9 +4724,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                                             dc_link_get_link_cap(aconnector->dc_link));
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (dsc_caps.is_dsc_supported) {
+               if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
                        /* Set DSC policy according to dsc_clock_en */
-                       dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
+                       dc_dsc_policy_set_enable_dsc_when_not_needed(
+                               aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
 
                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                  &dsc_caps,
@@ -4701,16 +4737,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                                  &stream->timing.dsc_cfg))
                                stream->timing.flags.DSC = 1;
                        /* Overwrite the stream flag if DSC is enabled through debugfs */
-                       if (aconnector->dsc_settings.dsc_clock_en)
+                       if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
                                stream->timing.flags.DSC = 1;
 
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
-                               stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
-                                                                       aconnector->dsc_settings.dsc_slice_width);
+                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+                               stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
 
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
-                               stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
-                                                                       aconnector->dsc_settings.dsc_slice_height);
+                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+                               stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
 
                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
                                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
@@ -4809,7 +4843,6 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
        }
 
        state->active_planes = cur->active_planes;
-       state->vrr_params = cur->vrr_params;
        state->vrr_infopacket = cur->vrr_infopacket;
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
@@ -5427,19 +5460,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
 {
 }
 
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = new_crtc_state->crtc->dev;
-       struct drm_plane *plane;
-
-       drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
-               if (plane->type == DRM_PLANE_TYPE_CURSOR)
-                       return true;
-       }
-
-       return false;
-}
-
 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
 {
        struct drm_atomic_state *state = new_crtc_state->state;
@@ -5503,19 +5523,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                return ret;
        }
 
-       /* In some use cases, like reset, no stream is attached */
-       if (!dm_crtc_state->stream)
-               return 0;
-
        /*
-        * We want at least one hardware plane enabled to use
-        * the stream with a cursor enabled.
+        * We require the primary plane to be enabled whenever the CRTC is, otherwise
+        * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+        * planes are disabled, which is not supported by the hardware. And there is legacy
+        * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
-       if (state->enable && state->active &&
-           does_crtc_have_active_cursor(state) &&
-           dm_crtc_state->active_planes == 0)
+       if (state->enable &&
+           !(state->plane_mask & drm_plane_mask(crtc->primary)))
                return -EINVAL;
 
+       /* In some use cases, like reset, no stream is attached */
+       if (!dm_crtc_state->stream)
+               return 0;
+
        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;
 
@@ -6862,6 +6883,7 @@ static void update_freesync_state_on_stream(
        struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct amdgpu_device *adev = dm->adev;
+       struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
 
        if (!new_stream)
@@ -6876,7 +6898,7 @@ static void update_freesync_state_on_stream(
                return;
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-       vrr_params = new_crtc_state->vrr_params;
+       vrr_params = acrtc->dm_irq_params.vrr_params;
 
        if (surface) {
                mod_freesync_handle_preflip(
@@ -6907,7 +6929,7 @@ static void update_freesync_state_on_stream(
                &vrr_infopacket);
 
        new_crtc_state->freesync_timing_changed |=
-               (memcmp(&new_crtc_state->vrr_params.adjust,
+               (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);
 
@@ -6916,10 +6938,10 @@ static void update_freesync_state_on_stream(
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);
 
-       new_crtc_state->vrr_params = vrr_params;
+       acrtc->dm_irq_params.vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;
 
-       new_stream->adjust = new_crtc_state->vrr_params.adjust;
+       new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;
 
        if (new_crtc_state->freesync_vrr_info_changed)
@@ -6931,7 +6953,7 @@ static void update_freesync_state_on_stream(
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
-static void pre_update_freesync_state_on_stream(
+static void update_stream_irq_parameters(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state)
 {
@@ -6939,6 +6961,7 @@ static void pre_update_freesync_state_on_stream(
        struct mod_vrr_params vrr_params;
        struct mod_freesync_config config = new_crtc_state->freesync_config;
        struct amdgpu_device *adev = dm->adev;
+       struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
 
        if (!new_stream)
@@ -6952,7 +6975,7 @@ static void pre_update_freesync_state_on_stream(
                return;
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-       vrr_params = new_crtc_state->vrr_params;
+       vrr_params = acrtc->dm_irq_params.vrr_params;
 
        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
@@ -6969,11 +6992,14 @@ static void pre_update_freesync_state_on_stream(
                                      &config, &vrr_params);
 
        new_crtc_state->freesync_timing_changed |=
-               (memcmp(&new_crtc_state->vrr_params.adjust,
-                       &vrr_params.adjust,
-                       sizeof(vrr_params.adjust)) != 0);
+               (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
+                       &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
 
-       new_crtc_state->vrr_params = vrr_params;
+       new_crtc_state->freesync_config = config;
+       /* Copy state for access from DM IRQ handler */
+       acrtc->dm_irq_params.freesync_config = config;
+       acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+       acrtc->dm_irq_params.vrr_params = vrr_params;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
@@ -7197,7 +7223,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                         * on late submission of flips.
                         */
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
-                       last_flip_vblank = acrtc_attach->last_flip_vblank;
+                       last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
 
@@ -7281,7 +7307,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
-                               &acrtc_state->vrr_params.adjust);
+                               &acrtc_attach->dm_irq_params.vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
@@ -7431,34 +7457,6 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock)
 {
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       int i;
-
-       /*
-        * We evade vblank and pflip interrupts on CRTCs that are undergoing
-        * a modeset, being disabled, or have no active planes.
-        *
-        * It's done in atomic commit rather than commit tail for now since
-        * some of these interrupt handlers access the current CRTC state and
-        * potentially the stream pointer itself.
-        *
-        * Since the atomic state is swapped within atomic commit and not within
-        * commit tail this would leave to new state (that hasn't been committed yet)
-        * being accesssed from within the handlers.
-        *
-        * TODO: Fix this so we can do this in commit tail and not have to block
-        * in atomic check.
-        */
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
-               if (old_crtc_state->active &&
-                   (!new_crtc_state->active ||
-                    drm_atomic_crtc_needs_modeset(new_crtc_state)))
-                       manage_dm_interrupts(adev, acrtc, false);
-       }
        /*
         * Add check here for SoC's that support hardware cursor plane, to
         * unset legacy_cursor_update
@@ -7509,6 +7507,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }
 
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+                                     new_crtc_state, i) {
+               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+               dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+               if (old_crtc_state->active &&
+                   (!new_crtc_state->active ||
+                    drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+                       manage_dm_interrupts(adev, acrtc, false);
+                       dc_stream_release(dm_old_crtc_state->stream);
+               }
+       }
+
        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -7604,7 +7616,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        if (!status)
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);
-
                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
@@ -7730,8 +7741,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
-               /* Update freesync active state. */
-               pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
+               /* For freesync config update on crtc state and params for irq */
+               update_stream_irq_parameters(dm, dm_new_crtc_state);
 
                /* Handle vrr on->off / off->on transitions */
                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
@@ -7747,10 +7758,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
+               dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
                if (new_crtc_state->active &&
                    (!old_crtc_state->active ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+                       dc_stream_retain(dm_new_crtc_state->stream);
+                       acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
                        manage_dm_interrupts(adev, acrtc, true);
+
 #ifdef CONFIG_DEBUG_FS
                        /**
                         * Frontend may have changed so reapply the CRC capture
@@ -7994,8 +8010,6 @@ static void reset_freesync_config_for_crtc(
 {
        new_crtc_state->vrr_supported = false;
 
-       memset(&new_crtc_state->vrr_params, 0,
-              sizeof(new_crtc_state->vrr_params));
        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
 }
@@ -8566,6 +8580,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        int ret, i;
        bool lock_and_validation_needed = false;
 
+       amdgpu_check_debugfs_connector_property_change(adev, state);
+
        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                goto fail;
index a7856ae..34f6369 100644 (file)
@@ -149,6 +149,8 @@ struct amdgpu_dm_backlight_caps {
  * @cached_state: Caches device atomic state for suspend/resume
  * @cached_dc_state: Cached state of content streams
  * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ * @force_timing_sync: set via debugfs. When set, indicates that all connected
+ *                    displays will be forced to synchronize.
  */
 struct amdgpu_display_manager {
 
@@ -340,13 +342,19 @@ struct amdgpu_display_manager {
         * fake encoders used for DP MST.
         */
        struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
-        bool force_timing_sync;
+       bool force_timing_sync;
+};
+
+enum dsc_clock_force_state {
+       DSC_CLK_FORCE_DEFAULT = 0,
+       DSC_CLK_FORCE_ENABLE,
+       DSC_CLK_FORCE_DISABLE,
 };
 
 struct dsc_preferred_settings {
-       bool dsc_clock_en;
-       uint32_t dsc_slice_width;
-       uint32_t dsc_slice_height;
+       enum dsc_clock_force_state dsc_force_enable;
+       uint32_t dsc_num_slices_v;
+       uint32_t dsc_num_slices_h;
        uint32_t dsc_bits_per_pixel;
 };
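
The enum above turns the old boolean dsc_clock_en into a tri-state, so debugfs
can force DSC on, force it off, or leave the decision to stream validation
(DSC_CLK_FORCE_DEFAULT). A sketch of how such a tri-state typically resolves
against a computed capability (the resolver name is illustrative, not the
driver's):

    #include <stdbool.h>
    #include <stdio.h>

    enum dsc_clock_force_state {
            DSC_CLK_FORCE_DEFAULT = 0,
            DSC_CLK_FORCE_ENABLE,
            DSC_CLK_FORCE_DISABLE,
    };

    /* Hypothetical resolver: an explicit force wins over the computed
     * DSC decision; default defers to it.
     */
    static bool dsc_enabled(enum dsc_clock_force_state force, bool computed)
    {
            if (force == DSC_CLK_FORCE_ENABLE)
                    return true;
            if (force == DSC_CLK_FORCE_DISABLE)
                    return false;
            return computed;
    }

    int main(void)
    {
            printf("%d\n", dsc_enabled(DSC_CLK_FORCE_DISABLE, true)); /* 0 */
            return 0;
    }
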
 
@@ -434,7 +442,6 @@ struct dm_crtc_state {
 
        bool vrr_supported;
        struct mod_freesync_config freesync_config;
-       struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket;
 
        int abm_level;
index 94fcb08..8cd646e 100644 (file)
@@ -111,7 +111,6 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
 
        if (*param_nums > max_param_num)
                *param_nums = max_param_num;
-;
 
        wr_buf_ptr = wr_buf; /* reset buf pointer */
        wr_buf_count = 0; /* number of char already checked */
@@ -265,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        if (!wr_buf)
                return -ENOSPC;
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                           (long *)param, buf,
                                           max_param_num,
                                           &param_nums)) {
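The same wr_buf_size -> size substitution repeats in every debugfs writer below. A short sketch of the two quantities involved (names from the hunks; the allocation shape is assumed from the surrounding functions):

    char *wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
    /* wr_buf_size: fixed kernel scratch capacity per writer        */
    /* size:        length of this particular user write            */
    /* Bounding the parse by 'size' keeps the helper from consuming */
    /* bytes the user never wrote.                                  */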
@@ -424,7 +423,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
        if (!wr_buf)
                return -ENOSPC;
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                           (long *)param, buf,
                                           max_param_num,
                                           &param_nums)) {
@@ -576,7 +575,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
        if (!wr_buf)
                return -ENOSPC;
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                           (long *)param, buf,
                                           max_param_num,
                                           &param_nums)) {
@@ -909,7 +908,7 @@ static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
        if (size < sizeof(connector->debugfs_dpcd_address))
-               return 0;
+               return -EINVAL;
 
        r = copy_from_user(&connector->debugfs_dpcd_address,
                        buf, sizeof(connector->debugfs_dpcd_address));
@@ -924,7 +923,7 @@ static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
        if (size < sizeof(connector->debugfs_dpcd_size))
-               return 0;
+               return -EINVAL;
 
        r = copy_from_user(&connector->debugfs_dpcd_size,
                        buf, sizeof(connector->debugfs_dpcd_size));
@@ -944,8 +943,8 @@ static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
        struct dc_link *link = connector->dc_link;
        uint32_t write_size = connector->debugfs_dpcd_size;
 
-       if (size < write_size)
-               return 0;
+       if (!write_size || size < write_size)
+               return -EINVAL;
 
        data = kzalloc(write_size, GFP_KERNEL);
        if (!data)
@@ -968,7 +967,7 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
        struct dc_link *link = connector->dc_link;
        uint32_t read_size = connector->debugfs_dpcd_size;
 
-       if (size < read_size)
+       if (!read_size || size < read_size)
                return 0;
 
        data = kzalloc(read_size, GFP_KERNEL);
@@ -1059,12 +1058,17 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
  *
  *     echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
  *
+ * The same interface can also simulate an HPD unplug:
+ *
+ *     echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
+ *
  */
 static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
                                                        size_t size, loff_t *pos)
 {
        struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
        struct drm_connector *connector = &aconnector->base;
+       struct dc_link *link = NULL;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        char *wr_buf = NULL;
@@ -1086,11 +1090,13 @@ static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
                return -ENOSPC;
        }
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                                (long *)param, buf,
                                                max_param_num,
-                                               &param_nums))
+                                               &param_nums)) {
+               kfree(wr_buf);
                return -EINVAL;
+       }
 
        if (param_nums <= 0) {
                DRM_DEBUG_DRIVER("user data not be read\n");
@@ -1115,11 +1121,33 @@ static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
                drm_modeset_unlock_all(dev);
 
                drm_kms_helper_hotplug_event(dev);
+       } else if (param[0] == 0) {
+               if (!aconnector->dc_link)
+                       goto unlock;
 
-unlock:
-               mutex_unlock(&aconnector->hpd_lock);
+               link = aconnector->dc_link;
+
+               if (link->local_sink) {
+                       dc_sink_release(link->local_sink);
+                       link->local_sink = NULL;
+               }
+
+               link->dpcd_sink_count = 0;
+               link->type = dc_connection_none;
+               link->dongle_max_pix_clk = 0;
+
+               amdgpu_dm_update_connector_after_detect(aconnector);
+
+               drm_modeset_lock_all(dev);
+               dm_restore_drm_connector_state(dev, connector);
+               drm_modeset_unlock_all(dev);
+
+               drm_kms_helper_hotplug_event(dev);
        }
 
+unlock:
+       mutex_unlock(&aconnector->hpd_lock);
+
        kfree(wr_buf);
        return size;
 }
@@ -1200,9 +1228,14 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
  *
  * The write function: dp_dsc_clock_en_write
  * enables to force DSC on the connector.
- * User can write to either force enable DSC
+ * A user can write to force enable or force disable DSC
  * on the next modeset or set it to driver default
  *
+ * Accepted inputs:
+ * 0 - default DSC enablement policy
+ * 1 - force enable DSC on the connector
+ * 2 - force disable DSC on the connector (may cause atomic_check to fail)
+ *
  * Writing DSC settings is done with the following command:
  * - To force enable DSC (you need to specify
  * connector like DP-1):
@@ -1238,7 +1271,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
                return -ENOSPC;
        }
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                            (long *)param, buf,
                                            max_param_num,
                                            &param_nums)) {
@@ -1262,7 +1295,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
        if (!pipe_ctx || !pipe_ctx->stream)
                goto done;
 
-       aconnector->dsc_settings.dsc_clock_en = param[0];
+       if (param[0] == 1)
+               aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
+       else if (param[0] == 2)
+               aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
+       else
+               aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
 
 done:
        kfree(wr_buf);
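With the tri-state in place, a typical debugfs session could look like this (connector name DP-1 is illustrative, and the entry name is assumed from the function names above):

    echo 1 > /sys/kernel/debug/dri/0/DP-1/dsc_clock_en   # force DSC on at next modeset
    echo 2 > /sys/kernel/debug/dri/0/DP-1/dsc_clock_en   # force DSC off (atomic_check may fail)
    echo 0 > /sys/kernel/debug/dri/0/DP-1/dsc_clock_en   # return to driver default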
@@ -1387,7 +1425,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
                return -ENOSPC;
        }
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                            (long *)param, buf,
                                            max_param_num,
                                            &param_nums)) {
@@ -1411,7 +1449,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
        if (!pipe_ctx || !pipe_ctx->stream)
                goto done;
 
-       aconnector->dsc_settings.dsc_slice_width = param[0];
+       if (param[0] > 0)
+               aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
+                                       pipe_ctx->stream->timing.h_addressable,
+                                       param[0]);
+       else
+               aconnector->dsc_settings.dsc_num_slices_h = 0;
 
 done:
        kfree(wr_buf);
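The user-supplied slice width is now converted into a slice count at write time rather than carried through to the DSC computation. A worked example with illustrative numbers:

    /* writing 1920 against a 3840-pixel-wide timing */
    dsc_num_slices_h = DIV_ROUND_UP(3840, 1920);   /* == 2 slices */
    /* writing 0 clears the override (dsc_num_slices_h = 0) */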
@@ -1536,7 +1579,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
                return -ENOSPC;
        }
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                            (long *)param, buf,
                                            max_param_num,
                                            &param_nums)) {
@@ -1560,7 +1603,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
        if (!pipe_ctx || !pipe_ctx->stream)
                goto done;
 
-       aconnector->dsc_settings.dsc_slice_height = param[0];
+       if (param[0] > 0)
+               aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
+                                       pipe_ctx->stream->timing.v_addressable,
+                                       param[0]);
+       else
+               aconnector->dsc_settings.dsc_num_slices_v = 0;
 
 done:
        kfree(wr_buf);
@@ -1678,7 +1726,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
                return -ENOSPC;
        }
 
-       if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+       if (parse_write_buffer_into_params(wr_buf, size,
                                            (long *)param, buf,
                                            max_param_num,
                                            &param_nums)) {
@@ -2098,6 +2146,7 @@ static const struct {
        const struct file_operations *fops;
 } dp_debugfs_entries[] = {
                {"link_settings", &dp_link_settings_debugfs_fops},
+               {"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
                {"test_pattern", &dp_phy_test_pattern_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
index 694c5bc..c2cd184 100644 (file)
@@ -604,7 +604,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
        int i = 0;
 
        hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
-       if (hdcp_work == NULL)
+       if (ZERO_OR_NULL_PTR(hdcp_work))
                return NULL;
 
        hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
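The stricter check matters because kcalloc() with a zero element count does not return NULL; it returns ZERO_SIZE_PTR, a small non-NULL poison value, so the old NULL-only test let a zero max_caps slip through. The relevant definitions from include/linux/slab.h:

    #define ZERO_SIZE_PTR ((void *)16)
    #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                    (unsigned long)ZERO_SIZE_PTR)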
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
new file mode 100644 (file)
index 0000000..45825a3
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_PARAMS_H__
+#define __AMDGPU_DM_IRQ_PARAMS_H__
+
+struct dm_irq_params {
+       u32 last_flip_vblank;
+       struct mod_vrr_params vrr_params;
+       struct dc_stream_state *stream;
+       int active_planes;
+       struct mod_freesync_config freesync_config;
+};
+
+#endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */
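The new header collects the values the interrupt handlers need into a per-CRTC copy owned by the interrupt path, instead of having handlers reach into atomic state (vrr_params, for instance, moves here out of dm_crtc_state, as the hunks above and below show). A minimal access sketch; the helper name is hypothetical:

    /* sketch: a vblank handler reading the per-CRTC copy */
    struct dm_irq_params *p = &acrtc->dm_irq_params;

    if (p->stream)                       /* reference taken at commit time */
            handle_vrr(&p->vrr_params);  /* hypothetical helper */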
index adbb448..eee19ed 100644 (file)
@@ -159,7 +159,20 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        u8 dsc_caps[16] = { 0 };
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+#if defined(CONFIG_HP_HOOK_WORKAROUND)
+       /*
+        * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
+        * because it only checks the dsc/fec caps of the port and not the dock
+        *
+        * This case will return NULL: a DSC-capable MST dock connected to a
+        * non-FEC/DSC-capable display
+        *
+        * Workaround: explicitly check the use case above and use the MST
+        * dock's aux as dsc_aux
+        *
+        */
 
+       if (!aconnector->dsc_aux && !port->parent->port_parent)
+               aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+#endif
        if (!aconnector->dsc_aux)
                return false;
 
@@ -453,9 +466,9 @@ struct dsc_mst_fairness_params {
        struct dc_dsc_bw_range bw_range;
        bool compression_possible;
        struct drm_dp_mst_port *port;
-       bool clock_overwrite;
-       uint32_t slice_width_overwrite;
-       uint32_t slice_height_overwrite;
+       enum dsc_clock_force_state clock_force_enable;
+       uint32_t num_slices_h;
+       uint32_t num_slices_v;
        uint32_t bpp_overwrite;
 };
 
@@ -496,15 +509,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
                        else
                                params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
 
-                       if (params[i].slice_width_overwrite)
-                               params[i].timing->dsc_cfg.num_slices_h = DIV_ROUND_UP(
-                                                                               params[i].timing->h_addressable,
-                                                                               params[i].slice_width_overwrite);
+                       if (params[i].num_slices_h)
+                               params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
 
-                       if (params[i].slice_height_overwrite)
-                               params[i].timing->dsc_cfg.num_slices_v = DIV_ROUND_UP(
-                                                                               params[i].timing->v_addressable,
-                                                                               params[i].slice_height_overwrite);
+                       if (params[i].num_slices_v)
+                               params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
                } else {
                        params[i].timing->flags.DSC = 0;
                }
@@ -638,7 +647,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled
                                && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
-                               && !params[i].clock_overwrite) {
+                               && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
                        kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
                        tried[i] = false;
                        remaining_to_try += 1;
@@ -718,11 +727,11 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
                params[count].port = aconnector->port;
-               params[count].clock_overwrite = aconnector->dsc_settings.dsc_clock_en;
-               if (params[count].clock_overwrite)
+               params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
+               if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
                        debugfs_overwrite = true;
-               params[count].slice_width_overwrite = aconnector->dsc_settings.dsc_slice_width;
-               params[count].slice_height_overwrite = aconnector->dsc_settings.dsc_slice_height;
+               params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+               params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
                params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
                params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
                dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
@@ -756,7 +765,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 
        /* Try max compression */
        for (i = 0; i < count; i++) {
-               if (params[i].compression_possible) {
+               if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
                        vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
                        vars[i].dsc_enabled = true;
                        vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
index c5f2216..6e575ff 100644 (file)
@@ -592,9 +592,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
        if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
                pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
                                                           &wm_with_clock_ranges);
-       else if (adev->smu.ppt_funcs)
-               smu_set_watermarks_for_clock_ranges(&adev->smu,
-                               &wm_with_clock_ranges);
 }
 
 void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -667,49 +664,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
-       struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
-                       wm_with_clock_ranges.wm_dmif_clocks_ranges;
-       struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
-                       wm_with_clock_ranges.wm_mcif_clocks_ranges;
-       int32_t i;
 
-       wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
-       wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
-       for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
-               if (ranges->reader_wm_sets[i].wm_inst > 3)
-                       wm_dce_clocks[i].wm_set_id = WM_SET_A;
-               else
-                       wm_dce_clocks[i].wm_set_id =
-                                       ranges->reader_wm_sets[i].wm_inst;
-               wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-                       ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
-               wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-                       ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
-               wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-                       ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
-               wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-                       ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
-       }
-
-       for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
-               if (ranges->writer_wm_sets[i].wm_inst > 3)
-                       wm_soc_clocks[i].wm_set_id = WM_SET_A;
-               else
-                       wm_soc_clocks[i].wm_set_id =
-                                       ranges->writer_wm_sets[i].wm_inst;
-               wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-                       ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
-               wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-                       ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
-               wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-                       ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
-               wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-                       ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
-       }
-
-       smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 
        return PP_SMU_RESULT_OK;
 }
@@ -810,7 +766,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 }
 
 static enum pp_smu_status pp_nv_set_pstate_handshake_support(
-       struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+       struct pp_smu *pp, bool pstate_handshake_supported)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
@@ -920,60 +876,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-       struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
-       struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
-                       wm_with_clock_ranges.wm_dmif_clocks_ranges;
-       struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
-                       wm_with_clock_ranges.wm_mcif_clocks_ranges;
-       int32_t i;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
-
-       wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
-       wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
-       for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
-               if (ranges->reader_wm_sets[i].wm_inst > 3)
-                       wm_dce_clocks[i].wm_set_id = WM_SET_A;
-               else
-                       wm_dce_clocks[i].wm_set_id =
-                                       ranges->reader_wm_sets[i].wm_inst;
-
-               wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-                       ranges->reader_wm_sets[i].min_drain_clk_mhz;
-
-               wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-                       ranges->reader_wm_sets[i].max_drain_clk_mhz;
-
-               wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-                       ranges->reader_wm_sets[i].min_fill_clk_mhz;
-
-               wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-                       ranges->reader_wm_sets[i].max_fill_clk_mhz;
-       }
-
-       for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
-               if (ranges->writer_wm_sets[i].wm_inst > 3)
-                       wm_soc_clocks[i].wm_set_id = WM_SET_A;
-               else
-                       wm_soc_clocks[i].wm_set_id =
-                                       ranges->writer_wm_sets[i].wm_inst;
-               wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-                               ranges->writer_wm_sets[i].min_fill_clk_mhz;
-
-               wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-                       ranges->writer_wm_sets[i].max_fill_clk_mhz;
-
-               wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-                       ranges->writer_wm_sets[i].min_drain_clk_mhz;
-
-               wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-                       ranges->writer_wm_sets[i].max_drain_clk_mhz;
-       }
 
-       smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 
        return PP_SMU_RESULT_OK;
 }
index 2d5c7da..29d64e7 100644 (file)
@@ -847,6 +847,73 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
        return result;
 }
 
+static enum bp_result get_soc_bb_info_v4_4(
+       struct bios_parser *bp,
+       struct bp_soc_bb_info *soc_bb_info)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+       if (!soc_bb_info)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       if (!DATA_TABLES(smu_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl =  GET_IMAGE(struct atom_display_controller_info_v4_4,
+                                                       DATA_TABLES(dce_info));
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat;
+       soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat;
+       soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat;
+
+       return result;
+}
+
+static enum bp_result bios_parser_get_soc_bb_info(
+       struct dc_bios *dcb,
+       struct bp_soc_bb_info *soc_bb_info)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_UNSUPPORTED;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+
+       if (!soc_bb_info) /* check for bad input */
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_UNSUPPORTED;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                               DATA_TABLES(dce_info));
+       get_atom_data_table_revision(header, &tbl_revision);
+
+       switch (tbl_revision.major) {
+       case 4:
+               switch (tbl_revision.minor) {
+               case 1:
+               case 2:
+               case 3:
+                       break;
+               case 4:
+                       result = get_soc_bb_info_v4_4(bp, soc_bb_info);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
 static enum bp_result get_embedded_panel_info_v2_1(
                struct bios_parser *bp,
                struct embedded_panel_info *info)
@@ -2222,7 +2289,9 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
        .get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
 
-       .enable_lvtma_control = bios_parser_enable_lvtma_control
+       .enable_lvtma_control = bios_parser_enable_lvtma_control,
+
+       .get_soc_bb_info = bios_parser_get_soc_bb_info,
 };
 
 static bool bios_parser2_construct(
index d031bd3..807dca8 100644 (file)
@@ -79,8 +79,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
        memset(&dce_clk_params, 0, sizeof(dce_clk_params));
 
        /* Make sure requested clock isn't lower than minimum threshold*/
-       if (requested_clk_khz > 0)
-               requested_clk_khz = max(requested_clk_khz,
+       requested_clk_khz = max(requested_clk_khz,
                                clk_mgr_dce->base.dentist_vco_freq_khz / 62);
 
        dce_clk_params.target_clock_frequency = requested_clk_khz;
index c11c6b3..0267644 100644 (file)
@@ -80,7 +80,7 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
 /* ClocksStatePerformance */
 { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
 
-int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        int dprefclk_wdivider;
index 543afa3..2f8fee0 100644 (file)
@@ -761,6 +761,7 @@ void rn_clk_mgr_construct(
 {
        struct dc_debug_options *debug = &ctx->dc->debug;
        struct dpm_clocks clock_table = { 0 };
+       enum pp_smu_status status = 0;
 
        clk_mgr->base.ctx = ctx;
        clk_mgr->base.funcs = &dcn21_funcs;
@@ -783,7 +784,6 @@ void rn_clk_mgr_construct(
        } else {
                struct clk_log_info log_info = {0};
 
-               clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
                clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
 
                /* SMU Version 55.51.0 and up no longer have an issue
@@ -818,8 +818,10 @@ void rn_clk_mgr_construct(
        clk_mgr->base.bw_params = &rn_bw_params;
 
        if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
-               pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
-               if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+               status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+
+               if (status == PP_SMU_RESULT_OK &&
+                   ctx->dc_bios && ctx->dc_bios->integrated_info) {
                        rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
                }
        }
index dc463d9..1eb29c3 100644 (file)
@@ -735,6 +735,8 @@ static bool dc_construct(struct dc *dc,
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
 #endif
 
+       dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
+
        if (dc->res_pool->funcs->update_bw_bounding_box)
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
 
@@ -842,6 +844,60 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
        dc_release_state(current_ctx);
 }
 
+static void disable_vbios_mode_if_required(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       unsigned int i, j;
+
+       /* check if timing_changed, disable stream*/
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct dc_stream_state *stream = NULL;
+               struct dc_link *link = NULL;
+               struct pipe_ctx *pipe = NULL;
+
+               pipe = &context->res_ctx.pipe_ctx[i];
+               stream = pipe->stream;
+               if (stream == NULL)
+                       continue;
+
+               if (stream->link->local_sink &&
+                       stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+                       link = stream->link;
+               }
+
+               if (link != NULL) {
+                       unsigned int enc_inst, tg_inst = 0;
+                       unsigned int pix_clk_100hz;
+
+                       enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+                       if (enc_inst != ENGINE_ID_UNKNOWN) {
+                               for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+                                       if (dc->res_pool->stream_enc[j]->id == enc_inst) {
+                                               tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
+                                                       dc->res_pool->stream_enc[j]);
+                                               break;
+                                       }
+                               }
+
+                               dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+                                       dc->res_pool->dp_clock_source,
+                                       tg_inst, &pix_clk_100hz);
+
+                               if (link->link_status.link_active) {
+                                       uint32_t requested_pix_clk_100hz =
+                                               pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
+
+                                       if (pix_clk_100hz != requested_pix_clk_100hz) {
+                                               core_link_disable_stream(pipe);
+                                               pipe->stream->dpms_off = false;
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
@@ -1246,6 +1302,19 @@ void dc_trigger_sync(struct dc *dc, struct dc_state *context)
        }
 }
 
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+       int i;
+       unsigned int stream_mask = 0;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].stream)
+                       stream_mask |= 1 << i;
+       }
+
+       return stream_mask;
+}
+
 /*
  * Applies given context to HW and copy it into current context.
  * It's up to the user to release the src context afterwards.
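get_stream_mask() packs per-pipe stream presence into a single byte for the DMCUB notification added in the commit-path hunk below. With illustrative values:

    /* streams on pipes 0 and 2 only */
    stream_mask = (1 << 0) | (1 << 2);   /* == 0x5 */

dc_dmub_srv_notify_stream_mask() then fires only when this mask differs from the current state's.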
@@ -1265,15 +1334,17 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        for (i = 0; i < context->stream_count; i++)
                dc_streams[i] =  context->streams[i];
 
-       if (!dcb->funcs->is_accelerated_mode(dcb))
+       if (!dcb->funcs->is_accelerated_mode(dcb)) {
+               disable_vbios_mode_if_required(dc, context);
                dc->hwss.enable_accelerated_mode(dc, context);
+       }
 
-       for (i = 0; i < context->stream_count; i++) {
+       for (i = 0; i < context->stream_count; i++)
                if (context->streams[i]->apply_seamless_boot_optimization)
                        dc->optimize_seamless_boot_streams++;
-       }
 
-       if (dc->optimize_seamless_boot_streams == 0)
+       if (context->stream_count > dc->optimize_seamless_boot_streams ||
+               context->stream_count == 0)
                dc->hwss.prepare_bandwidth(dc, context);
 
        disable_dangling_plane(dc, context);
@@ -1355,13 +1426,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
        dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-       if (dc->optimize_seamless_boot_streams == 0) {
+       if (context->stream_count > dc->optimize_seamless_boot_streams ||
+               context->stream_count == 0) {
                /* Must wait for no flips to be pending before doing optimize bw */
                wait_for_no_pipes_pending(dc, context);
                /* pplib is notified if disp_num changed */
                dc->hwss.optimize_bandwidth(dc, context);
        }
 
+       context->stream_mask = get_stream_mask(dc, context);
+
+       if (context->stream_mask != dc->current_state->stream_mask)
+               dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
        for (i = 0; i < context->stream_count; i++)
                context->streams[i]->mode_changed = false;
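The reworked condition programs bandwidth whenever any stream lacks the seamless-boot optimization, not only when none has it. With illustrative numbers:

    /* two streams, one flagged for seamless boot */
    int stream_count = 2, optimize_seamless_boot_streams = 1;
    bool program_bw = stream_count > optimize_seamless_boot_streams ||
                      stream_count == 0;   /* true: bandwidth is still prepared */

Under the old optimize_seamless_boot_streams == 0 test, a single optimized stream skipped prepare_bandwidth()/optimize_bandwidth() for the whole context.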
 
@@ -1481,13 +1558,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
        return true;
 }
 
-struct dc_state *dc_create_state(struct dc *dc)
+static void init_state(struct dc *dc, struct dc_state *context)
 {
-       struct dc_state *context = kvzalloc(sizeof(struct dc_state),
-                                           GFP_KERNEL);
-
-       if (!context)
-               return NULL;
        /* Each context must have their own instance of VBA and in order to
         * initialize and obtain IP and SOC the base DML instance from DC is
         * initially copied into every context
@@ -1495,6 +1567,17 @@ struct dc_state *dc_create_state(struct dc *dc)
 #ifdef CONFIG_DRM_AMD_DC_DCN
        memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
 #endif
+}
+
+struct dc_state *dc_create_state(struct dc *dc)
+{
+       struct dc_state *context = kzalloc(sizeof(struct dc_state),
+                                          GFP_KERNEL);
+
+       if (!context)
+               return NULL;
+
+       init_state(dc, context);
 
        kref_init(&context->refcount);
 
@@ -2415,8 +2498,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                plane_state->triplebuffer_flips = false;
                                if (update_type == UPDATE_TYPE_FAST &&
                                        dc->hwss.program_triplebuffer != NULL &&
-                                       !plane_state->flip_immediate &&
-                                       !dc->debug.disable_tri_buf) {
+                                       !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
                                                /*triple buffer for VUpdate  only*/
                                                plane_state->triplebuffer_flips = true;
                                }
@@ -2443,8 +2525,7 @@ static void commit_planes_for_stream(struct dc *dc,
 
                        ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
 
-                       if (dc->hwss.program_triplebuffer != NULL &&
-                               !dc->debug.disable_tri_buf) {
+                       if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                                /*turn off triple buffer for full update*/
                                dc->hwss.program_triplebuffer(
                                        dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
@@ -2509,8 +2590,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                if (pipe_ctx->plane_state != plane_state)
                                        continue;
                                /*program triple buffer after lock based on flip type*/
-                               if (dc->hwss.program_triplebuffer != NULL &&
-                                       !dc->debug.disable_tri_buf) {
+                               if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                                        /*only enable triplebuffer for  fast_update*/
                                        dc->hwss.program_triplebuffer(
                                                dc, pipe_ctx, plane_state->triplebuffer_flips);
@@ -2965,7 +3045,7 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
                        if (enable && !link->psr_settings.psr_allow_active)
                                return dc_link_set_psr_allow_active(link, true, false);
                        else if (!enable && link->psr_settings.psr_allow_active)
-                               return dc_link_set_psr_allow_active(link, false, false);
+                               return dc_link_set_psr_allow_active(link, false, true);
                }
        }
 
@@ -3018,4 +3098,10 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
                if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
                        core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
 }
+
+bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
+                                                struct dc_plane_state *plane)
+{
+       return false;
+}
 #endif
index c026b39..2a90804 100644 (file)
@@ -177,7 +177,7 @@ static bool is_ycbcr709_limited_type(
                ret = true;
        return ret;
 }
-enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
 {
        enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
 
index 437d1a7..fec87a2 100644 (file)
@@ -2441,7 +2441,7 @@ enum dc_status dc_link_validate_mode_timing(
        /* A hack to avoid failing any modes for EDID override feature on
         * topology change such as lower quality cable for DP or different dongle
         */
-       if (link->remote_sinks[0])
+       if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
                return DC_OK;
 
        /* Passive Dongle */
@@ -2566,7 +2566,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
        link->psr_settings.psr_allow_active = allow_active;
 
        if (psr != NULL && link->psr_settings.psr_feature_enabled)
-               psr->funcs->psr_enable(psr, allow_active);
+               psr->funcs->psr_enable(psr, allow_active, wait);
        else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
        else
@@ -2946,7 +2946,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
        pbn = get_pbn_from_timing(pipe_ctx);
        avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
-       stream_encoder->funcs->set_mst_bandwidth(
+       stream_encoder->funcs->set_throttled_vcp_size(
                stream_encoder,
                avg_time_slots_per_mtp);
 
@@ -2974,7 +2974,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
         */
 
        /* slot X.Y */
-       stream_encoder->funcs->set_mst_bandwidth(
+       stream_encoder->funcs->set_throttled_vcp_size(
                stream_encoder,
                avg_time_slots_per_mtp);
 
index b984eec..dec12de 100644 (file)
@@ -148,14 +148,6 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
        return p->payloads.count;
 }
 
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
-{
-       if (!p)
-               return;
-
-       dal_vector_destruct(&p->payloads);
-}
-
 #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
 
 void dal_ddc_i2c_payloads_add(
@@ -582,7 +574,7 @@ bool dal_ddc_service_query_ddc_data(
                                ddc->link,
                                &command);
 
-               dal_ddc_i2c_payloads_destroy(&payloads);
+               dal_vector_destruct(&payloads.payloads);
        }
 
        return success;
index d1d95d3..ff1e996 100644 (file)
@@ -49,14 +49,31 @@ static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b);
 
-static uint32_t get_training_aux_rd_interval(
-       struct dc_link *link,
-       uint32_t default_wait_in_micro_secs)
+static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+               const struct dc_link_settings *link_settings)
 {
        union training_aux_rd_interval training_rd_interval;
+       uint32_t wait_in_micro_secs = 100;
 
        memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+       core_link_read_dpcd(
+                       link,
+                       DP_TRAINING_AUX_RD_INTERVAL,
+                       (uint8_t *)&training_rd_interval,
+                       sizeof(training_rd_interval));
+       if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+               wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+       return wait_in_micro_secs;
+}
 
+static uint32_t get_eq_training_aux_rd_interval(
+       struct dc_link *link,
+       const struct dc_link_settings *link_settings)
+{
+       union training_aux_rd_interval training_rd_interval;
+       uint32_t wait_in_micro_secs = 400;
+
+       memset(&training_rd_interval, 0, sizeof(training_rd_interval));
        /* overwrite the delay if rev > 1.1*/
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
                /* DP 1.2 or later - retrieve delay through
@@ -68,10 +85,10 @@ static uint32_t get_training_aux_rd_interval(
                        sizeof(training_rd_interval));
 
                if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
-                       default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+                       wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
        }
 
-       return default_wait_in_micro_secs;
+       return wait_in_micro_secs;
 }
 
 static void wait_for_training_aux_rd_interval(
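Splitting the helper keeps the two DPCD-independent defaults visible: 100 us for clock recovery and 400 us for channel equalization, each overridden by a nonzero TRAINIG_AUX_RD_INTERVAL field scaled by 4000 (and the CR variant now reads the register unconditionally, while the EQ variant still gates on DPCD rev 1.2+). A worked example:

    /* DP_TRAINING_AUX_RD_INTERVAL reads back 2 */
    wait_in_micro_secs = 2 * 4000;   /* 8 ms between status reads */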
@@ -101,7 +118,16 @@ static void dpcd_set_training_pattern(
                dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
 }
 
-static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link)
+static enum dc_dp_training_pattern decide_cr_training_pattern(
+               const struct dc_link_settings *link_settings)
+{
+       enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
+
+       return pattern;
+}
+
+static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+               const struct dc_link_settings *link_settings)
 {
        enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
        struct encoder_feature_support *features = &link->link_enc->features;
@@ -132,7 +158,6 @@ static void dpcd_set_link_settings(
 
        union down_spread_ctrl downspread = { {0} };
        union lane_count_set lane_count_set = { {0} };
-       enum dc_dp_training_pattern dp_tr_pattern;
 
        downspread.raw = (uint8_t)
        (lt_settings->link_settings.link_spread);
@@ -143,9 +168,8 @@ static void dpcd_set_link_settings(
        lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
        lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
 
-       dp_tr_pattern = get_supported_tp(link);
 
-       if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) {
+       if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
                lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
                                link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
        }
@@ -373,34 +397,30 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 static bool is_cr_done(enum dc_lane_count ln_count,
        union lane_status *dpcd_lane_status)
 {
-       bool done = true;
        uint32_t lane;
        /*LANEx_CR_DONE bits All 1's?*/
        for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
                if (!dpcd_lane_status[lane].bits.CR_DONE_0)
-                       done = false;
+                       return false;
        }
-       return done;
-
+       return true;
 }
 
 static bool is_ch_eq_done(enum dc_lane_count ln_count,
        union lane_status *dpcd_lane_status,
        union lane_align_status_updated *lane_status_updated)
 {
-       bool done = true;
        uint32_t lane;
        if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
-               done = false;
+               return false;
        else {
                for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
                        if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
                                !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
-                               done = false;
+                               return false;
                }
        }
-       return done;
-
+       return true;
 }
 
 static void update_drive_settings(
@@ -979,7 +999,7 @@ static void start_clock_recovery_pattern_early(struct dc_link *link,
 {
        DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
                        __func__);
-       dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+       dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
        dp_set_hw_lane_settings(link, lt_settings, offset);
        udelay(400);
 }
@@ -994,7 +1014,6 @@ static enum link_training_result perform_clock_recovery_sequence(
        uint32_t wait_time_microsec;
        struct link_training_settings req_settings;
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-       enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
        union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
        union lane_align_status_updated dpcd_lane_status_updated;
 
@@ -1002,7 +1021,7 @@ static enum link_training_result perform_clock_recovery_sequence(
        retry_count = 0;
 
        if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
-               dp_set_hw_training_pattern(link, tr_pattern, offset);
+               dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
 
        /* najeeb - The synaptics MST hub can put the LT in
        * infinite loop by switching the VS
@@ -1029,7 +1048,7 @@ static enum link_training_result perform_clock_recovery_sequence(
                        dpcd_set_lt_pattern_and_lane_settings(
                                        link,
                                        lt_settings,
-                                       tr_pattern,
+                                       lt_settings->pattern_for_cr,
                                        offset);
                else
                        dpcd_set_lane_settings(
@@ -1113,7 +1132,7 @@ static inline enum link_training_result perform_link_training_int(
         * TPS4 must be used instead of POST_LT_ADJ_REQ.
         */
        if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
-                       get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4)
+                       lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
                return status;
 
        if (status == LINK_TRAINING_SUCCESS &&
@@ -1245,17 +1264,21 @@ static void initialize_training_settings(
        if (overrides->cr_pattern_time != NULL)
                lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
        else
-               lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);
+               lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
 
        if (overrides->eq_pattern_time != NULL)
                lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
        else
-               lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);
+               lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
 
+       if (overrides->pattern_for_cr != NULL)
+               lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+       else
+               lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
        if (overrides->pattern_for_eq != NULL)
                lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
        else
-               lt_settings->pattern_for_eq = get_supported_tp(link);
+               lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
 
        if (overrides->enhanced_framing != NULL)
                lt_settings->enhanced_framing = *overrides->enhanced_framing;
@@ -1457,7 +1480,6 @@ bool dc_link_dp_perform_link_training_skip_aux(
        const struct dc_link_settings *link_setting)
 {
        struct link_training_settings lt_settings;
-       enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;
 
        initialize_training_settings(
                        link,
@@ -1468,7 +1490,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
        /* 1. Perform_clock_recovery_sequence. */
 
        /* transmit training pattern for clock recovery */
-       dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
+       dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
 
        /* call HWSS to set lane settings*/
        dp_set_hw_lane_settings(link, &lt_settings, DPRX);
@@ -1610,6 +1632,9 @@ bool perform_link_training_with_retries(
 
        for (j = 0; j < attempts; ++j) {
 
+               DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
+                       __func__, (unsigned int)j + 1, attempts);
+
                dp_enable_link_phy(
                        link,
                        signal,
@@ -1638,6 +1663,9 @@ bool perform_link_training_with_retries(
                if (j == (attempts - 1))
                        break;
 
+               DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
+                       __func__, (unsigned int)j + 1, attempts);
+
                dp_disable_link_phy(link, signal);
 
                msleep(delay_between_attempts);
index dd88eb3..11a619b 100644 (file)
@@ -104,6 +104,12 @@ void dp_enable_link_phy(
        struct clock_source *dp_cs =
                        link->dc->res_pool->dp_clock_source;
        unsigned int i;
+
+       if (link->connector_signal == SIGNAL_TYPE_EDP) {
+               link->dc->hwss.edp_power_control(link, true);
+               link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+       }
+
        /* If the current pixel clock source is not DTO(happens after
         * switching from HDMI passive dongle to DP on the same connector),
         * switch the pixel clock source to DTO.
@@ -223,6 +229,8 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
                dp_receiver_power_ctrl(link, false);
 
        if (signal == SIGNAL_TYPE_EDP) {
+               if (link->dc->hwss.edp_backlight_control)
+                       link->dc->hwss.edp_backlight_control(link, false);
                link->link_enc->funcs->disable_output(link->link_enc, signal);
                link->dc->hwss.edp_power_control(link, false);
        } else {
@@ -485,13 +493,15 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
                                OPTC_DSC_DISABLED, 0, 0);
 
                /* disable DSC in stream encoder */
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
-                                       pipe_ctx->stream_res.stream_enc,
-                                       OPTC_DSC_DISABLED, 0, 0);
-
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                       pipe_ctx->stream_res.stream_enc, false, NULL);
+               if (dc_is_dp_signal(stream->signal)) {
+
+                       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+                                               pipe_ctx->stream_res.stream_enc,
+                                               OPTC_DSC_DISABLED, 0, 0);
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                       pipe_ctx->stream_res.stream_enc, false, NULL);
+                       }
                }
 
                /* disable DSC block */
@@ -528,7 +538,6 @@ out:
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 {
        struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
        struct dc_stream_state *stream = pipe_ctx->stream;
 
        if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
@@ -551,7 +560,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 
                DC_LOG_DSC(" ");
                dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (dc_is_dp_signal(stream->signal)) {
                        DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
                        pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
                                                                        pipe_ctx->stream_res.stream_enc,
@@ -560,7 +569,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
                }
        } else {
                /* disable DSC PPS in stream encoder */
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (dc_is_dp_signal(stream->signal)) {
                        pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
                                                pipe_ctx->stream_res.stream_enc, false, NULL);
                }
index c6b737d..e430148 100644 (file)
@@ -782,11 +782,18 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
 
        calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
 
-       data->recout.x = stream->dst.x;
-       if (stream->src.x < surf_clip.x)
-               data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
+       /*
+        * Only the leftmost ODM pipe should be offset by a nonzero distance
+        */
+       if (!pipe_ctx->prev_odm_pipe) {
+               data->recout.x = stream->dst.x;
+               if (stream->src.x < surf_clip.x)
+                       data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
                                                / stream->src.width;
 
+       } else
+               data->recout.x = 0;
+
        data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
        if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
                data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
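A minimal sketch of the rule this hunk encodes, with hypothetical names: only the leftmost pipe of an ODM group keeps the stream/clip offset, every other slice starts at zero so the combined output tiles cleanly.

    /* Illustrative sketch, not part of the patch; names are hypothetical. */
    static int recout_x_for_odm_pipe(bool is_leftmost, int stream_dst_x,
                                     int stream_src_x, int surf_clip_x,
                                     int dst_width, int src_width)
    {
            int recout_x;

            if (!is_leftmost)
                    return 0;       /* non-leftmost ODM slices carry no offset */

            recout_x = stream_dst_x;
            if (stream_src_x < surf_clip_x)
                    recout_x += (surf_clip_x - stream_src_x) * dst_width / src_width;
            return recout_x;
    }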
@@ -957,7 +964,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        const struct dc_stream_state *stream = pipe_ctx->stream;
-       struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
+       struct pipe_ctx *odm_pipe = pipe_ctx;
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
        struct rect src = pipe_ctx->plane_state->src_rect;
        int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
@@ -988,21 +995,24 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
                swap(src.width, src.height);
        }
 
+       /* modified recout_skip_h calculation: ODM pipes have no recout offset */
+       while (odm_pipe->prev_odm_pipe) {
+               odm_idx++;
+               odm_pipe = odm_pipe->prev_odm_pipe;
+       }
+       /* odm_pipe now points at the leftmost pipe in the ODM group */
+       recout_skip_h = odm_idx * data->recout.width;
+
        /* Recout matching initial vp offset = recout_offset - (stream dst offset +
         *                      ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
         *                      - (surf surf_src offset * 1/ full scl ratio))
         */
-       recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+       recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
+                               - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
                                        * stream->dst.width / stream->src.width -
                                        src.x * plane_state->dst_rect.width / src.width
                                        * stream->dst.width / stream->src.width);
-       /*modified recout_skip_h calculation due to odm having no recout offset*/
-       while (odm_pipe) {
-               odm_idx++;
-               odm_pipe = odm_pipe->prev_odm_pipe;
-       }
-       if (odm_idx)
-               recout_skip_h += odm_idx * data->recout.width;
+
 
        recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
                                        * stream->dst.height / stream->src.height -
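Worked numbers for the reordered recout_skip_h math above (all values hypothetical): in a 2-way ODM split, the second slice starts from odm_idx * recout.width, then adds the leftmost pipe's recout.x minus the scaled stream/plane offset terms.

    /* Illustrative arithmetic only. */
    int odm_idx = 1;                /* second pipe of a 2-way ODM group */
    int recout_width = 1920;        /* each slice is half of a 3840-wide recout */
    int leftmost_recout_x = 0;      /* recout.x of the leftmost pipe */
    int scaled_offsets = 0;         /* the dst/src offset terms, zero here */
    int recout_skip_h = odm_idx * recout_width + leftmost_recout_x - scaled_offsets;
    /* recout_skip_h == 1920: the right slice skips past the left slice's pixels */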
index f42a17d..d48fd87 100644 (file)
@@ -123,7 +123,6 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
                return false;
        }
        stream->out_transfer_func->type = TF_TYPE_BYPASS;
-       stream->out_transfer_func->ctx = stream->ctx;
 
        stream->stream_id = stream->ctx->dc_stream_id_count;
        stream->ctx->dc_stream_id_count++;
@@ -298,7 +297,7 @@ bool dc_stream_set_cursor_attributes(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        /* disable idle optimizations while updating cursor */
        if (dc->idle_optimizations_allowed) {
-               dc->hwss.apply_idle_power_optimizations(dc, false);
+               dc_allow_idle_optimizations(dc, false);
                reset_idle_optimizations = true;
        }
 
@@ -326,7 +325,7 @@ bool dc_stream_set_cursor_attributes(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        /* re-enable idle optimizations if necessary */
        if (reset_idle_optimizations)
-               dc->hwss.apply_idle_power_optimizations(dc, true);
+               dc_allow_idle_optimizations(dc, true);
 
 #endif
        return true;
@@ -359,9 +358,8 @@ bool dc_stream_set_cursor_position(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 
        /* disable idle optimizations if enabling cursor */
-       if (dc->idle_optimizations_allowed &&
-                       !stream->cursor_position.enable && position->enable) {
-               dc->hwss.apply_idle_power_optimizations(dc, false);
+       if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
+               dc_allow_idle_optimizations(dc, false);
                reset_idle_optimizations = true;
        }
 
@@ -392,7 +390,7 @@ bool dc_stream_set_cursor_position(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        /* re-enable idle optimizations if necessary */
        if (reset_idle_optimizations)
-               dc->hwss.apply_idle_power_optimizations(dc, true);
+               dc_allow_idle_optimizations(dc, true);
 
 #endif
        return true;
index ea1229a..3d7d274 100644 (file)
@@ -48,22 +48,17 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
        plane_state->in_transfer_func = dc_create_transfer_func();
        if (plane_state->in_transfer_func != NULL) {
                plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
-               plane_state->in_transfer_func->ctx = ctx;
        }
        plane_state->in_shaper_func = dc_create_transfer_func();
        if (plane_state->in_shaper_func != NULL) {
                plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
-               plane_state->in_shaper_func->ctx = ctx;
        }
 
        plane_state->lut3d_func = dc_create_3dlut_func();
-       if (plane_state->lut3d_func != NULL) {
-               plane_state->lut3d_func->ctx = ctx;
-       }
+
        plane_state->blend_tf = dc_create_transfer_func();
        if (plane_state->blend_tf != NULL) {
                plane_state->blend_tf->type = TF_TYPE_BYPASS;
-               plane_state->blend_tf->ctx = ctx;
        }
 
 }
index 1d9c8e0..82fe0ab 100644 (file)
@@ -42,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.99"
+#define DC_VER "3.2.104"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -476,7 +476,7 @@ struct dc_debug_options {
        unsigned int force_odm_combine_4to1; //bit vector based on otg inst
 #endif
        unsigned int force_fclk_khz;
-       bool disable_tri_buf;
+       bool enable_tri_buf;
        bool dmub_offload_enabled;
        bool dmcub_emulation;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
@@ -503,6 +503,7 @@ struct dc_debug_options {
        bool usbc_combo_phy_reset_wa;
        bool disable_dsc;
        bool enable_dram_clock_change_one_display_vactive;
+       bool force_ignore_link_settings;
 };
 
 struct dc_debug_data {
@@ -660,6 +661,7 @@ struct dc_init_data {
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        bool force_smu_not_present;
 #endif
+       bool force_ignore_link_settings;
 };
 
 struct dc_callback_init {
@@ -745,7 +747,6 @@ struct dc_transfer_func {
        enum dc_transfer_func_predefined tf;
        /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
        uint32_t sdr_ref_white_level;
-       struct dc_context *ctx;
        union {
                struct pwl_params pwl;
                struct dc_transfer_func_distributed_points tf_pts;
@@ -772,7 +773,6 @@ struct dc_3dlut {
        struct tetrahedral_params lut_3d;
        struct fixed31_32 hdr_multiplier;
        union dc_3dlut_state state;
-       struct dc_context *ctx;
 };
 /*
  * This structure is filled in by dc_surface_get_status and contains
@@ -1250,6 +1250,9 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+                                                struct dc_plane_state *plane);
+
 void dc_allow_idle_optimizations(struct dc *dc, bool allow);
 
 /*
index 0811f94..e146e3c 100644 (file)
@@ -140,6 +140,10 @@ struct dc_vbios_funcs {
        enum bp_result (*enable_lvtma_control)(
                struct dc_bios *bios,
                uint8_t uc_pwr_on);
+
+       enum bp_result (*get_soc_bb_info)(
+               struct dc_bios *dcb,
+               struct bp_soc_bb_info *soc_bb_info);
 };
 
 struct bios_registers {
index eea2429..b987548 100644 (file)
@@ -132,3 +132,19 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
                /* Continue spinning so we don't hang the ASIC. */
        }
 }
+
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+                                   unsigned int stream_mask)
+{
+       struct dmub_srv *dmub;
+       const uint32_t timeout = 30;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       dmub = dc_dmub_srv->dmub;
+
+       return dmub_srv_send_gpint_command(
+                      dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+                      stream_mask, timeout) == DMUB_STATUS_OK;
+}
index a3a09cc..bb4ab61 100644 (file)
@@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
 
 void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
 
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+                                   unsigned int stream_mask);
 #endif /* _DMUB_DC_SRV_H_ */
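A hedged usage sketch for the new helper: a caller hands DMUB a bitmask of active streams over the GPINT mailbox and treats a false return as firmware missing or timed out. The wrapper below is hypothetical, not part of the patch.

    /* Hypothetical caller, for illustration only. */
    static void notify_active_streams(struct dc_context *ctx,
                                      unsigned int stream_mask)
    {
            if (!dc_dmub_srv_notify_stream_mask(ctx->dmub_srv, stream_mask))
                    dm_output_to_console("%s: DMUB stream-mask notify failed\n",
                                         __func__);
    }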
index a8a3b06..80a2191 100644 (file)
@@ -123,6 +123,7 @@ struct dc_link_training_overrides {
 
        uint16_t *cr_pattern_time;
        uint16_t *eq_pattern_time;
+       enum dc_dp_training_pattern *pattern_for_cr;
        enum dc_dp_training_pattern *pattern_for_eq;
 
        enum dc_link_spread *downspread;
index e002ef7..266b93a 100644 (file)
@@ -237,6 +237,8 @@ enum dc_detect_reason {
        DETECT_REASON_BOOT,
        DETECT_REASON_HPD,
        DETECT_REASON_HPDRX,
+       DETECT_REASON_FALLBACK,
+       DETECT_REASON_RETRAIN
 };
 
 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
index aa8e095..c47a197 100644 (file)
@@ -122,7 +122,7 @@ struct dc_context {
 };
 
 
-#define DC_MAX_EDID_BUFFER_SIZE 1024
+#define DC_MAX_EDID_BUFFER_SIZE 1280
 #define DC_EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10
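The bump from 1024 to 1280 keeps the buffer a whole number of EDID blocks (1280 / 128 = 10); a compile-time check, shown only as a sketch, makes that relationship explicit.

    /* Sketch: 10 full 128-byte EDID blocks now fit. */
    _Static_assert(DC_MAX_EDID_BUFFER_SIZE % DC_EDID_BLOCK_SIZE == 0,
                   "EDID buffer must hold whole 128-byte blocks");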
index df7f826..74f7619 100644 (file)
@@ -159,11 +159,15 @@ static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
 static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
 {
        struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
-       uint32_t value;
+       uint32_t blon, blon_ovrd, pwrseq_target_state;
 
-       REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+       REG_GET_2(PWRSEQ_CNTL, LVTMA_BLON, &blon, LVTMA_BLON_OVRD, &blon_ovrd);
+       REG_GET(PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, &pwrseq_target_state);
 
-       return value;
+       if (blon_ovrd)
+               return blon;
+       else
+               return pwrseq_target_state;
 }
 
 static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
index 99c68ca..6bd1196 100644 (file)
        SR(BL_PWM_CNTL2), \
        SR(BL_PWM_PERIOD_CNTL), \
        SR(BL_PWM_GRP1_REG_LOCK), \
-       SR(BIOS_SCRATCH_2)
+       NBIO_SR(BIOS_SCRATCH_2)
 
 #define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
        .field_name = reg_name ## __ ## field_name ## post_fix
 
 #define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON_OVRD, mask_sh),\
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, mask_sh), \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
        DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
 
 #define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
        type LVTMA_BLON;\
+       type LVTMA_BLON_OVRD;\
        type LVTMA_DIGON;\
        type LVTMA_DIGON_OVRD;\
+       type LVTMA_PWRSEQ_TARGET_STATE; \
        type LVTMA_PWRSEQ_TARGET_STATE_R; \
        type BL_PWM_REF_DIV; \
        type BL_PWM_EN; \
index 4cdaaf4..5054bb5 100644 (file)
@@ -710,7 +710,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute(
        ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
 }
 
-static void dce110_stream_encoder_set_mst_bandwidth(
+static void dce110_stream_encoder_set_throttled_vcp_size(
        struct stream_encoder *enc,
        struct fixed31_32 avg_time_slots_per_mtp)
 {
@@ -1621,8 +1621,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
                dce110_stream_encoder_dvi_set_stream_attribute,
        .lvds_set_stream_attribute =
                dce110_stream_encoder_lvds_set_stream_attribute,
-       .set_mst_bandwidth =
-               dce110_stream_encoder_set_mst_bandwidth,
+       .set_throttled_vcp_size =
+               dce110_stream_encoder_set_throttled_vcp_size,
        .update_hdmi_info_packets =
                dce110_stream_encoder_update_hdmi_info_packets,
        .stop_hdmi_info_packets =
index 5167d6b..67af67e 100644 (file)
@@ -119,10 +119,11 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
 /**
  * Enable/Disable PSR.
  */
-static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
 {
        union dmub_rb_cmd cmd;
        struct dc_context *dc = dmub->ctx;
+       uint32_t retry_count, psr_state = 0;
 
        cmd.psr_enable.header.type = DMUB_CMD__PSR;
 
@@ -136,6 +137,30 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
        dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       /* The loop below polls up to 1000 times at 500 us intervals (500 ms
+        * total). Exiting PSR may need 1-2 frames to power up, so time out
+        * only after at least a few frames; the max-retry assert below
+        * should never be hit.
+        */
+       if (wait) {
+               for (retry_count = 0; retry_count <= 1000; retry_count++) {
+                       dmub_psr_get_state(dmub, &psr_state);
+
+                       if (enable) {
+                               if (psr_state != 0)
+                                       break;
+                       } else {
+                               if (psr_state == 0)
+                                       break;
+                       }
+
+                       udelay(500);
+               }
+
+               /* assert if max retry hit */
+               if (retry_count >= 1000)
+                       ASSERT(0);
+       }
 }
 
 /**
@@ -231,10 +256,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        copy_settings_data->smu_optimizations_en                = psr_context->allow_smu_optimizations;
        copy_settings_data->frame_delay                         = psr_context->frame_delay;
        copy_settings_data->frame_cap_ind                       = psr_context->psrFrameCaptureIndicationReq;
+       copy_settings_data->init_sdp_deadline                   = psr_context->sdpTransmitLineNumDeadline;
+       copy_settings_data->debug.u32All = 0;
        copy_settings_data->debug.bitfields.visual_confirm      = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
                                                                        true : false;
-       copy_settings_data->debug.bitfields.use_hw_lock_mgr     = 1;
-       copy_settings_data->init_sdp_deadline                   = psr_context->sdpTransmitLineNumDeadline;
+       copy_settings_data->debug.bitfields.use_hw_lock_mgr             = 1;
 
        dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
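The wait added to dmub_psr_enable above is a poll-until-state pattern; a condensed sketch under the same 1000 x 500 us budget follows (the helper name is hypothetical).

    /* Sketch only: poll PSR state until it matches the requested direction. */
    static bool psr_state_settled(struct dmub_psr *dmub, bool enable)
    {
            uint32_t psr_state = 0;
            uint32_t retry;

            for (retry = 0; retry <= 1000; retry++) {       /* ~500 ms total */
                    dmub_psr_get_state(dmub, &psr_state);
                    if (enable ? (psr_state != 0) : (psr_state == 0))
                            return true;
                    udelay(500);
            }
            return false;   /* caller asserts on timeout */
    }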
index f404fec..dc121ed 100644 (file)
@@ -36,7 +36,7 @@ struct dmub_psr {
 
 struct dmub_psr_funcs {
        bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
-       void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+       void (*psr_enable)(struct dmub_psr *dmub, bool enable, bool wait);
        void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
        void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
 };
index 0603ddc..3ac6c7b 100644 (file)
@@ -810,37 +810,66 @@ void dce110_edp_power_control(
 
        if (power_up !=
                link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
+
+               unsigned long long current_ts = dm_get_timestamp(ctx);
+               unsigned long long time_since_edp_poweroff_ms =
+                               div64_u64(dm_get_elapse_time_in_ns(
+                                               ctx,
+                                               current_ts,
+                                               link->link_trace.time_stamp.edp_poweroff), 1000000);
+               unsigned long long time_since_edp_poweron_ms =
+                               div64_u64(dm_get_elapse_time_in_ns(
+                                               ctx,
+                                               current_ts,
+                                               link->link_trace.time_stamp.edp_poweron), 1000000);
+               DC_LOG_HW_RESUME_S3(
+                               "%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
+                               __func__,
+                               power_up,
+                               current_ts,
+                               link->link_trace.time_stamp.edp_poweroff,
+                               link->link_trace.time_stamp.edp_poweron,
+                               time_since_edp_poweroff_ms,
+                               time_since_edp_poweron_ms);
+
                /* Send VBIOS command to prompt eDP panel power */
                if (power_up) {
-                       unsigned long long current_ts = dm_get_timestamp(ctx);
-                       unsigned long long duration_in_ms =
-                                       div64_u64(dm_get_elapse_time_in_ns(
-                                                       ctx,
-                                                       current_ts,
-                                                       link->link_trace.time_stamp.edp_poweroff), 1000000);
-                       unsigned long long wait_time_ms = 0;
-
-                       /* max 500ms from LCDVDD off to on */
-                       unsigned long long edp_poweroff_time_ms = 500;
+                       /* eDP requires a minimum of 500 ms from LCDVDD off to on */
+                       unsigned long long remaining_min_edp_poweroff_time_ms = 500;
 
+                       /* add any panel-specific extra time (patch extra_t12_ms is usually 0) */
                        if (link->local_sink != NULL)
-                               edp_poweroff_time_ms =
-                                               500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
-                       if (link->link_trace.time_stamp.edp_poweroff == 0)
-                               wait_time_ms = edp_poweroff_time_ms;
-                       else if (duration_in_ms < edp_poweroff_time_ms)
-                               wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
-
-                       if (wait_time_ms) {
-                               msleep(wait_time_ms);
-                               dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
-                                               __func__, wait_time_ms);
+                               remaining_min_edp_poweroff_time_ms +=
+                                       link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+
+                       /* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
+                       if (link->link_trace.time_stamp.edp_poweroff != 0) {
+                               if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
+                                       remaining_min_edp_poweroff_time_ms =
+                                               remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
+                               else
+                                       remaining_min_edp_poweroff_time_ms = 0;
                        }
 
+                       if (remaining_min_edp_poweroff_time_ms) {
+                               DC_LOG_HW_RESUME_S3(
+                                               "%s: remaining_min_edp_poweroff_time_ms=%llu: begin wait.\n",
+                                               __func__, remaining_min_edp_poweroff_time_ms);
+                               msleep(remaining_min_edp_poweroff_time_ms);
+                               DC_LOG_HW_RESUME_S3(
+                                               "%s: remaining_min_edp_poweroff_time_ms=%llu: end wait.\n",
+                                               __func__, remaining_min_edp_poweroff_time_ms);
+                               dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
+                                               __func__, remaining_min_edp_poweroff_time_ms);
+                       } else {
+                               DC_LOG_HW_RESUME_S3(
+                                               "%s: remaining_min_edp_poweroff_time_ms=%llu: no wait required.\n",
+                                               __func__, remaining_min_edp_poweroff_time_ms);
+                       }
                }
 
                DC_LOG_HW_RESUME_S3(
-                               "%s: Panel Power action: %s\n",
+                               "%s: BEGIN: Panel Power action: %s\n",
                                __func__, (power_up ? "On":"Off"));
 
                cntl.action = power_up ?
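Worked numbers for the T12 bookkeeping above (all values hypothetical): with the standard 500 ms minimum off time, no panel patch, and a power-off that happened 180 ms ago, the driver still sleeps 320 ms before powering the panel back on.

    /* Illustrative arithmetic only. */
    unsigned long long min_off_ms = 500 + 0;        /* 500 + extra_t12_ms */
    unsigned long long since_off_ms = 180;          /* measured since poweroff */
    unsigned long long wait_ms =
            since_off_ms < min_off_ms ? min_off_ms - since_off_ms : 0;
    /* wait_ms == 320; msleep(wait_ms), then issue the VBIOS power-on. */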
@@ -864,12 +893,23 @@ void dce110_edp_power_control(
 
                bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
 
+               DC_LOG_HW_RESUME_S3(
+                               "%s: END: Panel Power action: %s bp_result=%u\n",
+                               __func__, (power_up ? "On":"Off"),
+                               bp_result);
+
                if (!power_up)
                        /*save driver power off time stamp*/
                        link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
                else
                        link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
 
+               DC_LOG_HW_RESUME_S3(
+                               "%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
+                               __func__,
+                               link->link_trace.time_stamp.edp_poweroff,
+                               link->link_trace.time_stamp.edp_poweron);
+
                if (bp_result != BP_RESULT_OK)
                        DC_LOG_ERROR(
                                        "%s: Panel Power bp_result: %d\n",
@@ -1614,7 +1654,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                // enable fastboot if backend is enabled on eDP
                if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
                        /* Set optimization flag on eDP stream*/
-                       if (edp_stream) {
+                       if (edp_stream && edp_link->link_status.link_active) {
                                edp_stream->apply_edp_fast_boot_optimization = true;
                                can_apply_edp_fast_boot = true;
                        }
@@ -2697,7 +2737,7 @@ static void program_output_csc(struct dc *dc,
        }
 }
 
-void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
        struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
        struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -2742,7 +2782,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
                mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 }
 
-void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 {
        struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
 
@@ -2850,6 +2890,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .setup_stereo = NULL,
        .set_avmute = dce110_set_avmute,
        .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dce110_set_cursor_position,
index 47a39eb..7a00fe5 100644 (file)
@@ -325,8 +325,6 @@ bool cm_helper_translate_curve_to_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE_CTX(output_tf->ctx);
-
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
@@ -524,8 +522,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE_CTX(output_tf->ctx);
-
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
index cedf359..db5615a 100644 (file)
@@ -734,6 +734,9 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
        struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
        struct dc_plane_address earliest_inuse_address;
 
+       if (hubp && hubp->power_gated)
+               return false;
+
        REG_GET(DCSURF_FLIP_CONTROL,
                        SURFACE_FLIP_PENDING, &flip_pending);
 
index 8ca94f5..d0f3bf9 100644 (file)
@@ -2765,7 +2765,7 @@ bool dcn10_disconnect_pipes(
                struct dc *dc,
                struct dc_state *context)
 {
-               bool found_stream = false;
+               bool found_pipe = false;
                int i, j;
                struct dce_hwseq *hws = dc->hwseq;
                struct dc_state *old_ctx = dc->current_state;
@@ -2805,26 +2805,28 @@ bool dcn10_disconnect_pipes(
                                        old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
 
                                        /* Find the top pipe in the new ctx for the bottom pipe that we
-                                        * want to remove by comparing the streams. If both pipes are being
-                                        * disabled then do it in the regular pipe programming sequence
+                                        * want to remove by comparing the streams and planes. If both
+                                        * pipes are being disabled then do it in the regular pipe
+                                        * programming sequence
                                         */
                                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                                                if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
+                                                       old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state &&
                                                        !context->res_ctx.pipe_ctx[j].top_pipe &&
                                                        !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
-                                                       found_stream = true;
+                                                       found_pipe = true;
                                                        break;
                                                }
                                        }
 
                                        // Disconnect if the top pipe lost its pipe split
-                                       if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
+                                       if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
                                                hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
                                                DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
                                                mpcc_disconnected = true;
                                        }
                                }
-                               found_stream = false;
+                               found_pipe = false;
                        }
                }
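The rename from found_stream to found_pipe tracks the stricter match above: a candidate pipe in the new context must now agree on both stream and plane before it counts as the surviving top pipe. A sketch of that predicate (the helper itself is hypothetical):

    /* Sketch of the match criteria used in the inner loop above. */
    static bool matches_old_top_pipe(const struct pipe_ctx *old_top,
                                     const struct pipe_ctx *new_pipe)
    {
            return old_top->stream == new_pipe->stream &&
                   old_top->plane_state == new_pipe->plane_state &&
                   !new_pipe->top_pipe &&
                   !new_pipe->update_flags.bits.disable;
    }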
 
index a1d1559..b24c8ae 100644 (file)
@@ -66,6 +66,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 2972392..800be26 100644 (file)
@@ -288,6 +288,17 @@ void optc1_program_timing(
        if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
                h_div = H_TIMING_DIV_BY2;
 
+       if (REG(OPTC_DATA_FORMAT_CONTROL)) {
+               uint32_t data_fmt = 0;
+
+               if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+                       data_fmt = 1;
+               else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+                       data_fmt = 2;
+
+               REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
                if (optc1->opp_count == 4)
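Moving the format programming into optc1_program_timing centralizes the encoding-to-register mapping; judging only by the values in this hunk, 0 selects RGB/4:4:4, 1 selects YCbCr 4:2:2, and 2 selects YCbCr 4:2:0. A sketch of that mapping:

    /* Sketch of the mapping applied above; enum values from dc_hw_types. */
    static uint32_t optc_data_fmt(enum dc_pixel_encoding enc)
    {
            switch (enc) {
            case PIXEL_ENCODING_YCBCR422:
                    return 1;
            case PIXEL_ENCODING_YCBCR420:
                    return 2;
            default:
                    return 0;       /* RGB / 4:4:4 */
            }
    }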
index 1abd81e..a78712c 100644 (file)
@@ -798,7 +798,7 @@ static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
                .max_hdmi_pixel_clock = 600000,
                .hdmi_ycbcr420_supported = true,
-               .dp_ycbcr420_supported = false,
+               .dp_ycbcr420_supported = true,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_HBR3_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true,
index 842abb4..f70fcad 100644 (file)
@@ -619,7 +619,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
        enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
 }
 
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
        struct stream_encoder *enc,
        struct fixed31_32 avg_time_slots_per_mtp)
 {
@@ -896,10 +896,10 @@ void enc1_stream_encoder_dp_blank(
         */
        REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
        /* Larger delay to wait until VBLANK - use max retry of
-        * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+        * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
         * a little more because we may not trust delay accuracy.
         */
-       max_retries = DP_BLANK_MAX_RETRY * 250;
+       max_retries = DP_BLANK_MAX_RETRY * 501;
 
        /* disable DP stream */
        REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
@@ -1616,8 +1616,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
                enc1_stream_encoder_hdmi_set_stream_attribute,
        .dvi_set_stream_attribute =
                enc1_stream_encoder_dvi_set_stream_attribute,
-       .set_mst_bandwidth =
-               enc1_stream_encoder_set_mst_bandwidth,
+       .set_throttled_vcp_size =
+               enc1_stream_encoder_set_throttled_vcp_size,
        .update_hdmi_info_packets =
                enc1_stream_encoder_update_hdmi_info_packets,
        .stop_hdmi_info_packets =
index 30eae74..b99d252 100644 (file)
@@ -588,7 +588,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
        struct dc_crtc_timing *crtc_timing,
        bool is_dual_link);
 
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
        struct stream_encoder *enc,
        struct fixed31_32 avg_time_slots_per_mtp);
 
index bb920d0..368818d 100644 (file)
@@ -908,6 +908,9 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
        struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
        struct dc_plane_address earliest_inuse_address;
 
+       if (hubp && hubp->power_gated)
+               return false;
+
        REG_GET(DCSURF_FLIP_CONTROL,
                        SURFACE_FLIP_PENDING, &flip_pending);
 
index c8cfd3b..01530e6 100644 (file)
@@ -1251,6 +1251,11 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
                return;
        }
 
+       /* Detect plane change */
+       if (old_pipe->plane_state != new_pipe->plane_state) {
+               new_pipe->update_flags.bits.plane_changed = true;
+       }
+
        /* Detect top pipe only changes */
        if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
                /* Detect odm changes */
@@ -1392,6 +1397,7 @@ static void dcn20_update_dchubp_dpp(
                        &pipe_ctx->ttu_regs);
 
        if (pipe_ctx->update_flags.bits.enable ||
+                       pipe_ctx->update_flags.bits.plane_changed ||
                        plane_state->update_flags.bits.bpp_change ||
                        plane_state->update_flags.bits.input_csc_change ||
                        plane_state->update_flags.bits.color_space_change ||
@@ -1414,6 +1420,7 @@ static void dcn20_update_dchubp_dpp(
        }
 
        if (pipe_ctx->update_flags.bits.mpcc
+                       || pipe_ctx->update_flags.bits.plane_changed
                        || plane_state->update_flags.bits.global_alpha_change
                        || plane_state->update_flags.bits.per_pixel_alpha_change) {
                // MPCC inst is equal to pipe index in practice
@@ -1515,6 +1522,7 @@ static void dcn20_update_dchubp_dpp(
        }
 
        if (pipe_ctx->update_flags.bits.enable ||
+                       pipe_ctx->update_flags.bits.plane_changed ||
                        pipe_ctx->update_flags.bits.opp_changed ||
                        plane_state->update_flags.bits.pixel_format_change ||
                        plane_state->update_flags.bits.horizontal_mirror_change ||
@@ -1539,7 +1547,9 @@ static void dcn20_update_dchubp_dpp(
                hubp->power_gated = false;
        }
 
-       if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+       if (pipe_ctx->update_flags.bits.enable ||
+               pipe_ctx->update_flags.bits.plane_changed ||
+               plane_state->update_flags.bits.addr_update)
                hws->funcs.update_plane_addr(dc, pipe_ctx);
 
 
@@ -1632,16 +1642,26 @@ void dcn20_program_front_end_for_ctx(
        struct dce_hwseq *hws = dc->hwseq;
        DC_LOGGER_INIT(dc->ctx->logger);
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+       /* Carry over GSL groups in case the context is changing. */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe_ctx =
+                       &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == old_pipe_ctx->stream)
+                       pipe_ctx->stream_res.gsl_group =
+                               old_pipe_ctx->stream_res.gsl_group;
+       }
+
+       if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-               if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-                       ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
-                       if (dc->hwss.program_triplebuffer != NULL &&
-                               !dc->debug.disable_tri_buf) {
+                       if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+                               ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
                                /*turn off triple buffer for full update*/
                                dc->hwss.program_triplebuffer(
-                                       dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+                                               dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
                        }
                }
        }
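Two things happen in this hunk: GSL (global swap lock, assuming the usual expansion of the acronym) group assignments are carried across context swaps so synchronized flips are not reprogrammed needlessly, and triple buffering becomes an explicit opt-in via the renamed flag, which only the diagnostic defaults set (see the resource-file hunks below). The polarity flip, side by side as a sketch:

    /* Old gate (opt-out) vs. new gate (opt-in); sketch only. */
    bool old_gate = dc->hwss.program_triplebuffer != NULL && !dc->debug.disable_tri_buf;
    bool new_gate = dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf;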
@@ -1909,9 +1929,9 @@ void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->stream_res.dsc) {
                struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
 
-               dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
+               hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
                while (odm_pipe) {
-                       dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+                       hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
                        odm_pipe = odm_pipe->next_odm_pipe;
                }
        }
@@ -1924,9 +1944,9 @@ void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->stream_res.dsc) {
                struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
 
-               dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
+               hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
                while (odm_pipe) {
-                       dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+                       hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
                        odm_pipe = odm_pipe->next_odm_pipe;
                }
        }
index 966e179..072193c 100644 (file)
@@ -68,6 +68,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 8c16967..d8b18c5 100644 (file)
@@ -239,7 +239,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
        int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
                        / opp_cnt;
        uint32_t memory_mask;
-       uint32_t data_fmt = 0;
 
        ASSERT(opp_cnt == 2);
 
@@ -262,13 +261,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
                REG_SET(OPTC_MEMORY_CONFIG, 0,
                        OPTC_MEM_SEL, memory_mask);
 
-       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
-               data_fmt = 1;
-       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-               data_fmt = 2;
-
-       REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
        REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
                        OPTC_NUM_OF_INPUT_SEGMENT, 1,
                        OPTC_SEG0_SRC_SEL, opp_id[0],
index 1b98744..d50a9c3 100644 (file)
@@ -150,7 +150,6 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
        .dispclk_delay_subtotal = 87, //
        .dcfclk_cstate_latency = 10, // SRExitTime
        .max_inter_dcn_tile_repeaters = 8,
-
        .xfc_supported = true,
        .xfc_fill_bw_overhead_percent = 10.0,
        .xfc_fill_constant_bytes = 0,
@@ -298,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
                        },
                },
        .num_states = 5,
-       .sr_exit_time_us = 8.6,
-       .sr_enter_plus_exit_time_us = 10.9,
+       .sr_exit_time_us = 11.6,
+       .sr_enter_plus_exit_time_us = 13.9,
        .urgent_latency_us = 4.0,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -409,8 +408,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
                        },
                },
        .num_states = 5,
-       .sr_exit_time_us = 8.6,
-       .sr_enter_plus_exit_time_us = 10.9,
+       .sr_exit_time_us = 11.6,
+       .sr_enter_plus_exit_time_us = 13.9,
        .urgent_latency_us = 4.0,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1075,7 +1074,6 @@ static const struct dc_debug_options debug_defaults_drv = {
                .disable_pplib_wm_range = false,
                .scl_reset_length10 = true,
                .sanity_checks = false,
-               .disable_tri_buf = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
 };
 
@@ -1092,6 +1090,7 @@ static const struct dc_debug_options debug_defaults_diags = {
                .disable_stutter = true,
                .scl_reset_length10 = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
+               .enable_tri_buf = true,
 };
 
 void dcn20_dpp_destroy(struct dpp **dpp)
@@ -2203,9 +2202,9 @@ int dcn20_populate_dml_pipes_from_context(
                /* todo: default max for now, until there is logic reflecting this in dc*/
                pipes[pipe_cnt].dout.output_bpc = 12;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-               /*fill up the audio sample rate*/
+               /* fill in the max audio sample rate (in kHz) */
                get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
-               pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate;
+               pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
 #endif
                /*
                 * For graphic plane, cursor number is 1, nv12 is 0
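Worked example for the unit change above, assuming get_audio_check() reports the rate in Hz: a 48 kHz stream comes back as max_audiosample_rate = 48000, and DML now receives 48.

    /* Illustrative only. */
    unsigned int max_audiosample_rate = 48000;               /* Hz */
    unsigned int dml_rate_khz = max_audiosample_rate / 1000; /* == 48 */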
index e3984f0..4075ae1 100644 (file)
@@ -561,8 +561,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
                enc1_stream_encoder_hdmi_set_stream_attribute,
        .dvi_set_stream_attribute =
                enc1_stream_encoder_dvi_set_stream_attribute,
-       .set_mst_bandwidth =
-               enc1_stream_encoder_set_mst_bandwidth,
+       .set_throttled_vcp_size =
+               enc1_stream_encoder_set_throttled_vcp_size,
        .update_hdmi_info_packets =
                enc2_stream_encoder_update_hdmi_info_packets,
        .stop_hdmi_info_packets =
index 2ba880c..2b7396c 100644 (file)
@@ -69,6 +69,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 78743ae..e73785e 100644 (file)
@@ -894,6 +894,8 @@ static const struct dc_debug_options debug_defaults_diags = {
                .disable_pplib_wm_range = true,
                .disable_stutter = true,
                .disable_48mhz_pwrdwn = true,
+               .disable_psr = true,
+               .enable_tri_buf = true
 };
 
 enum dcn20_clk_src_array_id {
index 025637a..bd2a068 100644 (file)
@@ -31,9 +31,21 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
        dcn30_dio_link_encoder.o dcn30_resource.o
 
 
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-
+ifdef CONFIG_X86
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
+endif
+
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
@@ -45,8 +57,10 @@ ifdef IS_OLD_GCC
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
 # (8B stack alignment).
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mpreferred-stack-boundary=4
 else
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -msse2
 endif
 
 AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
index a139a87..41a1d0e 100644 (file)
@@ -122,8 +122,6 @@ bool cm3_helper_translate_curve_to_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE_CTX(output_tf->ctx);
-
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
@@ -314,8 +312,6 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE_CTX(output_tf->ctx);
-
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
        hw_points = 0;
index f5e80a0..6c0f7ef 100644 (file)
@@ -790,8 +790,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
                enc3_stream_encoder_hdmi_set_stream_attribute,
        .dvi_set_stream_attribute =
                enc3_stream_encoder_dvi_set_stream_attribute,
-       .set_mst_bandwidth =
-               enc1_stream_encoder_set_mst_bandwidth,
+       .set_throttled_vcp_size =
+               enc1_stream_encoder_set_throttled_vcp_size,
        .update_hdmi_info_packets =
                enc3_stream_encoder_update_hdmi_info_packets,
        .stop_hdmi_info_packets =
index a5d750e..204773f 100644 (file)
@@ -35,7 +35,6 @@
 #include "dcn30_dpp.h"
 #include "dcn10/dcn10_cm_common.h"
 #include "dcn30_cm_common.h"
-#include "clk_mgr.h"
 #include "reg_helper.h"
 #include "abm.h"
 #include "clk_mgr.h"
@@ -220,15 +219,13 @@ static void dcn30_set_writeback(
                struct dc_writeback_info *wb_info,
                struct dc_state *context)
 {
-       struct dwbc *dwb;
        struct mcif_wb *mcif_wb;
        struct mcif_buf_params *mcif_buf_params;
 
        ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
        ASSERT(wb_info->wb_enabled);
        ASSERT(wb_info->mpcc_inst >= 0);
-       ASSERT(wb_info->mpcc_inst < 4);
-       dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+       ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count);
        mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
        mcif_buf_params = &wb_info->mcif_buf_params;
 
@@ -692,26 +689,23 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
 
 bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
-       unsigned int surface_size;
-
        if (!dc->ctx->dmub_srv)
                return false;
 
        if (enable) {
-               if (dc->current_state
-                               && dc->current_state->stream_count == 1 // single display only
-                               && dc->current_state->stream_status[0].plane_count == 1 // single surface only
-                               && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
-                               // Only 8 and 16 bit formats
-                               && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
-                               && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
-
-                       surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
-                                       dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
-                                       (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
-
+               if (dc->current_state) {
+                       int i;
+
+                       /* First, check no-memory-requests case */
+                       for (i = 0; i < dc->current_state->stream_count; i++) {
+                               if (dc->current_state->stream_status[i]
+                                           .plane_count)
+                                       /* Fail eligibility on a visible stream */
+                                       break;
+                       }
                }
 
+               /* No applicable optimizations */
                return false;
        }
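The hunk above replaces the single-display, single-surface heuristic with a per-stream scan: eligibility for the no-memory-requests case fails as soon as any stream still has a visible plane. The enable path that consumes the scan result sits outside the visible context; a condensed sketch of just the scan:

    /* Sketch only; mirrors the loop in the hunk above. */
    bool any_visible_plane = false;
    int i;

    for (i = 0; i < dc->current_state->stream_count; i++) {
            if (dc->current_state->stream_status[i].plane_count) {
                    any_visible_plane = true;       /* fail eligibility */
                    break;
            }
    }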
 
index 19daa45..7c90c22 100644 (file)
@@ -69,6 +69,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 224c8d1..b1f228f 100644 (file)
@@ -179,8 +179,7 @@ void optc3_set_dsc_config(struct timing_generator *optc,
 
 }
 
-
-static void optc3_set_odm_bypass(struct timing_generator *optc,
+void optc3_set_odm_bypass(struct timing_generator *optc,
                const struct dc_crtc_timing *dc_crtc_timing)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -210,7 +209,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
        int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
                        / opp_cnt;
        uint32_t memory_mask = 0;
-       uint32_t data_fmt = 0;
 
        /* TODO: In pseudocode but does not affect maximus, delete comment if we don't need on asic
         * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
@@ -241,13 +239,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
                REG_SET(OPTC_MEMORY_CONFIG, 0,
                        OPTC_MEM_SEL, memory_mask);
 
-       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
-               data_fmt = 1;
-       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-               data_fmt = 2;
-
-       REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
        if (opp_cnt == 2) {
                REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
                                OPTC_NUM_OF_INPUT_SEGMENT, 1,
@@ -277,7 +268,7 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
  *
  * Options: any time,  start of frame, dp start of frame (range timing)
  */
-void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
        uint32_t mode = enable ? 2 : 0;
index 33f13c1..3796168 100644 (file)
@@ -339,4 +339,8 @@ void optc3_set_dsc_config(struct timing_generator *optc,
 
 void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 
+void optc3_set_odm_bypass(struct timing_generator *optc,
+               const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_tg_init(struct timing_generator *optc);
+
 #endif /* __DC_OPTC_DCN30_H__ */
index 8be4f21..24fb39a 100644 (file)
@@ -79,6 +79,7 @@
 
 #include "reg_helper.h"
 #include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 
@@ -832,7 +833,7 @@ static const struct dc_plane_cap plane_cap = {
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
-       .disable_dmcu = true,
+       .disable_dmcu = true, //No DMCU on DCN30
        .force_abm_enable = false,
        .timing_trace = false,
        .clock_trace = true,
@@ -849,10 +850,11 @@ static const struct dc_debug_options debug_defaults_drv = {
        .underflow_assert_delay_us = 0xFFFFFFFF,
        .dwb_fi_phase = -1, // -1 = disable,
        .dmub_command_table = true,
+       .disable_psr = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-       .disable_dmcu = true,
+       .disable_dmcu = true, //No DMCU on DCN30
        .force_abm_enable = false,
        .timing_trace = true,
        .clock_trace = true,
@@ -865,6 +867,8 @@ static const struct dc_debug_options debug_defaults_diags = {
        .scl_reset_length10 = true,
        .dwb_fi_phase = -1, // -1 = disable
        .dmub_command_table = true,
+       .disable_psr = true,
+       .enable_tri_buf = true,
 };
 
 void dcn30_dpp_destroy(struct dpp **dpp)
@@ -1312,6 +1316,9 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
                        dce_abm_destroy(&pool->base.multiple_abms[i]);
        }
 
+       if (pool->base.psr != NULL)
+               dmub_psr_destroy(&pool->base.psr);
+
        if (pool->base.dccg != NULL)
                dcn_dccg_destroy(&pool->base.dccg);
 }
@@ -1821,6 +1828,22 @@ static bool init_soc_bounding_box(struct dc *dc,
        loaded_ip->max_num_dpp = pool->base.pipe_count;
        loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
        dcn20_patch_bounding_box(dc, loaded_bb);
+
+       if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+               struct bp_soc_bb_info bb_info = {0};
+
+               if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+                       if (bb_info.dram_clock_change_latency_100ns > 0)
+                               dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+                       if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+                               dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+                       if (bb_info.dram_sr_exit_latency_100ns > 0)
+                               dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+               }
+       }
+
        return true;
 }
 
@@ -1876,6 +1899,48 @@ static bool dcn30_split_stream_for_mpc_or_odm(
        return true;
 }
 
+static struct pipe_ctx *dcn30_find_split_pipe(
+               struct dc *dc,
+               struct dc_state *context,
+               int old_index)
+{
+       struct pipe_ctx *pipe = NULL;
+       int i;
+
+       if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+               pipe = &context->res_ctx.pipe_ctx[old_index];
+               pipe->pipe_idx = old_index;
+       }
+
+       if (!pipe)
+               for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+                       if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+                                       && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+                               if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+                                       pipe = &context->res_ctx.pipe_ctx[i];
+                                       pipe->pipe_idx = i;
+                                       break;
+                               }
+                       }
+               }
+
+       /*
+        * May need to fix pipes getting tossed from one OPP to another on flip
+        * Add for debugging transient underflow during topology updates:
+        * ASSERT(pipe);
+        */
+       if (!pipe)
+               for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+                       if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+                               pipe = &context->res_ctx.pipe_ctx[i];
+                               pipe->pipe_idx = i;
+                               break;
+                       }
+               }
+
+       return pipe;
+}
+
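dcn30_find_split_pipe tries three sources in order: the index the secondary
pipe had in the old state (keeping pipe-to-OPP assignments stable across
flips), then free pipes that headed their tree in the old state, and finally
any free pipe. The same tiered fallback, reduced to a self-contained sketch
over plain arrays (illustrative only):

	/* is_free[i]: slot i is unused in the new state;
	 * was_top[i]: slot i headed its tree in the old state.
	 * Returns a slot index, or -1 if nothing is free. */
	static int find_slot(const int *is_free, const int *was_top,
			     int count, int old_index)
	{
		int i;

		if (old_index >= 0 && is_free[old_index])
			return old_index;	/* tier 1: reuse the old slot */
		for (i = count - 1; i >= 0; i--)
			if (is_free[i] && was_top[i])
				return i;	/* tier 2: previously top-level */
		for (i = count - 1; i >= 0; i--)
			if (is_free[i])
				return i;	/* tier 3: anything free */
		return -1;
	}
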
 static bool dcn30_internal_validate_bw(
                struct dc *dc,
                struct dc_state *context,
@@ -2001,6 +2066,7 @@ static bool dcn30_internal_validate_bw(
                                dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
                        memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
                        memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+                       repopulate_pipes = true;
                } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
                        struct pipe_ctx *top_pipe = pipe->top_pipe;
                        struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2015,6 +2081,7 @@ static bool dcn30_internal_validate_bw(
                        pipe->stream = NULL;
                        memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
                        memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+                       repopulate_pipes = true;
                } else
                        ASSERT(0); /* Should never try to merge master pipe */
 
@@ -2022,8 +2089,10 @@ static bool dcn30_internal_validate_bw(
 
        for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                struct pipe_ctx *hsplit_pipe = NULL;
                bool odm;
+               int old_index = -1;
 
                if (!pipe->stream || newly_split[i])
                        continue;
@@ -2035,7 +2104,20 @@ static bool dcn30_internal_validate_bw(
                        continue;
 
                if (split[i]) {
-                       hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       if (odm) {
+                               if (split[i] == 4 && old_pipe->next_odm_pipe->next_odm_pipe)
+                                       old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+                               else if (old_pipe->next_odm_pipe)
+                                       old_index = old_pipe->next_odm_pipe->pipe_idx;
+                       } else {
+                               if (split[i] == 4 && old_pipe->bottom_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                                       old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+                               else if (old_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                                       old_index = old_pipe->bottom_pipe->pipe_idx;
+                       }
+                       hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(hsplit_pipe);
                        if (!hsplit_pipe)
                                goto validate_fail;
@@ -2049,8 +2131,16 @@ static bool dcn30_internal_validate_bw(
                        repopulate_pipes = true;
                }
                if (split[i] == 4) {
-                       struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       struct pipe_ctx *pipe_4to1;
 
+                       if (odm && old_pipe->next_odm_pipe)
+                               old_index = old_pipe->next_odm_pipe->pipe_idx;
+                       else if (!odm && old_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                               old_index = old_pipe->bottom_pipe->pipe_idx;
+                       else
+                               old_index = -1;
+                       pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(pipe_4to1);
                        if (!pipe_4to1)
                                goto validate_fail;
@@ -2060,7 +2150,14 @@ static bool dcn30_internal_validate_bw(
                                goto validate_fail;
                        newly_split[pipe_4to1->pipe_idx] = true;
 
-                       pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       if (odm && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+                               old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+                       else if (!odm && old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                               old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+                       else
+                               old_index = -1;
+                       pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(pipe_4to1);
                        if (!pipe_4to1)
                                goto validate_fail;
@@ -2104,7 +2201,7 @@ validate_out:
        return out;
 }
 
-static void dcn30_calculate_wm(
+void dcn30_calculate_wm_and_dlg(
                struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
                int pipe_cnt,
@@ -2112,6 +2209,8 @@ static void dcn30_calculate_wm(
 {
        int i, pipe_idx;
        double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+                       dm_dram_clock_change_unsupported;
 
        if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
                dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2145,30 +2244,12 @@ static void dcn30_calculate_wm(
        pipes[0].clks_cfg.voltage = vlevel;
        pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
 
-       /* Set C:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * pstate latency overriden to 5us
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
        /* Set D:
         * DCFCLK: Min Required
         * FCLK(proportional to UCLK): 1GHz or Max
         * sr_enter_exit = 4, sr_exit = 2us
         */
+       /*
        if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
                context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
                context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
@@ -2182,26 +2263,72 @@ static void dcn30_calculate_wm(
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       */
 
-       /* Set A:
+       /* Set C:
         * DCFCLK: Min Required
         * FCLK(proportional to UCLK): 1GHz or Max
-        *
-        * Set A calculated last so that following calculations are based on Set A
+        * pstate latency overridden to 5us
         */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+               unsigned int min_dram_speed_mts_margin = 160;
+
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
+
+               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+               for (i = 3; i > 0; i--) {
+                       if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
+                                       (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
+                               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+               }
+
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       if (!pstate_en) {
+               /* The only difference between sets A and C is p-state latency;
+                * if p-state change is not supported, calculate DLG based on
+                * the dummy p-state latency and max out the set A p-state
+                * watermark.
+                */
+               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0x13FFFF;
+       } else {
+               /* Set A:
+                * DCFCLK: Min Required
+                * FCLK(proportional to UCLK): 1GHz or Max
+                *
+                * Set A calculated last so that following calculations are based on Set A
+                */
+               if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+                       context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+                       context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+                       context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+               }
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        }
-       context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+       /* Make set D = set A until set D is enabled */
+       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
 
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                if (!context->res_ctx.pipe_ctx[i].stream)
@@ -2221,6 +2348,13 @@ static void dcn30_calculate_wm(
 
                pipe_idx++;
        }
+
+       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+       if (!pstate_en)
+               /* Restore full p-state latency */
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 }
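
When DML reports DRAM clock change as unsupported, the set C path above swaps
in a "dummy" p-state latency picked from a four-entry table by matching the
current DRAM speed within a +/-160 MT/s window, falling back to entry 0. The
matching loop in isolation, as a self-contained sketch (hypothetical table;
assumes speed > margin so the unsigned subtraction cannot wrap):

	struct dummy_pstate_entry {
		unsigned int dram_speed_mts;
		unsigned int dummy_pstate_latency_us;
	};

	static unsigned int pick_dummy_latency(const struct dummy_pstate_entry tbl[4],
					       unsigned int speed_mts,
					       unsigned int margin_mts)
	{
		unsigned int latency_us = tbl[0].dummy_pstate_latency_us;
		int i;

		for (i = 3; i > 0; i--)
			if (speed_mts + margin_mts > tbl[i].dram_speed_mts &&
			    speed_mts - margin_mts < tbl[i].dram_speed_mts)
				latency_us = tbl[i].dummy_pstate_latency_us;
		return latency_us;
	}
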
 
 bool dcn30_validate_bandwidth(struct dc *dc,
@@ -2253,8 +2387,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
                goto validate_out;
        }
 
-       dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+       dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
 
        BW_VAL_TRACE_END_WATERMARKS();
 
@@ -2422,6 +2555,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
        .link_enc_create = dcn30_link_encoder_create,
        .panel_cntl_create = dcn30_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -2623,6 +2757,14 @@ static bool dcn30_resource_construct(
                }
        }
        pool->base.timing_generator_count = i;
+       /* PSR */
+       pool->base.psr = dmub_psr_create(ctx);
+
+       if (pool->base.psr == NULL) {
+               dm_error("DC: failed to create PSR obj!\n");
+               BREAK_TO_DEBUGGER();
+               goto create_fail;
+       }
 
        /* ABM */
        for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
index c9d5f94..d163812 100644 (file)
@@ -55,6 +55,11 @@ unsigned int dcn30_calc_max_scaled_time(
 
 bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
                bool fast_validate);
+void dcn30_calculate_wm_and_dlg(
+               struct dc *dc, struct dc_state *context,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt,
+               int vlevel);
 void dcn30_populate_dml_writeback_from_context(
                struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
 
index ae608c3..3586934 100644 (file)
@@ -30,8 +30,6 @@
  * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
  */
 
-typedef bool BOOLEAN;
-
 enum pp_smu_ver {
        /*
         * PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -240,7 +238,7 @@ struct pp_smu_funcs_nv {
         * DC hardware
         */
        enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
-                       BOOLEAN pstate_handshake_supported);
+                       bool pstate_handshake_supported);
 };
 
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS  8
index 80170f9..860e72a 100644 (file)
@@ -2635,15 +2635,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
        }
 
        if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
+                       mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
                mode_lib->vba.DRAMClockChangeWatermark += 25;
 
                for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-                       if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
-                               if (mode_lib->vba.DRAMClockChangeWatermark >
-                               dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-                                       mode_lib->vba.MinTTUVBlank[k] += 25;
-                       }
+                       if (mode_lib->vba.DRAMClockChangeWatermark >
+                       dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+                               mode_lib->vba.MinTTUVBlank[k] += 25;
                }
 
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
index 1e557dd..d0b9094 100644 (file)
@@ -33,7 +33,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
index 0d53e87..27cf8be 100644 (file)
@@ -33,7 +33,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
index a576eed..367c82b 100644 (file)
@@ -1294,7 +1294,7 @@ static unsigned int CalculateVMAndRowBytes(
        unsigned int MacroTileHeight;
        unsigned int ExtraDPDEBytesFrame;
        unsigned int PDEAndMetaPTEBytesFrame;
-       unsigned int PixelPTEReqHeightPTEs;
+       unsigned int PixelPTEReqHeightPTEs = 0;
 
        if (DCCEnable == true) {
                *MetaRequestHeight = 8 * BlockHeight256Bytes;
index 2beb284..9e0ae18 100644 (file)
@@ -597,7 +597,8 @@ static void CalculateStutterEfficiency(
                double meta_row_bw[],
                double dpte_row_bw[],
                double *StutterEfficiencyNotIncludingVBlank,
-               double *StutterEfficiency);
+               double *StutterEfficiency,
+               double *StutterPeriodOut);
 
 static void CalculateSwathAndDETConfiguration(
                bool ForceSingleDPP,
@@ -3134,7 +3135,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        v->meta_row_bw,
                        v->dpte_row_bw,
                        &v->StutterEfficiencyNotIncludingVBlank,
-                       &v->StutterEfficiency);
+                       &v->StutterEfficiency,
+                       &v->StutterPeriod);
 }
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
@@ -3235,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
                *BytePerPixelDETC = 0;
                *BytePerPixelY = 4;
                *BytePerPixelC = 0;
-       } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+       } else if (SourcePixelFormat == dm_444_16) {
                *BytePerPixelDETY = 2;
                *BytePerPixelDETC = 0;
                *BytePerPixelY = 2;
@@ -5305,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        ViewportExceedsSurface = true;
 
                if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
-                               && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+                               && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
                        if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
                                ViewportExceedsSurface = true;
                        }
@@ -5515,7 +5517,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                        if (WritebackPixelFormat[k] == dm_444_64) {
                                WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
                        }
-                       if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave || mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+                       if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
                                WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
                        }
                        WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
@@ -5556,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                }
        }
 
-       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
        } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
@@ -6151,7 +6153,8 @@ static void CalculateStutterEfficiency(
                double meta_row_bw[],
                double dpte_row_bw[],
                double *StutterEfficiencyNotIncludingVBlank,
-               double *StutterEfficiency)
+               double *StutterEfficiency,
+               double *StutterPeriodOut)
 {
        double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
        double FrameTimeForMinFullDETBufferingTime = 0;
@@ -6262,6 +6265,9 @@ static void CalculateStutterEfficiency(
        }
 
        *StutterEfficiency =  (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
+
+       if (StutterPeriodOut)
+               *StutterPeriodOut = StutterPeriod;
 }
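
StutterPeriodOut is an optional out-parameter: the dml30 caller above passes
&v->StutterPeriod, while callers that don't need the period may pass NULL and
the guard skips the store. The convention in a self-contained sketch
(illustrative computation only):

	/* Returns the primary result; reports an auxiliary value only
	 * when the caller supplies somewhere to put it. */
	static double compute_efficiency(double period, double *period_out)
	{
		if (period_out)
			*period_out = period;
		return 100.0 / (1.0 + period);	/* stand-in formula */
	}
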
 
 static void CalculateSwathAndDETConfiguration(
index 5bb10f6..416bf6f 100644 (file)
@@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
                *BytePerPixelDETC = 0;
                *BytePerPixelY = 4;
                *BytePerPixelC = 0;
-       } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+       } else if (SourcePixelFormat == dm_444_16) {
                *BytePerPixelDETY = 2;
                *BytePerPixelDETC = 0;
                *BytePerPixelY = 2;
index e5b17e1..c04965c 100644 (file)
@@ -32,7 +32,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
index cf98aa8..e883864 100644 (file)
@@ -162,7 +162,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
        .init_ddc_data = dal_hw_ddc_init,
        .init_generic = NULL,
index b38c96c..7d36b56 100644 (file)
@@ -194,7 +194,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
        .init_ddc_data = dal_hw_ddc_init,
        .init_generic = dal_hw_generic_init,
index 83f798c..9b63c6c 100644 (file)
@@ -221,7 +221,7 @@ static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
        generic->base.regs = &generic_regs[en].gpio;
 }
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
        .init_ddc_data = dal_hw_ddc_init,
        .init_generic = dal_hw_generic_init,
index 907c591..2f57ee6 100644 (file)
@@ -202,7 +202,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
        .init_ddc_data = dal_hw_ddc_init,
        .init_generic = dal_hw_generic_init,
index 7e7fb65..3be2c90 100644 (file)
@@ -218,7 +218,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
        .init_ddc_data = dal_hw_ddc_init,
        .init_generic = dal_hw_generic_init,
index 329395e..6e6bc66 100644 (file)
@@ -101,7 +101,7 @@ struct resource_funcs {
                                        struct dc *dc,
                                        struct dc_state *context,
                                        bool fast_validate);
-       void (*calculate_wm)(
+       void (*calculate_wm_and_dlg)(
                                struct dc *dc, struct dc_state *context,
                                display_e2e_pipe_params_st *pipes,
                                int pipe_cnt,
@@ -300,6 +300,7 @@ union pipe_update_flags {
                uint32_t gamut_remap : 1;
                uint32_t scaler : 1;
                uint32_t viewport : 1;
+               uint32_t plane_changed : 1;
        } bits;
        uint32_t raw;
 };
@@ -396,6 +397,7 @@ struct dc_state {
        struct dc_stream_state *streams[MAX_PIPES];
        struct dc_stream_status stream_status[MAX_PIPES];
        uint8_t stream_count;
+       uint8_t stream_mask;
 
        struct resource_context res_ctx;
 
@@ -410,6 +412,10 @@ struct dc_state {
        struct clk_mgr *clk_mgr;
 
        struct kref refcount;
+
+       struct {
+               unsigned int stutter_period_us;
+       } perf_params;
 };
 
 #endif /* _CORE_TYPES_H_ */
index 11ce06e..0184cef 100644 (file)
@@ -143,7 +143,7 @@ struct stream_encoder_funcs {
                struct stream_encoder *enc,
                struct dc_crtc_timing *crtc_timing);
 
-       void (*set_mst_bandwidth)(
+       void (*set_throttled_vcp_size)(
                struct stream_encoder *enc,
                struct fixed31_32 avg_time_slots_per_mtp);
 
index 944c032..1053b16 100644 (file)
@@ -46,9 +46,10 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
        struct dc_crtc_timing *crtc_timing,
        bool is_dual_link) {}
 
-static void virtual_stream_encoder_set_mst_bandwidth(
+static void virtual_stream_encoder_set_throttled_vcp_size(
        struct stream_encoder *enc,
-       struct fixed31_32 avg_time_slots_per_mtp) {}
+       struct fixed31_32 avg_time_slots_per_mtp)
+{}
 
 static void virtual_stream_encoder_update_hdmi_info_packets(
        struct stream_encoder *enc,
@@ -98,6 +99,12 @@ static void virtual_setup_stereo_sync(
                        bool enable)
 {}
 
+static void virtual_stream_encoder_set_dsc_pps_info_packet(
+               struct stream_encoder *enc,
+               bool enable,
+               uint8_t *dsc_packed_pps)
+{}
+
 static const struct stream_encoder_funcs virtual_str_enc_funcs = {
        .dp_set_odm_combine =
                virtual_enc_dp_set_odm_combine,
@@ -107,8 +114,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
                virtual_stream_encoder_hdmi_set_stream_attribute,
        .dvi_set_stream_attribute =
                virtual_stream_encoder_dvi_set_stream_attribute,
-       .set_mst_bandwidth =
-               virtual_stream_encoder_set_mst_bandwidth,
+       .set_throttled_vcp_size =
+               virtual_stream_encoder_set_throttled_vcp_size,
        .update_hdmi_info_packets =
                virtual_stream_encoder_update_hdmi_info_packets,
        .stop_hdmi_info_packets =
@@ -127,6 +134,7 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
        .hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute,
        .dig_connect_to_otg = virtual_dig_connect_to_otg,
        .setup_stereo_sync = virtual_setup_stereo_sync,
+       .dp_set_dsc_pps_info_packet = virtual_stream_encoder_set_dsc_pps_info_packet,
 };
 
 bool virtual_stream_encoder_construct(
index d7e7f2e..d103ec1 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x4e5b2f46f
+#define DMUB_FW_VERSION_GIT_HASH 0x9cf8f05fe
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 29
+#define DMUB_FW_VERSION_REVISION 35
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -57,6 +57,7 @@
 
 #define SET_ABM_PIPE_GRADUALLY_DISABLE           0
 #define SET_ABM_PIPE_IMMEDIATELY_DISABLE         255
+#define SET_ABM_PIPE_IMMEDIATE_KEEP_GAIN_DISABLE 254
 #define SET_ABM_PIPE_NORMAL                      1
 
 /* Maximum number of streams on any ASIC. */
 #define PHYSICAL_ADDRESS_LOC union large_integer
 #endif
 
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
 #ifndef dmub_memcpy
 #define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
 #endif
@@ -81,6 +78,10 @@ extern "C" {
 #define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
 #endif
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #ifndef dmub_udelay
 #define dmub_udelay(microseconds) udelay(microseconds)
 #endif
@@ -97,6 +98,7 @@ union dmub_psr_debug_flags {
        struct {
                uint32_t visual_confirm : 1;
                uint32_t use_hw_lock_mgr : 1;
+               uint32_t log_line_nums : 1;
        } bitfields;
 
        uint32_t u32All;
@@ -169,7 +171,7 @@ union dmub_fw_boot_status {
                uint32_t dal_fw : 1;
                uint32_t mailbox_rdy : 1;
                uint32_t optimized_init_done : 1;
-               uint32_t reserved : 29;
+               uint32_t restore_required : 1;
        } bits;
        uint32_t all;
 };
@@ -178,6 +180,7 @@ enum dmub_fw_boot_status_bit {
        DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0),
        DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1),
        DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2),
+       DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3),
 };
 
 /* Register bit definition for SCRATCH15 */
@@ -297,9 +300,17 @@ enum dmub_cmd_type {
        DMUB_CMD__PSR = 64,
        DMUB_CMD__ABM = 66,
        DMUB_CMD__HW_LOCK = 69,
+       DMUB_CMD__DP_AUX_ACCESS = 70,
+       DMUB_CMD__OUTBOX1_ENABLE = 71,
        DMUB_CMD__VBIOS = 128,
 };
 
+enum dmub_out_cmd_type {
+       DMUB_OUT_CMD__NULL = 0,
+       DMUB_OUT_CMD__DP_AUX_REPLY = 1,
+       DMUB_OUT_CMD__DP_HPD_NOTIFY = 2,
+};
+
 #pragma pack(push, 1)
 
 struct dmub_cmd_header {
@@ -455,6 +466,78 @@ struct dmub_rb_cmd_dpphy_init {
        uint8_t reserved[60];
 };
 
+enum dp_aux_request_action {
+       DP_AUX_REQ_ACTION_I2C_WRITE             = 0x00,
+       DP_AUX_REQ_ACTION_I2C_READ              = 0x10,
+       DP_AUX_REQ_ACTION_I2C_STATUS_REQ        = 0x20,
+       DP_AUX_REQ_ACTION_I2C_WRITE_MOT         = 0x40,
+       DP_AUX_REQ_ACTION_I2C_READ_MOT          = 0x50,
+       DP_AUX_REQ_ACTION_I2C_STATUS_REQ_MOT    = 0x60,
+       DP_AUX_REQ_ACTION_DPCD_WRITE            = 0x80,
+       DP_AUX_REQ_ACTION_DPCD_READ             = 0x90
+};
+
+/* DP AUX command */
+struct aux_transaction_parameters {
+       uint8_t is_i2c_over_aux;
+       uint8_t action;
+       uint8_t length;
+       uint8_t pad;
+       uint32_t address;
+       uint8_t data[16];
+};
+
+struct dmub_cmd_dp_aux_control_data {
+       uint32_t handle;
+       uint8_t port_index;
+       uint8_t sw_crc_enabled;
+       uint16_t timeout;
+       struct aux_transaction_parameters dpaux;
+};
+
+struct dmub_rb_cmd_dp_aux_access {
+       struct dmub_cmd_header header;
+       struct dmub_cmd_dp_aux_control_data aux_control;
+};
+
+struct dmub_rb_cmd_outbox1_enable {
+       struct dmub_cmd_header header;
+       uint32_t enable;
+};
+
+/* DP AUX Reply command - OutBox Cmd */
+struct aux_reply_data {
+       uint8_t command;
+       uint8_t length;
+       uint8_t pad[2];
+       uint8_t data[16];
+};
+
+struct aux_reply_control_data {
+       uint32_t handle;
+       uint8_t phy_port_index;
+       uint8_t result;
+       uint16_t pad;
+};
+
+struct dmub_rb_cmd_dp_aux_reply {
+       struct dmub_cmd_header header;
+       struct aux_reply_control_data control;
+       struct aux_reply_data reply_data;
+};
+
+struct dp_hpd_data {
+       uint8_t phy_port_index;
+       uint8_t hpd_type;
+       uint8_t hpd_status;
+       uint8_t pad;
+};
+
+struct dmub_rb_cmd_dp_hpd_notify {
+       struct dmub_cmd_header header;
+       struct dp_hpd_data hpd_data;
+};
+
 /*
  * Command IDs should be treated as stable ABI.
  * Do not reuse or modify IDs.
@@ -684,8 +767,15 @@ union dmub_rb_cmd {
        struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
        struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
        struct dmub_rb_cmd_abm_init_config abm_init_config;
+       struct dmub_rb_cmd_dp_aux_access dp_aux_access;
+       struct dmub_rb_cmd_outbox1_enable outbox1_enable;
 };
 
+union dmub_rb_out_cmd {
+       struct dmub_rb_cmd_common cmd_common;
+       struct dmub_rb_cmd_dp_aux_reply dp_aux_reply;
+       struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify;
+};
 #pragma pack(pop)
 
 
@@ -758,6 +848,25 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
        return true;
 }
 
+static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
+                                     const union dmub_rb_out_cmd *cmd)
+{
+       uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+       const uint8_t *src = (uint8_t *)cmd;
+
+       if (dmub_rb_full(rb))
+               return false;
+
+       dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
+
+       rb->wrpt += DMUB_RB_CMD_SIZE;
+
+       if (rb->wrpt >= rb->capacity)
+               rb->wrpt %= rb->capacity;
+
+       return true;
+}
+
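dmub_rb_out_push_front advances the write pointer by one fixed-size command
and wraps with a modulo; since the ring capacity is a whole number of
commands, this behaves like a conditional reset to zero. For example, with a
64-byte command and a 1024-byte ring (assumed sizes, for illustration), a
write pointer of 960 advances to 1024 and wraps to 0:

	#define CMD_SIZE 64u	/* assumed command stride */

	/* Advance a ring write pointer by one command, wrapping at
	 * capacity; capacity must be a multiple of CMD_SIZE. */
	static unsigned int rb_advance(unsigned int wrpt, unsigned int capacity)
	{
		wrpt += CMD_SIZE;
		if (wrpt >= capacity)
			wrpt %= capacity;
		return wrpt;
	}
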
 static inline bool dmub_rb_front(struct dmub_rb *rb,
                                 union dmub_rb_cmd  *cmd)
 {
@@ -771,6 +880,23 @@ static inline bool dmub_rb_front(struct dmub_rb *rb,
        return true;
 }
 
+static inline bool dmub_rb_out_front(struct dmub_rb *rb,
+                                union dmub_rb_out_cmd  *cmd)
+{
+       const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+       uint64_t *dst = (uint64_t *)cmd;
+       int i;
+
+       if (dmub_rb_empty(rb))
+               return false;
+
+       // copy one full command out of the ring buffer, 64 bits at a time
+       for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+               *dst++ = *src++;
+
+       return true;
+}
+
 static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
 {
        if (dmub_rb_empty(rb))
@@ -791,12 +917,10 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
 
        while (rptr != wptr) {
                uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
-               //uint64_t volatile *p = (uint64_t volatile *)data;
-               uint64_t temp;
                int i;
 
                for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
-                       temp = *data++;
+                       *data++;
 
                rptr += DMUB_RB_CMD_SIZE;
                if (rptr >= rb->capacity)
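
Dropping the unused temp in dmub_rb_flush_pending is safe only because data is
volatile-qualified: a volatile access is a side effect, so the bare read in
the loop is still emitted and each pending command is still read. The same
idea in a self-contained sketch:

	#include <stdint.h>

	/* Read n 64-bit words purely for their side effect; the volatile
	 * qualifier keeps the compiler from eliding the loads. */
	static void touch_words(const volatile uint64_t *p, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			p[i];
	}
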
index 21011ed..7c78292 100644 (file)
@@ -318,4 +318,10 @@ struct bp_encoder_cap_info {
        uint32_t RESERVED:27;
 };
 
+struct bp_soc_bb_info {
+       uint32_t dram_clock_change_latency_100ns;
+       uint32_t dram_sr_exit_latency_100ns;
+       uint32_t dram_sr_enter_exit_latency_100ns;
+};
+
 #endif /*__DAL_BIOS_PARSER_TYPES_H__ */
index 550f46e..7392a89 100644 (file)
@@ -80,6 +80,7 @@ struct link_training_settings {
 
        uint16_t cr_pattern_time;
        uint16_t eq_pattern_time;
+       enum dc_dp_training_pattern pattern_for_cr;
        enum dc_dp_training_pattern pattern_for_eq;
 
        bool enhanced_framing;
index e9fbd94..20e554e 100644 (file)
@@ -470,6 +470,14 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
                if (reset_status != MOD_HDCP_STATUS_SUCCESS)
                        push_error_status(hdcp, reset_status);
        }
+
+       /* Clear CP_IRQ status if needed */
+       if (event_ctx.event == MOD_HDCP_EVENT_CPIRQ) {
+               status = mod_hdcp_clear_cp_irq_status(hdcp);
+               if (status != MOD_HDCP_STATUS_SUCCESS)
+                       push_error_status(hdcp, status);
+       }
+
        return status;
 }
 
index b0cefed..6c678cf 100644 (file)
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
 
 /* hdcp version helpers */
 static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
index bb5130f..f7b5583 100644 (file)
@@ -30,6 +30,8 @@
 #define KSV_READ_SIZE 0xf      /* 0x6803b - 0x6802c */
 #define HDCP_MAX_AUX_TRANSACTION_SIZE 16
 
+#define DP_CP_IRQ (1 << 2)
+
 enum mod_hdcp_ddc_message_id {
        MOD_HDCP_MESSAGE_ID_INVALID = -1,
 
@@ -645,3 +647,18 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
                status = MOD_HDCP_STATUS_INVALID_OPERATION;
        return status;
 }
+
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
+{
+       uint8_t clear_cp_irq_bit = DP_CP_IRQ;
+       uint32_t size = 1;
+
+       if (is_dp_hdcp(hdcp)) {
+               uint32_t cp_irq_addrs = (hdcp->connection.link.dp.rev >= 0x14)
+                               ? DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 : DP_DEVICE_SERVICE_IRQ_VECTOR;
+               return hdcp->config.ddc.funcs.write_dpcd(hdcp->config.ddc.handle, cp_irq_addrs,
+                               &clear_cp_irq_bit, size) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+       }
+
+       return MOD_HDCP_STATUS_INVALID_OPERATION;
+}
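
For DPCD revision 1.4 and later the CP_IRQ bit lives in the ESI register
block, hence the address switch above. Outside the HDCP module's ddc
abstraction, the equivalent write through the core DP AUX helpers would look
roughly like this (a hypothetical helper; the addresses and DP_CP_IRQ come
from drm_dp_helper.h):

	#include <drm/drm_dp_helper.h>

	static int clear_cp_irq(struct drm_dp_aux *aux, u8 dpcd_rev)
	{
		u8 bit = DP_CP_IRQ;
		unsigned int addr = (dpcd_rev >= 0x14)
				? DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0
				: DP_DEVICE_SERVICE_IRQ_VECTOR;

		/* drm_dp_dpcd_write() returns bytes transferred or -errno */
		return drm_dp_dpcd_write(aux, addr, &bit, 1) == 1 ? 0 : -EIO;
	}
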
index d3192b9..47f8ee2 100644 (file)
@@ -27,7 +27,7 @@
 #define MOD_HDCP_LOG_H_
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
index fb1161d..3a367a5 100644 (file)
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
        enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
        if (!psp->dtm_context.dtm_initialized) {
-               DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+               DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
                display->state = MOD_HDCP_DISPLAY_INACTIVE;
                return MOD_HDCP_STATUS_FAILURE;
        }
index e98c84e..10dc481 100644 (file)
@@ -47,6 +47,40 @@ enum amd_apu_flags {
        AMD_APU_IS_RENOIR = 0x00000008UL,
 };
 
+/**
+ * DOC: IP Blocks
+ *
+ * GPUs are composed of IP (intellectual property) blocks. These
+ * IP blocks provide various functionalities: display, graphics,
+ * video decode, etc. The IP blocks that comprise a particular GPU
+ * are listed in the GPU's respective SoC file. amdgpu_device.c
+ * acquires the list of IP blocks for the GPU in use on initialization.
+ * It can then operate on this list to perform standard driver operations
+ * such as: init, fini, suspend, resume, etc.
+ *
+ * IP block implementations are named using the following convention:
+ * <functionality>_v<version> (e.g. gfx_v6_0).
+ */
+
+/**
+ * enum amd_ip_block_type - Used to classify IP blocks by functionality.
+ *
+ * @AMD_IP_BLOCK_TYPE_COMMON: GPU Family
+ * @AMD_IP_BLOCK_TYPE_GMC: Graphics Memory Controller
+ * @AMD_IP_BLOCK_TYPE_IH: Interrupt Handler
+ * @AMD_IP_BLOCK_TYPE_SMC: System Management Controller
+ * @AMD_IP_BLOCK_TYPE_PSP: Platform Security Processor
+ * @AMD_IP_BLOCK_TYPE_DCE: Display and Compositing Engine
+ * @AMD_IP_BLOCK_TYPE_GFX: Graphics and Compute Engine
+ * @AMD_IP_BLOCK_TYPE_SDMA: System DMA Engine
+ * @AMD_IP_BLOCK_TYPE_UVD: Unified Video Decoder
+ * @AMD_IP_BLOCK_TYPE_VCE: Video Compression Engine
+ * @AMD_IP_BLOCK_TYPE_ACP: Audio Co-Processor
+ * @AMD_IP_BLOCK_TYPE_VCN: Video Core/Codec Next
+ * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
+ * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
+ */
 enum amd_ip_block_type {
        AMD_IP_BLOCK_TYPE_COMMON,
        AMD_IP_BLOCK_TYPE_GMC,
@@ -128,6 +162,34 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_ATHUB                   (1 << 16)
 #define AMD_PG_SUPPORT_JPEG                    (1 << 17)
 
+/**
+ * enum PP_FEATURE_MASK - Used to mask power play features.
+ *
+ * @PP_SCLK_DPM_MASK: Dynamic adjustment of the system (graphics) clock.
+ * @PP_MCLK_DPM_MASK: Dynamic adjustment of the memory clock.
+ * @PP_PCIE_DPM_MASK: Dynamic adjustment of PCIE clocks and lanes.
+ * @PP_SCLK_DEEP_SLEEP_MASK: System (graphics) clock deep sleep.
+ * @PP_POWER_CONTAINMENT_MASK: Power containment.
+ * @PP_UVD_HANDSHAKE_MASK: Unified video decoder handshake.
+ * @PP_SMC_VOLTAGE_CONTROL_MASK: Dynamic voltage control.
+ * @PP_VBI_TIME_SUPPORT_MASK: Vertical blank interval support.
+ * @PP_ULV_MASK: Ultra low voltage.
+ * @PP_ENABLE_GFX_CG_THRU_SMU: SMU control of GFX engine clockgating.
+ * @PP_CLOCK_STRETCH_MASK: Clock stretching.
+ * @PP_OD_FUZZY_FAN_CONTROL_MASK: Overdrive fuzzy fan control.
+ * @PP_SOCCLK_DPM_MASK: Dynamic adjustment of the SoC clock.
+ * @PP_DCEFCLK_DPM_MASK: Dynamic adjustment of the Display Controller Engine Fabric clock.
+ * @PP_OVERDRIVE_MASK: Over- and under-clocking support.
+ * @PP_GFXOFF_MASK: Dynamic graphics engine power control.
+ * @PP_ACG_MASK: Adaptive clock generator.
+ * @PP_STUTTER_MODE: Stutter mode.
+ * @PP_AVFS_MASK: Adaptive voltage and frequency scaling.
+ *
+ * To override these settings on boot, append amdgpu.ppfeaturemask=<mask> to
+ * the kernel's command line parameters. This is usually done through a system's
+ * boot loader (e.g. GRUB). If manually loading the driver, pass
+ * ppfeaturemask=<mask> as a modprobe parameter.
+ */
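As a worked example of composing the mask: enabling only sclk and mclk DPM
from the values below gives 0x1 | 0x2 = 0x3, i.e. booting with
amdgpu.ppfeaturemask=0x3 (an illustrative minimal mask, not a recommended
setting).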
 enum PP_FEATURE_MASK {
        PP_SCLK_DPM_MASK = 0x1,
        PP_MCLK_DPM_MASK = 0x2,
@@ -165,56 +227,59 @@ enum DC_DEBUG_MASK {
 };
 
 enum amd_dpm_forced_level;
+
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ * @name: Name of IP block
+ * @early_init: sets up early driver state (pre sw_init),
+ *              does not configure hw - Optional
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional
+ * @sw_init: sets up driver state, does not configure hw
+ * @sw_fini: tears down driver state, does not configure hw
+ * @hw_init: sets up the hw state
+ * @hw_fini: tears down the hw state
+ * @late_fini: final cleanup
+ * @suspend: handles IP specific hw/sw changes for suspend
+ * @resume: handles IP specific hw/sw changes for resume
+ * @is_idle: returns current IP block idle status
+ * @wait_for_idle: poll for idle
+ * @check_soft_reset: check whether the IP block needs a soft reset
+ * @pre_soft_reset: prepare the IP block for a soft reset
+ * @soft_reset: soft reset the IP block
+ * @post_soft_reset: clean up after the IP block's soft reset
+ * @set_clockgating_state: enable/disable cg for the IP block
+ * @set_powergating_state: enable/disable pg for the IP block
+ * @get_clockgating_state: get current clockgating status
+ * @enable_umd_pstate: enable UMD powerstate
+ *
+ * These hooks provide an interface for controlling the operational state
+ * of IP blocks. After acquiring a list of IP blocks for the GPU in use,
+ * the driver can make chip-wide state changes by walking this list and
+ * making calls to hooks from each IP block. This list is ordered to ensure
+ * that the driver initializes the IP blocks in a safe sequence.
  */
 struct amd_ip_funcs {
-       /** @name: Name of IP block */
        char *name;
-       /**
-        * @early_init:
-        *
-        * sets up early driver state (pre sw_init),
-        * does not configure hw - Optional
-        */
        int (*early_init)(void *handle);
-       /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
        int (*late_init)(void *handle);
-       /** @sw_init: sets up driver state, does not configure hw */
        int (*sw_init)(void *handle);
-       /** @sw_fini: tears down driver state, does not configure hw */
        int (*sw_fini)(void *handle);
-       /** @hw_init: sets up the hw state */
        int (*hw_init)(void *handle);
-       /** @hw_fini: tears down the hw state */
        int (*hw_fini)(void *handle);
-       /** @late_fini: final cleanup */
        void (*late_fini)(void *handle);
-       /** @suspend: handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
-       /** @resume: handles IP specific hw/sw changes for resume */
        int (*resume)(void *handle);
-       /** @is_idle: returns current IP block idle status */
        bool (*is_idle)(void *handle);
-       /** @wait_for_idle: poll for idle */
        int (*wait_for_idle)(void *handle);
-       /** @check_soft_reset: check soft reset the IP block */
        bool (*check_soft_reset)(void *handle);
-       /** @pre_soft_reset: pre soft reset the IP block */
        int (*pre_soft_reset)(void *handle);
-       /** @soft_reset: soft reset the IP block */
        int (*soft_reset)(void *handle);
-       /** @post_soft_reset: post soft reset the IP block */
        int (*post_soft_reset)(void *handle);
-       /** @set_clockgating_state: enable/disable cg for the IP block */
        int (*set_clockgating_state)(void *handle,
                                     enum amd_clockgating_state state);
-       /** @set_powergating_state: enable/disable pg for the IP block */
        int (*set_powergating_state)(void *handle,
                                     enum amd_powergating_state state);
-       /** @get_clockgating_state: get current clockgating status */
        void (*get_clockgating_state)(void *handle, u32 *flags);
-       /** @enable_umd_pstate: enable UMD powerstate */
        int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
 };
 
index 1116779..e245e91 100644 (file)
 #define mmDB_STENCIL_WRITE_BASE_DEFAULT                                          0x00000000
 #define mmDB_RESERVED_REG_1_DEFAULT                                              0x00000000
 #define mmDB_RESERVED_REG_3_DEFAULT                                              0x00000000
+#define mmDB_VRS_OVERRIDE_CNTL_DEFAULT                                           0x00000000
 #define mmDB_Z_READ_BASE_HI_DEFAULT                                              0x00000000
 #define mmDB_STENCIL_READ_BASE_HI_DEFAULT                                        0x00000000
 #define mmDB_Z_WRITE_BASE_HI_DEFAULT                                             0x00000000
 #define mmPA_SU_OVER_RASTERIZATION_CNTL_DEFAULT                                  0x00000000
 #define mmPA_STEREO_CNTL_DEFAULT                                                 0x00000000
 #define mmPA_STATE_STEREO_X_DEFAULT                                              0x00000000
+#define mmPA_CL_VRS_CNTL_DEFAULT                                                 0x00000000
 #define mmPA_SU_POINT_SIZE_DEFAULT                                               0x00000000
 #define mmPA_SU_POINT_MINMAX_DEFAULT                                             0x00000000
 #define mmPA_SU_LINE_CNTL_DEFAULT                                                0x00000000
index cbaad7d..66a4151 100644 (file)
 #define mmDB_RESERVED_REG_1_BASE_IDX                                                                   1
 #define mmDB_RESERVED_REG_3                                                                            0x0017
 #define mmDB_RESERVED_REG_3_BASE_IDX                                                                   1
+#define mmDB_VRS_OVERRIDE_CNTL                                                                         0x0019
+#define mmDB_VRS_OVERRIDE_CNTL_BASE_IDX                                                                1
 #define mmDB_Z_READ_BASE_HI                                                                            0x001a
 #define mmDB_Z_READ_BASE_HI_BASE_IDX                                                                   1
 #define mmDB_STENCIL_READ_BASE_HI                                                                      0x001b
 #define mmPA_STEREO_CNTL_BASE_IDX                                                                      1
 #define mmPA_STATE_STEREO_X                                                                            0x0211
 #define mmPA_STATE_STEREO_X_BASE_IDX                                                                   1
+#define mmPA_CL_VRS_CNTL                                                                               0x0212
+#define mmPA_CL_VRS_CNTL_BASE_IDX                                                                      1
 #define mmPA_SU_POINT_SIZE                                                                             0x0280
 #define mmPA_SU_POINT_SIZE_BASE_IDX                                                                    1
 #define mmPA_SU_POINT_MINMAX                                                                           0x0281
index c2d035e..aed799d 100644 (file)
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT                                                         0x3
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT                                                          0x4
 #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT                                                          0x8
+#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE__SHIFT                                                      0x10
 #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT                                                           0x18
 #define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK                                                      0x00000001L
 #define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK                                                       0x00000002L
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK                                                           0x00000008L
 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK                                                            0x00000010L
 #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK                                                            0x00000F00L
+#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE_MASK                                                        0x00FF0000L
 #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK                                                             0x7F000000L
 //DB_DFSM_CONFIG
 #define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT                                                                    0x0
 #define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT                                                    0x18
 #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT                                                  0x19
 #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT                                                  0x1a
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT                                                           0x1c
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT__SHIFT                                                     0x1e
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC__SHIFT                                                  0x1f
 #define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK                                          0x00000001L
 #define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK                                                      0x01000000L
 #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK                                                    0x02000000L
 #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK                                                    0x04000000L
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK                                                             0x10000000L
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_MASK                                                       0x40000000L
 #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC_MASK                                                    0x80000000L
 //CB_HW_CONTROL
 #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT                                                      0x0
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT                                               0x1
 #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC__SHIFT                                               0x3
 #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX__SHIFT                                                   0x4
+#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN__SHIFT                                    0x5
 #define CB_HW_CONTROL__RMI_CREDITS__SHIFT                                                                     0x6
 #define CB_HW_CONTROL__CHICKEN_BITS__SHIFT                                                                    0xc
 #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS__SHIFT                                                0xf
 #define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT                                              0x1e
 #define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT                                    0x1f
 #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK                                                        0x00000001L
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK                                                 0x00000002L
 #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC_MASK                                                 0x00000008L
 #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX_MASK                                                     0x00000010L
+#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN_MASK                                      0x00000020L
 #define CB_HW_CONTROL__RMI_CREDITS_MASK                                                                       0x00000FC0L
 #define CB_HW_CONTROL__CHICKEN_BITS_MASK                                                                      0x00007000L
 #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS_MASK                                                  0x00008000L
 #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT                                                         0x16
 #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT                                                         0x17
 #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT                                               0x19
+#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE__SHIFT                                                       0x1a
 #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT                                                 0x1b
 #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK                                                0x00000003L
 #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK                                              0x0000001CL
 #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK                                                           0x00400000L
 #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK                                                           0x00800000L
 #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK                                                 0x02000000L
+#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE_MASK                                                         0x04000000L
 #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK                                                   0x18000000L
 //DB_HTILE_DATA_BASE
 #define DB_HTILE_DATA_BASE__BASE_256B__SHIFT                                                                  0x0
 //DB_RESERVED_REG_3
 #define DB_RESERVED_REG_3__FIELD_1__SHIFT                                                                     0x0
 #define DB_RESERVED_REG_3__FIELD_1_MASK                                                                       0x003FFFFFL
+//DB_VRS_OVERRIDE_CNTL
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT                                          0x0
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X__SHIFT                                                      0x4
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y__SHIFT                                                      0x6
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK                                            0x00000007L
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X_MASK                                                        0x00000030L
+#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y_MASK                                                        0x000000C0L
 //DB_Z_READ_BASE_HI
 #define DB_Z_READ_BASE_HI__BASE_HI__SHIFT                                                                     0x0
 #define DB_Z_READ_BASE_HI__BASE_HI_MASK                                                                       0x000000FFL
 #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT                                                    0x18
 #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT                                                         0x19
 #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT                                                          0x1b
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT                                                            0x1c
 #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT                                                    0x1d
 #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT                                                   0x1e
 #define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK                                                               0x00000001L
 #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK                                                      0x01000000L
 #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK                                                           0x02000000L
 #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK                                                            0x08000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK                                                              0x10000000L
 #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK                                                      0x20000000L
 #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK                                                     0x40000000L
 //PA_CL_NANINF_CNTL
 //PA_STATE_STEREO_X
 #define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT                                                             0x0
 #define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK                                                               0xFFFFFFFFL
+//PA_CL_VRS_CNTL
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT                                                      0x0
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT                                                   0x3
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT                                                       0x6
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT                                                      0x9
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT                                                         0xd
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT                                                     0xe
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK                                                        0x00000007L
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK                                                     0x00000038L
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK                                                         0x000001C0L
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK                                                        0x00000E00L
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK                                                           0x00002000L
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK                                                       0x00004000L
 //PA_SU_POINT_SIZE
 #define PA_SU_POINT_SIZE__HEIGHT__SHIFT                                                                       0x0
 #define PA_SU_POINT_SIZE__WIDTH__SHIFT                                                                        0x10
 #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT                                                      0x10
 #define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT                                                             0x11
 #define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT                                                                 0x12
+#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING__SHIFT                                                           0x13
 #define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK                                                               0x00000001L
 #define DB_HTILE_SURFACE__FULL_CACHE_MASK                                                                     0x00000002L
 #define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK                                                               0x00000004L
 #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK                                                        0x00010000L
 #define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK                                                               0x00020000L
 #define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK                                                                   0x00040000L
+#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING_MASK                                                             0x00180000L
 //DB_SRESULTS_COMPARE_STATE0
 #define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT                                                       0x0
 #define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT                                                      0x4
 #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR0_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR1_ATTRIB3
 #define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR1_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR2_ATTRIB3
 #define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR2_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR3_ATTRIB3
 #define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR3_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR4_ATTRIB3
 #define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR4_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR5_ATTRIB3
 #define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR5_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR6_ATTRIB3
 #define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR6_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 //CB_COLOR7_ATTRIB3
 #define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT                                                                  0x0
 #define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT                                                                 0xd
 #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT                                                          0x1a
 #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL__SHIFT                                                              0x1b
 #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT                                                            0x1e
+#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT                                                        0x1f
 #define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK                                                                    0x00001FFFL
 #define CB_COLOR7_ATTRIB3__META_LINEAR_MASK                                                                   0x00002000L
 #define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK                                                                 0x0007C000L
 #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED_MASK                                                            0x04000000L
 #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL_MASK                                                                0x38000000L
 #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK                                                              0x40000000L
+#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK                                                          0x80000000L
 
 
 // addressBlock: gc_gfxudec
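The new VRS registers above follow this header's usual SHIFT/MASK convention: a field is programmed by shifting the value into position and masking it. A minimal sketch, assuming the PA_CL_VRS_CNTL definitions above are in scope and using placeholder combiner-mode values (this helper is illustrative, not part of the patch):

#include <stdint.h>

/* Compose a PA_CL_VRS_CNTL value from its combiner-mode fields using
 * the SHIFT/MASK pairs defined above. */
static uint32_t pack_pa_cl_vrs_cntl(uint32_t vertex_mode, uint32_t prim_mode,
                                    uint32_t htile_mode)
{
        uint32_t v = 0;

        v |= (vertex_mode << PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT) &
             PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK;
        v |= (prim_mode << PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT) &
             PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK;
        v |= (htile_mode << PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT) &
             PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK;
        return v;
}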
index f41556a..629a8a3 100644 (file)
 #define mmGCEA_EDC_CNT2_BASE_IDX                                                                       0
 #define mmGCEA_EDC_CNT3                                                                                0x071b
 #define mmGCEA_EDC_CNT3_BASE_IDX                                                                       0
+#define mmGCEA_ERR_STATUS                                                                              0x0712
+#define mmGCEA_ERR_STATUS_BASE_IDX                                                                     0
 
 // addressBlock: gc_gfxudec
 // base address: 0x30000
 #define mmRLC_EDC_CNT2                                                                                 0x4d41
 #define mmRLC_EDC_CNT2_BASE_IDX                                                                        1
 
-#endif
\ No newline at end of file
+#endif
index 07aceff..524ba44 100644 (file)
 #define mmUVD_LMI_CTRL2_BASE_IDX                                                                       1
 #define mmUVD_MASTINT_EN                                                                               0x0540
 #define mmUVD_MASTINT_EN_BASE_IDX                                                                      1
+#define mmUVD_FW_STATUS                                                                                0x0557
+#define mmUVD_FW_STATUS_BASE_IDX                                                                       1
 #define mmJPEG_CGC_CTRL                                                                                0x0565
 #define mmJPEG_CGC_CTRL_BASE_IDX                                                                       1
 #define mmUVD_LMI_CTRL                                                                                 0x0566
 #define mmUVD_CONTEXT_ID2_BASE_IDX                                                                     1
 
 
+
 #endif
index b427f73..919be18 100644 (file)
 #define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT                                                                   0x0
 #define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK                                                                     0xFFFFFFFFL
 
+//UVD_FW_STATUS
+#define UVD_FW_STATUS__BUSY__SHIFT                                                                            0x0
+#define UVD_FW_STATUS__ACTIVE__SHIFT                                                                          0x1
+#define UVD_FW_STATUS__SEND_EFUSE_REQ__SHIFT                                                                  0x2
+#define UVD_FW_STATUS__DONE__SHIFT                                                                            0x8
+#define UVD_FW_STATUS__PASS__SHIFT                                                                            0x10
+#define UVD_FW_STATUS__FAIL__SHIFT                                                                            0x11
+#define UVD_FW_STATUS__INVALID_LEN__SHIFT                                                                     0x12
+#define UVD_FW_STATUS__INVALID_0_PADDING__SHIFT                                                               0x13
+#define UVD_FW_STATUS__INVALID_NONCE__SHIFT                                                                   0x14
+#define UVD_FW_STATUS__BUSY_MASK                                                                              0x00000001L
+#define UVD_FW_STATUS__ACTIVE_MASK                                                                            0x00000002L
+#define UVD_FW_STATUS__SEND_EFUSE_REQ_MASK                                                                    0x00000004L
+#define UVD_FW_STATUS__DONE_MASK                                                                              0x00000100L
+#define UVD_FW_STATUS__PASS_MASK                                                                              0x00010000L
+#define UVD_FW_STATUS__FAIL_MASK                                                                              0x00020000L
+#define UVD_FW_STATUS__INVALID_LEN_MASK                                                                       0x00040000L
+#define UVD_FW_STATUS__INVALID_0_PADDING_MASK                                                                 0x00080000L
+#define UVD_FW_STATUS__INVALID_NONCE_MASK                                                                     0x00100000L
+
 
 #endif
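The UVD_FW_STATUS fields above split into a handshake bit (DONE) and result bits (PASS/FAIL plus the INVALID_* error causes). A hedged sketch of decoding a value read from mmUVD_FW_STATUS; how the register is read is left out, since no read helper is introduced by this patch:

#include <stdint.h>
#include <stdbool.h>

/* Decode a UVD_FW_STATUS register value using the masks above.
 * Returns true only when the firmware handshake completed cleanly. */
static bool uvd_fw_status_ok(uint32_t status)
{
        if (!(status & UVD_FW_STATUS__DONE_MASK))
                return false;           /* firmware has not finished yet */
        if (status & (UVD_FW_STATUS__FAIL_MASK |
                      UVD_FW_STATUS__INVALID_LEN_MASK |
                      UVD_FW_STATUS__INVALID_0_PADDING_MASK |
                      UVD_FW_STATUS__INVALID_NONCE_MASK))
                return false;           /* done, but with an error cause */
        return !!(status & UVD_FW_STATUS__PASS_MASK);
}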
index c0efd90..58cf7ad 100644 (file)
 #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT                                                              0x7
 #define VCN_FEATURES__HAS_SCLR_DEC__SHIFT                                                                     0x8
 #define VCN_FEATURES__HAS_VP9_DEC__SHIFT                                                                      0x9
+#define VCN_FEATURES__HAS_AV1_DEC__SHIFT                                                                      0xa
 #define VCN_FEATURES__HAS_EFC_ENC__SHIFT                                                                      0xb
 #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT                                                              0xc
 #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT                                                               0xd
 #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK                                                                0x00000080L
 #define VCN_FEATURES__HAS_SCLR_DEC_MASK                                                                       0x00000100L
 #define VCN_FEATURES__HAS_VP9_DEC_MASK                                                                        0x00000200L
+#define VCN_FEATURES__HAS_AV1_DEC_MASK                                                                        0x00000400L
 #define VCN_FEATURES__HAS_EFC_ENC_MASK                                                                        0x00000800L
 #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK                                                                0x00001000L
 #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK                                                                 0x00002000L
 #define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT                                                                    0x18
 #define UVD_SUVD_CGC_GATE__EFC__SHIFT                                                                         0x19
 #define UVD_SUVD_CGC_GATE__SAOE__SHIFT                                                                        0x1a
+#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT                                                                     0x1b
 #define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT                                                                    0x1c
 #define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT                                                                    0x1d
+#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT                                                                     0x1e
 #define UVD_SUVD_CGC_GATE__SMPA__SHIFT                                                                        0x1f
 #define UVD_SUVD_CGC_GATE__SRE_MASK                                                                           0x00000001L
 #define UVD_SUVD_CGC_GATE__SIT_MASK                                                                           0x00000002L
 #define UVD_SUVD_CGC_GATE__IME_HEVC_MASK                                                                      0x01000000L
 #define UVD_SUVD_CGC_GATE__EFC_MASK                                                                           0x02000000L
 #define UVD_SUVD_CGC_GATE__SAOE_MASK                                                                          0x04000000L
+#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK                                                                       0x08000000L
 #define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK                                                                      0x10000000L
 #define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK                                                                      0x20000000L
+#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK                                                                       0x40000000L
 #define UVD_SUVD_CGC_GATE__SMPA_MASK                                                                          0x80000000L
 //UVD_SUVD_CGC_STATUS
 #define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT                                                                  0x0
 #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT                                                             0x1b
 #define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT                                                                  0x1c
 #define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT                                                                 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT                                                              0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT                                                              0x1f
 #define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK                                                                    0x00000001L
 #define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK                                                                    0x00000002L
 #define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK                                                                    0x00000004L
 #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK                                                               0x08000000L
 #define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK                                                                    0x10000000L
 #define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK                                                                   0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK                                                                0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK                                                                0x80000000L
 //UVD_SUVD_CGC_CTRL
 #define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT                                                                    0x0
 #define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT                                                                    0x1
 #define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT                                                                   0xc
 #define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT                                                                  0xd
 #define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT                                                                  0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT                                                                0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT                                                                0x10
 #define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT                                                                   0x11
 #define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT                                                                    0x1c
 #define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT                                                                    0x1d
 #define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK                                                                     0x00001000L
 #define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK                                                                    0x00002000L
 #define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK                                                                    0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK                                                                  0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK                                                                  0x00010000L
 #define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK                                                                     0x00020000L
 #define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK                                                                      0x10000000L
 #define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK                                                                      0x20000000L
 #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT                                                                0x0
 #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT                                                                0x1
 #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT                                                               0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT                                                             0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT                                                             0x5
 #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT                                                                0x6
 #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT                                                                0x7
 #define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT                                                                0x8
 #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK                                                                  0x00000001L
 #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK                                                                  0x00000002L
 #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK                                                                 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK                                                               0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK                                                               0x00000020L
 #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK                                                                  0x00000040L
 #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK                                                                  0x00000080L
 #define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK                                                                  0x00000100L
 //UVD_SUVD_CGC_GATE2
 #define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT                                                                      0x0
 #define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT                                                                      0x1
+#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT                                                                    0x2
+#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT                                                                    0x3
 #define UVD_SUVD_CGC_GATE2__MPC1__SHIFT                                                                       0x4
 #define UVD_SUVD_CGC_GATE2__MPBE0_MASK                                                                        0x00000001L
 #define UVD_SUVD_CGC_GATE2__MPBE1_MASK                                                                        0x00000002L
+#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK                                                                      0x00000004L
+#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK                                                                      0x00000008L
 #define UVD_SUVD_CGC_GATE2__MPC1_MASK                                                                         0x00000010L
 //UVD_SUVD_INT_STATUS2
 #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT                                                            0x0
 #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT                                                             0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT                                                         0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT                                                          0xb
 #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK                                                              0x0000001FL
 #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK                                                               0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK                                                           0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK                                                            0x00000800L
 //UVD_SUVD_INT_EN2
 #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT                                                             0x0
 #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT                                                              0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT                                                          0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT                                                           0xb
 #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK                                                               0x0000001FL
 #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK                                                                0x00000020L
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK                                                            0x000007C0L
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK                                                             0x00000800L
 //UVD_SUVD_INT_ACK2
 #define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT                                                           0x0
 #define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT                                                            0x5
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT                                                        0x6
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT                                                         0xb
 #define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK                                                             0x0000001FL
 #define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK                                                              0x00000020L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK                                                          0x000007C0L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK                                                           0x00000800L
 
 
 // addressBlock: uvd0_ecpudec
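VCN_FEATURES gains a HAS_AV1_DEC bit alongside the existing VP9/SCLR bits, and the CGC gate/status/interrupt registers grow matching *_AV1 fields. A minimal sketch of testing the feature bit on a previously read register value (the read itself is assumed, not shown in the patch):

#include <stdint.h>
#include <stdbool.h>

/* True if the VCN block reports an AV1 decoder, per the mask above. */
static bool vcn_has_av1_dec(uint32_t vcn_features)
{
        return !!(vcn_features & VCN_FEATURES__HAS_AV1_DEC_MASK);
}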
index fc592f6..e37b4b9 100644 (file)
@@ -212,6 +212,15 @@ struct tile_config {
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
+ * @get_cu_occupancy: Function pointer that returns to the caller the number
+ * of wavefronts that are in flight for all of the queues of a process,
+ * as identified by its pasid. Note that the value returned by this
+ * function is a snapshot of the current moment and does not guarantee
+ * any minimum on the number of waves in flight. This function is
+ * defined only for devices of the GFX9 and later GFX families; care
+ * must be taken when calling it, as it is not defined for GFX8 and
+ * earlier families.
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -286,6 +295,9 @@ struct kfd2kgd_calls {
        void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base);
        uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+       void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
+                       int *max_waves_per_cu);
 };
 
 #endif /* KGD_KFD_INTERFACE_H_INCLUDED */
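A hedged sketch of how amdkfd might consume the new hook; the caller function, its error convention, and the NULL-check pattern are illustrative assumptions, not code from this patch:

/* Query a snapshot of in-flight waves for a process. The hook is
 * optional (GFX9+ only per the doc comment), so guard against NULL. */
static int report_cu_occupancy(const struct kfd2kgd_calls *f,
                               struct kgd_dev *kgd, int pasid)
{
        int wave_cnt = 0, max_waves_per_cu = 0;

        if (!f->get_cu_occupancy)
                return -1;              /* pre-GFX9 device: hook undefined */

        f->get_cu_occupancy(kgd, pasid, &wave_cnt, &max_waves_per_cu);
        return wave_cnt;                /* snapshot only; no minimum guaranteed */
}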
index 0aec28f..94132c7 100644 (file)
@@ -281,6 +281,7 @@ struct amd_pm_funcs {
        int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
        int (*get_power_profile_mode)(void *handle, char *buf);
        int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+       int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size);
        int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
        int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
        int (*smu_i2c_bus_access)(void *handle, bool acquire);
index 2d924e8..5298166 100644 (file)
@@ -827,6 +827,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                        return -EINVAL;
                }
        } else {
+
+               if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+                       ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+                                                               parameter,
+                                                               parameter_size);
+                       if (ret) {
+                               pm_runtime_mark_last_busy(ddev->dev);
+                               pm_runtime_put_autosuspend(ddev->dev);
+                               return -EINVAL;
+                       }
+               }
+
                if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
                        ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                                parameter, parameter_size);
index dff4a5f..f6e0e7d 100644 (file)
@@ -349,6 +349,10 @@ enum amdgpu_pcie_gen {
                ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
                        (adev)->powerplay.pp_handle, parameter, size))
 
+#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
+               ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
+                       (adev)->powerplay.pp_handle, type, parameter, size))
+
 #define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
                ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
                        (adev)->powerplay.pp_handle, type, parameter, size))
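The new callback is wired the same way as odn_edit_dpm_table: the sysfs handler calls the amdgpu_dpm_set_fine_grain_clk_vol() macro, which dispatches through pp_funcs when the backend fills the pointer in. A hedged usage sketch; the parameter values are illustrative, and PP_OD_EDIT_SCLK_VDDC_TABLE is assumed from the existing PP_OD_DPM_TABLE_COMMAND enum:

long parameter[2] = { 0, 1200 };        /* level index, clock in MHz */
int ret;

if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
        ret = amdgpu_dpm_set_fine_grain_clk_vol(adev,
                        PP_OD_EDIT_SCLK_VDDC_TABLE,
                        parameter, ARRAY_SIZE(parameter));
        if (ret)
                return -EINVAL;         /* mirrors the sysfs error path above */
}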
index d22a759..44fd0cd 100644 (file)
@@ -270,6 +270,7 @@ struct smu_table_context
         */
        struct smu_table                driver_table;
        struct smu_table                memory_pool;
+       struct smu_table                dummy_read_1_table;
        uint8_t                         thermal_controller_type;
 
        void                            *overdrive_table;
@@ -452,6 +453,7 @@ struct smu_context
 
        struct work_struct throttling_logging_work;
        atomic64_t throttle_int_counter;
+       struct work_struct interrupt_work;
 
        unsigned fan_max_rpm;
        unsigned manual_fan_speed_rpm;
@@ -500,7 +502,7 @@ struct pptable_funcs {
        bool (*is_dpm_running)(struct smu_context *smu);
        int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
        int (*set_watermarks_table)(struct smu_context *smu,
-                                   struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+                                   struct pp_smu_wm_range_sets *clock_ranges);
        int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
        int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
        int (*set_default_od_settings)(struct smu_context *smu);
@@ -590,7 +592,6 @@ struct pptable_funcs {
        int (*mode2_reset)(struct smu_context *smu);
        int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
        int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
-       int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
        int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
        void (*log_thermal_throttling_event)(struct smu_context *smu);
        size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
@@ -600,6 +601,8 @@ struct pptable_funcs {
        int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
        int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
        int (*get_fan_parameters)(struct smu_context *smu);
+       int (*post_init)(struct smu_context *smu);
+       void (*interrupt_work)(struct smu_context *smu);
 };
 
 typedef enum {
@@ -703,7 +706,6 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
 
 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
 
 int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
@@ -755,7 +757,7 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
 int smu_write_watermarks_table(struct smu_context *smu);
 int smu_set_watermarks_for_clock_ranges(
                struct smu_context *smu,
-               struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+               struct pp_smu_wm_range_sets *clock_ranges);
 
 /* smu to display interface */
 extern int smu_display_configuration_change(struct smu_context *smu, const
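struct smu_context gains an interrupt_work work_struct and pptable_funcs gains matching interrupt_work()/post_init() hooks. A hedged sketch of the plumbing this implies, assuming the usual kernel workqueue pattern and that the context keeps its pptable_funcs pointer as ppt_funcs (the function below and its call sites are illustrative, not lifted from the patch):

/* Bottom half: runs in process context after the SMU interrupt fires. */
static void smu_interrupt_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               interrupt_work);

        if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
                smu->ppt_funcs->interrupt_work(smu);
}

/* At init:    INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
 * In the ISR: schedule_work(&smu->interrupt_work);  (IRQ-safe deferral) */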
index 1b3529e..3898a95 100644 (file)
@@ -340,6 +340,9 @@ struct pp_hwmgr_func {
        int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size);
+       int (*set_fine_grain_clk_vol)(struct pp_hwmgr *hwmgr,
+                                     enum PP_OD_DPM_TABLE_COMMAND type,
+                                     long *input, uint32_t size);
        int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
        int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
        int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
@@ -347,6 +350,8 @@ struct pp_hwmgr_func {
        int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
        int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
        int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
        int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
index dea8fe9..c498158 100644 (file)
@@ -54,7 +54,8 @@ typedef struct {
        uint16_t MaxMclk;
 
        uint8_t  WmSetting;
-       uint8_t  Padding[3];
+       uint8_t  WmType;
+       uint8_t  Padding[2];
 } WatermarkRowGeneric_t;
 
 #define NUM_WM_RANGES 4
index 5ef9c92..1275246 100644 (file)
@@ -27,9 +27,9 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x35
+#define SMU11_DRIVER_IF_VERSION 0x39
 
-#define PPTABLE_Sienna_Cichlid_SMU_VERSION 5
+#define PPTABLE_Sienna_Cichlid_SMU_VERSION 6
 
 #define NUM_GFXCLK_DPM_LEVELS  16
 #define NUM_SMNCLK_DPM_LEVELS  2
@@ -169,7 +169,7 @@ typedef enum {
 #define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN       0x00000200
 #define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400
 #define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCEFCLK   0x00000800
-#define DPM_OVERRIDE_ENABLE_FAST_FCLK_TIMER          0x00001000
+#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER         0x00001000
 #define DPM_OVERRIDE_DISABLE_VCN_PG                  0x00002000
 #define DPM_OVERRIDE_DISABLE_FMAX_VMAX               0x00004000
 
@@ -793,8 +793,18 @@ typedef struct {
 
   // SECTION: Sku Reserved
   uint8_t          CustomerVariant;
-  uint8_t          Spare[3];
-  uint32_t         SkuReserved[14];
+
+  //VC BTC parameters are only applicable to VDD_GFX domain
+  uint8_t          VcBtcEnabled;
+  uint16_t         VcBtcVminT0;                 // T0_VMIN
+  uint16_t         VcBtcFixedVminAgingOffset;   // FIXED_VMIN_AGING_OFFSET
+  uint16_t         VcBtcVmin2PsmDegrationGb;    // VMIN_TO_PSM_DEGRADATION_GB
+  uint32_t         VcBtcPsmA;                   // A_PSM
+  uint32_t         VcBtcPsmB;                   // B_PSM
+  uint32_t         VcBtcVminA;                  // A_VMIN
+  uint32_t         VcBtcVminB;                  // B_VMIN
+
+  uint32_t         SkuReserved[9];
 
 
   // MAJOR SECTION: BOARD PARAMETERS
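The reserved-space bookkeeping works out, assuming the table stays byte-packed as these SMU interface tables are: the new VC BTC fields occupy 1 + 2 + 2 + 2 + 4 + 4 + 4 + 4 = 23 bytes, which is exactly the 3 bytes freed by dropping Spare[3] plus the 20 bytes freed by shrinking SkuReserved from 14 to 9 dwords, so the overall PPTable size is preserved, consistent with bumping PPTABLE_Sienna_Cichlid_SMU_VERSION rather than resizing the table.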
@@ -952,7 +962,7 @@ typedef struct {
   uint8_t                FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
   uint8_t                FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
   uint16_t               MaxOpTemp;            // Degree Celcius
-  uint16_t               Padding_16[1];
+  int16_t                VddGfxOffset;         // in mV
   uint8_t                FanZeroRpmEnable;
   uint8_t                FanZeroRpmStopTemp;
   uint8_t                FanMode;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h
new file mode 100644 (file)
index 0000000..beab6d7
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef SMU_11_0_CDR_TABLE
+#define SMU_11_0_CDR_TABLE
+
+
+#pragma pack(push, 1)
+
+/// CDR table : PRBS sequence for DQ toggles
+
+/*static unsigned int NoDbiPrbs7[] =
+{
+//256 bytes, 256 byte aligned
+0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+};
+
+
+static unsigned int DbiPrbs7[] =
+{
+// 256 bytes, 256 byte aligned
+0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+};
+*/
+
+
+//4096 bytes, 256 byte aligned
+static unsigned int NoDbiPrbs7[] =
+{
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+    0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+    0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+    0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+};
+
+// 4096 bytes, 256 byte aligned
+static unsigned int DbiPrbs7[] =
+{
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+    0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+    0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+    0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+};
+
+#pragma pack(pop)
+
+#endif
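
The two 4 KiB tables above are PRBS7 test sequences expanded into per-lane patterns (bytes of 0x0f/0xf0 in the no-DBI table, halfwords of 0x0000/0xffff in the DBI table), apparently consumed by the UMC CDR workaround plumbed in below (note the smu_11_0_cdr_table.h include and the new dummy-table PPSMC messages). For cross-checking, a minimal userspace sketch of a PRBS7 generator; the standard polynomial x^7 + x^6 + 1 is taken as given, the lane-expansion mapping is inferred from the data:

#include <stdint.h>
#include <stdio.h>

/* PRBS7 (x^7 + x^6 + 1): 7-bit LFSR, 127-bit period, non-zero seed. */
static unsigned int prbs7_next(uint8_t *lfsr)
{
        unsigned int bit = ((*lfsr >> 6) ^ (*lfsr >> 5)) & 1;

        *lfsr = (uint8_t)(((*lfsr << 1) | bit) & 0x7f);
        return bit;
}

int main(void)
{
        uint8_t lfsr = 0x7f;
        int i;

        for (i = 0; i < 64; i++)
                putchar(prbs7_next(&lfsr) ? '1' : '0');
        putchar('\n');
        return 0;
}
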
index 7b585e2..35fc46d 100644 (file)
        __SMU_DUMMY_MAP(GmiPwrDnControl), \
        __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
        __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
+       __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH), \
+       __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \
+       __SMU_DUMMY_MAP(GET_UMC_FW_WA), \
        __SMU_DUMMY_MAP(Mode1Reset), \
 
 #undef __SMU_DUMMY_MAP
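
These __SMU_DUMMY_MAP entries extend an X-macro list that the driver expands more than once — once into enum values and once into printable names — so a new message only has to be added in one place. A reduced, hypothetical illustration of the pattern (the real list is far longer):

#define DEMO_MSG_LIST                           \
        __SMU_DUMMY_MAP(GET_UMC_FW_WA),         \
        __SMU_DUMMY_MAP(Mode1Reset),

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
enum demo_smu_message { DEMO_MSG_LIST SMU_MSG_MAX_COUNT };

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const demo_smu_message_names[] = { DEMO_MSG_LIST };
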
index 1f9575a..2d1c3ba 100644 (file)
@@ -30,8 +30,8 @@
 #define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x36
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x35
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x4
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x39
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x5
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
@@ -280,5 +280,7 @@ int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
 int smu_v11_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement);
 
+void smu_v11_0_interrupt_work(struct smu_context *smu);
+
 #endif
 #endif
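
The newly exported smu_v11_0_interrupt_work() pairs with the interrupt_work hook wired up further down: interrupt context only schedules a work item, and the sleep-capable handling runs later in process context under smu->mutex (see smu_interrupt_work_fn in the amdgpu_smu.c hunks below). A reduced sketch of that lifecycle, with demo_* names standing in for the real structures:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_smu {
        struct mutex lock;
        struct work_struct interrupt_work;
};

static void demo_interrupt_work_fn(struct work_struct *work)
{
        struct demo_smu *smu = container_of(work, struct demo_smu,
                                            interrupt_work);

        mutex_lock(&smu->lock);  /* sleeping is fine here, unlike in IRQ context */
        /* ... message the SMC, read status, etc. ... */
        mutex_unlock(&smu->lock);
}

/*
 * sw_init:     INIT_WORK(&smu->interrupt_work, demo_interrupt_work_fn);
 * IRQ handler: schedule_work(&smu->interrupt_work);
 * hw cleanup:  cancel_work_sync(&smu->interrupt_work);
 */
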
index fa0174d..26181b6 100644 (file)
 
 #define PPSMC_MSG_SetMGpuFanBoostLimitRpm        0x4C
 
-#define PPSMC_Message_Count                      0x4D
+#define PPSMC_MSG_SetDriverDummyTableDramAddrHigh 0x4E
+#define PPSMC_MSG_SetDriverDummyTableDramAddrLow  0x4F
+
+#define PPSMC_MSG_GetUMCFWWA                     0x50
+
+#define PPSMC_Message_Count                      0x51
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
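
With PPSMC_MSG_GetUMCFWWA at 0x50 and PPSMC_Message_Count bumped to 0x51, the count stays one past the highest message ID (0x4D is now an unused hole). A hypothetical compile-time check — not present in the header — that would catch a forgotten bump:

_Static_assert(PPSMC_Message_Count == PPSMC_MSG_GetUMCFWWA + 1,
               "PPSMC_Message_Count must stay one past the last message ID");
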
index a6321f2..eab9768 100644 (file)
@@ -911,6 +911,19 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
        return ret;
 }
 
+static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
+               return 0;
+
+       return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
+}
+
 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
 {
        struct pp_hwmgr *hwmgr = handle;
@@ -920,7 +933,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 
        if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
                pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return -EINVAL;
+               return 0;
        }
 
        return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
@@ -1645,6 +1658,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .set_powergating_by_smu = pp_set_powergating_by_smu,
        .get_power_profile_mode = pp_get_power_profile_mode,
        .set_power_profile_mode = pp_set_power_profile_mode,
+       .set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
        .odn_edit_dpm_table = pp_odn_edit_dpm_table,
        .set_mp1_state = pp_dpm_set_mp1_state,
        .set_power_limit = pp_set_power_limit,
index 9ee8cf8..cf60f39 100644 (file)
@@ -242,6 +242,34 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
        return 0;
 }
 
+static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
+               smu10_data->gfx_actual_soft_min_freq = clock;
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       smu10_data->gfx_actual_soft_min_freq,
+                                       NULL);
+       }
+       return 0;
+}
+
+static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (clock && smu10_data->gfx_max_freq_limit != (clock * 100)) {
+               smu10_data->gfx_max_freq_limit = clock * 100;
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       clock,
+                                       NULL);
+       }
+       return 0;
+}
+
 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
 {
        struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -527,6 +555,9 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
        hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
 
+       /* enable the pp_od_clk_voltage sysfs file */
+       hwmgr->od_enabled = 1;
+
        return result;
 }
 
@@ -563,6 +594,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        struct smu10_hwmgr *data = hwmgr->backend;
        uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
        uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
+       uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
+       uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
 
        if (hwmgr->smu_version < 0x1E3700) {
                pr_info("smu firmware version too old, can not set dpm level\n");
@@ -648,7 +681,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
-                                               SMU10_UMD_PSTATE_VCE,
+                                               SMU10_UMD_PSTATE_PROFILE_VCE,
                                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -665,7 +698,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
-                                               SMU10_UMD_PSTATE_VCE,
+                                               SMU10_UMD_PSTATE_PROFILE_VCE,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
@@ -676,13 +709,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
-                                               SMU10_UMD_PSTATE_PEAK_FCLK :
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
                                                min_mclk,
                                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_MIN_SOCCLK,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
@@ -695,11 +728,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
@@ -947,6 +980,26 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        ((mclk_table->entries[i].clk / 100)
                                         == now) ? "*" : "");
                break;
+       case OD_SCLK:
+               if (hwmgr->od_enabled) {
+                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+
+                       size += sprintf(buf + size, "0: %10uMHz\n",
+                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
+                       size += sprintf(buf + size, "1: %10uMHz\n", data->gfx_max_freq_limit/100);
+               }
+               break;
+       case OD_RANGE:
+               if (hwmgr->od_enabled) {
+                       uint32_t min_freq, max_freq = 0;
+
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+                       size = sprintf(buf, "%s:\n", "OD_RANGE");
+                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+                               min_freq, max_freq);
+               }
+               break;
        default:
                break;
        }
@@ -1181,8 +1234,19 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
        struct smu10_hwmgr *data = hwmgr->backend;
        struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
        Watermarks_t *table = &(data->water_marks_table);
+       struct amdgpu_device *adev = hwmgr->adev;
+       int i;
 
        smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
+
+       if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+               for (i = 0; i < NUM_WM_RANGES; i++)
+                       table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;
+
+               for (i = 0; i < NUM_WM_RANGES; i++)
+                       table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
+       }
+
        smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
        data->water_marks_exist = true;
        return 0;
@@ -1348,6 +1412,32 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
                                                   NULL);
 }
 
+static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long *input, uint32_t size)
+{
+       if (!hwmgr->od_enabled) {
+               pr_err("Fine grain not supported\n");
+               return -EINVAL;
+       }
+
+       if (size != 2) {
+               pr_err("Incorrect number of input parameters\n");
+               return -EINVAL;
+       }
+
+       if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+               if (input[0] == 0)
+                       smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
+               else if (input[0] == 1)
+                       smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
+               else
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .backend_init = smu10_hwmgr_backend_init,
        .backend_fini = smu10_hwmgr_backend_fini,
@@ -1388,9 +1478,12 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .powergate_sdma = smu10_powergate_sdma,
        .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
        .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+       .set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
+       .set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
        .get_power_profile_mode = smu10_get_power_profile_mode,
        .set_power_profile_mode = smu10_set_power_profile_mode,
        .asic_reset = smu10_asic_reset,
+       .set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
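
Taken together with the amd_powerplay.c dispatcher above, the new callbacks are reachable from user space through the pp_od_clk_voltage sysfs file: "s 0 <MHz>" lands in smu10_set_hard_min_gfxclk_by_freq() and "s 1 <MHz>" in smu10_set_soft_max_gfxclk_by_freq(). A hedged userspace sketch — the card0 path and the clock values are assumptions, and note the handler above passes values straight through to the SMC without range checks:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
        const char *set_min = "s 0 600\n";      /* hard min gfxclk, MHz */
        const char *set_max = "s 1 1100\n";     /* soft max gfxclk, MHz */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, set_min, strlen(set_min)) < 0)
                perror("write min");
        if (write(fd, set_max, strlen(set_max)) < 0)
                perror("write max");
        close(fd);
        return 0;
}
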
index 0f969de..6c9b5f0 100644 (file)
@@ -284,7 +284,7 @@ struct smu10_hwmgr {
        uint32_t                        dclk_soft_min;
        uint32_t                        gfx_actual_soft_min_freq;
        uint32_t                        gfx_min_freq_limit;
-       uint32_t                        gfx_max_freq_limit;
+       uint32_t                        gfx_max_freq_limit; /* in 10kHz units */
 
        bool                           vcn_power_gated;
        bool                           vcn_dpg_mode;
@@ -310,6 +310,7 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 #define SMU10_UMD_PSTATE_SOCCLK                 626
 #define SMU10_UMD_PSTATE_FCLK                   933
 #define SMU10_UMD_PSTATE_VCE                    0x03C00320
+#define SMU10_UMD_PSTATE_PROFILE_VCE            0x02AD0229
 
 #define SMU10_UMD_PSTATE_PEAK_SOCCLK            757
 #define SMU10_UMD_PSTATE_PEAK_FCLK              1200
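
The VCE pstate defines read like two packed 16-bit clock fields: 0x03C00320 decodes to 960/800 and the new profile value 0x02AD0229 to 685/553, i.e. plausibly lower clocks for the profiling pstate — though which field is which (and the units) is an assumption here. A quick decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t vals[] = { 0x03C00320, 0x02AD0229 };
        unsigned int i;

        for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
                printf("0x%08x -> hi %u, lo %u\n",
                       (unsigned int)vals[i],
                       (unsigned int)(vals[i] >> 16),
                       (unsigned int)(vals[i] & 0xffff));
        return 0;
}
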
index 4a3b64a..1e8919b 100644 (file)
@@ -1585,9 +1585,19 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->current_profile_setting.sclk_down_hyst = 100;
        data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
        data->current_profile_setting.bupdate_mclk = 1;
-       data->current_profile_setting.mclk_up_hyst = 0;
-       data->current_profile_setting.mclk_down_hyst = 100;
-       data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+       if (adev->gmc.vram_width == 256) {
+               data->current_profile_setting.mclk_up_hyst = 10;
+               data->current_profile_setting.mclk_down_hyst = 60;
+               data->current_profile_setting.mclk_activity = 25;
+       } else if (adev->gmc.vram_width == 128) {
+               data->current_profile_setting.mclk_up_hyst = 5;
+               data->current_profile_setting.mclk_down_hyst = 16;
+               data->current_profile_setting.mclk_activity = 20;
+       } else if (adev->gmc.vram_width == 64) {
+               data->current_profile_setting.mclk_up_hyst = 3;
+               data->current_profile_setting.mclk_down_hyst = 16;
+               data->current_profile_setting.mclk_activity = 20;
+       }
        hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
        hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2873,7 +2883,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
                if (hwmgr->is_kicker)
                        switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
                else
-                       switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+                       switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
                break;
        case CHIP_VEGAM:
                switch_limit_us = 30;
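
The vram-width-specific mclk hysteresis values above arrive as an if/else chain, and since the old unconditional defaults were deleted, widths other than 256/128/64 now get no explicit mclk settings at all. A hypothetical table-driven alternative — tunables copied from the hunk, everything else illustrative:

struct mclk_tuning {
        unsigned int vram_width;
        unsigned int up_hyst;
        unsigned int down_hyst;
        unsigned int activity;
};

static const struct mclk_tuning mclk_tunings[] = {
        { 256, 10, 60, 25 },
        { 128,  5, 16, 20 },
        {  64,  3, 16, 20 },
};

static const struct mclk_tuning *mclk_tuning_for_width(unsigned int width)
{
        unsigned int i;

        for (i = 0; i < sizeof(mclk_tunings) / sizeof(mclk_tunings[0]); i++)
                if (mclk_tunings[i].vram_width == width)
                        return &mclk_tunings[i];
        return NULL;    /* unknown width: leave current settings untouched */
}
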
index c378a00..7eada30 100644 (file)
@@ -4659,7 +4659,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
        if ((data->water_marks_bitmap & WaterMarksExist) &&
                        !(data->water_marks_bitmap & WaterMarksLoaded)) {
                result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
-               PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
+               PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return -EINVAL);
                data->water_marks_bitmap |= WaterMarksLoaded;
        }
 
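The one-character change above (mirrored for vega12 in the next file) is a real fix: kernel callers detect failure with `ret < 0`, so returning bare EINVAL (+22) is silently treated as success. A tiny userspace demonstration of why the sign matters:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        int wrong = EINVAL;     /* +22: passes `ret < 0` checks, error is lost */
        int right = -EINVAL;    /* -22: reported as an error, as intended */

        printf("EINVAL  (%d) -> %s\n", wrong,
               wrong < 0 ? "error" : "treated as success");
        printf("-EINVAL (%d) -> %s\n", right,
               right < 0 ? "error" : "treated as success");
        return 0;
}
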
index f0680dd..dc206fa 100644 (file)
@@ -2444,7 +2444,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
                        !(data->water_marks_bitmap & WaterMarksLoaded)) {
                result = smum_smc_table_manager(hwmgr,
                                                (uint8_t *)wm_table, TABLE_WATERMARKS, false);
-               PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
+               PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return -EINVAL);
                data->water_marks_bitmap |= WaterMarksLoaded;
        }
 
index adfbcbe..8a9aee8 100644 (file)
@@ -61,9 +61,6 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
        uint32_t reg;
        uint32_t ret;
 
-       /* Due to the L1 policy problem under SRIOV, we have to use
-        * mmMP1_SMN_C2PMSG_103 as the driver response register
-        */
        if (hwmgr->pp_one_vf) {
                reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
 
@@ -148,10 +145,6 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
        smu9_wait_for_response(hwmgr);
 
-       /* Due to the L1 policy problem under SRIOV, we have to use
-        * mmMP1_SMN_C2PMSG_101 as the driver message register and
-        * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
-        */
        if (hwmgr->pp_one_vf) {
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
index 1e222c5..daf122f 100644 (file)
@@ -209,11 +209,13 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
        int ret;
        struct cgs_firmware_info info = {0};
 
-       ret = cgs_get_firmware_info(hwmgr->device,
-                                   CGS_UCODE_ID_SMU,
-                                   &info);
-       if (ret || !info.kptr)
-               return -EINVAL;
+       if (!amdgpu_sriov_vf((struct amdgpu_device *)hwmgr->adev)) {
+               ret = cgs_get_firmware_info(hwmgr->device,
+                                               CGS_UCODE_ID_SMU,
+                                               &info);
+               if (ret || !info.kptr)
+                       return -EINVAL;
+       }
 
        priv = kzalloc(sizeof(struct vega10_smumgr), GFP_KERNEL);
 
index 7a55ece..b1e5ec0 100644 (file)
@@ -361,20 +361,16 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
-       mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
-       mutex_unlock(&feature->mutex);
 
        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                             SMU_FEATURE_MAX/32);
        if (ret)
                return ret;
 
-       mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                      (unsigned long *)allowed_feature_mask,
                      feature->feature_num);
-       mutex_unlock(&feature->mutex);
 
        return ret;
 }
@@ -421,6 +417,9 @@ static int smu_early_init(void *handle)
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
        mutex_init(&smu->mutex);
+       mutex_init(&smu->smu_baco.mutex);
+       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+       smu->smu_baco.platform_support = false;
 
        return smu_set_funcs(adev);
 }
@@ -473,20 +472,15 @@ static int smu_late_init(void *handle)
        if (!smu->pm_enabled)
                return 0;
 
-       ret = smu_set_default_od_settings(smu);
+       ret = smu_post_init(smu);
        if (ret) {
-               dev_err(adev->dev, "Failed to setup default OD settings!\n");
+               dev_err(adev->dev, "Failed to post smu init!\n");
                return ret;
        }
 
-       /*
-        * Set initialized values (get from vbios) to dpm tables context such as
-        * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
-        * type of clks.
-        */
-       ret = smu_set_default_dpm_table(smu);
+       ret = smu_set_default_od_settings(smu);
        if (ret) {
-               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+               dev_err(adev->dev, "Failed to setup default OD settings!\n");
                return ret;
        }
 
@@ -578,9 +572,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
 
-       if (!tables)
-               return 0;
-
        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
@@ -657,6 +648,45 @@ static int smu_free_memory_pool(struct smu_context *smu)
        return 0;
 }
 
+static int smu_alloc_dummy_read_table(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *dummy_read_1_table =
+                       &smu_table->dummy_read_1_table;
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       dummy_read_1_table->size = 0x40000;
+       dummy_read_1_table->align = PAGE_SIZE;
+       dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+       ret = amdgpu_bo_create_kernel(adev,
+                                     dummy_read_1_table->size,
+                                     dummy_read_1_table->align,
+                                     dummy_read_1_table->domain,
+                                     &dummy_read_1_table->bo,
+                                     &dummy_read_1_table->mc_address,
+                                     &dummy_read_1_table->cpu_addr);
+       if (ret)
+               dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
+
+       return ret;
+}
+
+static void smu_free_dummy_read_table(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *dummy_read_1_table =
+                       &smu_table->dummy_read_1_table;
+
+       amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
+                             &dummy_read_1_table->mc_address,
+                             &dummy_read_1_table->cpu_addr);
+
+       memset(dummy_read_1_table, 0, sizeof(struct smu_table));
+}
+
 static int smu_smc_table_sw_init(struct smu_context *smu)
 {
        int ret;
@@ -692,6 +722,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
        if (ret)
                return ret;
 
+       ret = smu_alloc_dummy_read_table(smu);
+       if (ret)
+               return ret;
+
        ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
        if (ret)
                return ret;
@@ -705,6 +739,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
 
        smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
 
+       smu_free_dummy_read_table(smu);
+
        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;
@@ -736,6 +772,19 @@ static void smu_throttling_logging_work_fn(struct work_struct *work)
        smu_log_thermal_throttling(smu);
 }
 
+static void smu_interrupt_work_fn(struct work_struct *work)
+{
+       struct smu_context *smu = container_of(work, struct smu_context,
+                                              interrupt_work);
+
+       mutex_lock(&smu->mutex);
+
+       if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
+               smu->ppt_funcs->interrupt_work(smu);
+
+       mutex_unlock(&smu->mutex);
+}
+
 static int smu_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -749,15 +798,12 @@ static int smu_sw_init(void *handle)
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
 
-       mutex_init(&smu->smu_baco.mutex);
-       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
-       smu->smu_baco.platform_support = false;
-
        mutex_init(&smu->sensor_lock);
        mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);
 
        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
+       INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
        atomic64_set(&smu->throttle_int_counter, 0);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -788,10 +834,13 @@ static int smu_sw_init(void *handle)
 
        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
-       ret = smu_init_microcode(smu);
-       if (ret) {
-               dev_err(adev->dev, "Failed to load smu firmware!\n");
-               return ret;
+
+       if (!amdgpu_sriov_vf(adev)) {
+               ret = smu_init_microcode(smu);
+               if (ret) {
+                       dev_err(adev->dev, "Failed to load smu firmware!\n");
+                       return ret;
+               }
        }
 
        ret = smu_smc_table_sw_init(smu);
@@ -969,21 +1018,14 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
-       ret = smu_disable_umc_cdr_12gbps_workaround(smu);
-       if (ret) {
-               dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
-               return ret;
-       }
-
        /*
-        * For Navi1X, manually switch it to AC mode as PMFW
-        * may boot it with DC mode.
+        * Set initial values (from the vbios) in the dpm table context,
+        * such as gfxclk, memclk, dcefclk, etc., and enable the DPM
+        * feature for each type of clock.
         */
-       ret = smu_set_power_source(smu,
-                                  adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
-                                  SMU_POWER_SOURCE_DC);
+       ret = smu_set_default_dpm_table(smu);
        if (ret) {
-               dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
+               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
                return ret;
        }
 
@@ -1129,7 +1171,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_NAVI12))
+           (adev->asic_type <= CHIP_NAVY_FLOUNDER))
                return 0;
 
        /*
@@ -1168,6 +1210,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        int ret = 0;
 
        cancel_work_sync(&smu->throttling_logging_work);
+       cancel_work_sync(&smu->interrupt_work);
 
        ret = smu_disable_thermal_alert(smu);
        if (ret) {
@@ -1188,7 +1231,6 @@ static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
-       int ret = 0;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1204,17 +1246,15 @@ static int smu_hw_fini(void *handle)
 
        adev->pm.dpm_enabled = false;
 
-       ret = smu_smc_hw_cleanup(smu);
-       if (ret)
-               return ret;
-
-       return 0;
+       return smu_smc_hw_cleanup(smu);
 }
 
 int smu_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
-       int ret = 0;
+       int ret;
+
+       amdgpu_gfx_off_ctrl(smu->adev, false);
 
        ret = smu_hw_fini(adev);
        if (ret)
@@ -1225,8 +1265,12 @@ int smu_reset(struct smu_context *smu)
                return ret;
 
        ret = smu_late_init(adev);
+       if (ret)
+               return ret;
 
-       return ret;
+       amdgpu_gfx_off_ctrl(smu->adev, true);
+
+       return 0;
 }
 
 static int smu_suspend(void *handle)
@@ -1784,25 +1828,19 @@ int smu_write_watermarks_table(struct smu_context *smu)
 }
 
 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-               struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+               struct pp_smu_wm_range_sets *clock_ranges)
 {
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (smu->disable_watermark)
+               return 0;
 
-       if (!smu->disable_watermark &&
-                       smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-                       smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-               ret = smu_set_watermarks_table(smu, clock_ranges);
+       mutex_lock(&smu->mutex);
 
-               if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
-                       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-                       smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
-               }
-       }
+       ret = smu_set_watermarks_table(smu, clock_ranges);
 
        mutex_unlock(&smu->mutex);
 
@@ -2269,19 +2307,6 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
        return ret;
 }
 
-int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return -EOPNOTSUPP;
-
-       if (smu->ppt_funcs->set_active_display_count)
-               ret = smu->ppt_funcs->set_active_display_count(smu, count);
-
-       return ret;
-}
-
 int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
                          struct amd_pp_clocks *clocks)
index 2ce6ad9..fc37628 100644 (file)
@@ -386,11 +386,9 @@ static int arcturus_check_powerplay_table(struct smu_context *smu)
                table_context->power_play_table;
        struct smu_baco_context *smu_baco = &smu->smu_baco;
 
-       mutex_lock(&smu_baco->mutex);
        if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
            powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
                smu_baco->platform_support = true;
-       mutex_unlock(&smu_baco->mutex);
 
        table_context->thermal_controller_type =
                powerplay_table->thermal_controller_type;
@@ -2390,6 +2388,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = arcturus_get_fan_parameters,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
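
Hooking .interrupt_work up to the generic smu_v11_0_interrupt_work() is the usual swsmu composition: shared smu_v11_0 helpers are slotted into each ASIC's pptable_funcs so only genuinely ASIC-specific behavior needs its own code. A reduced sketch of the wiring, with demo_* names standing in for the real types:

struct demo_smu;        /* stand-in for struct smu_context */

struct demo_ppt_funcs {
        void (*interrupt_work)(struct demo_smu *smu);
};

void demo_v11_interrupt_work(struct demo_smu *smu);     /* shared helper */

static const struct demo_ppt_funcs demo_arcturus_funcs = {
        .interrupt_work = demo_v11_interrupt_work,
};
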
index 42d53cc..8d8081c 100644 (file)
@@ -45,6 +45,7 @@
 
 #include "asic_reg/mp/mp_11_0_sh_mask.h"
 #include "smu_cmn.h"
+#include "smu_11_0_cdr_table.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -139,6 +140,9 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(GetVoltageByDpm,                PPSMC_MSG_GetVoltageByDpm,              0),
        MSG_MAP(GetVoltageByDpmOverdrive,       PPSMC_MSG_GetVoltageByDpmOverdrive,     0),
        MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,      0),
+       MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
+       MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
+       MSG_MAP(GET_UMC_FW_WA,                  PPSMC_MSG_GetUMCFWWA,                   0),
 };
 
 static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -279,9 +283,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
                                | FEATURE_MASK(FEATURE_FW_CTF_BIT)
                                | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
 
-       if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
-               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
        if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
 
@@ -291,11 +292,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
        if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
 
-       if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
-               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
-                               | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
-                               | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
-
        if (adev->pm.pp_feature & PP_ULV_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
 
@@ -320,19 +316,24 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
        if (smu->dc_controlled_by_gpio)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
 
-       /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
-       if (is_asic_secure(smu)) {
-               /* only for navi10 A0 */
-               if ((adev->asic_type == CHIP_NAVI10) &&
-                       (adev->rev_id == 0)) {
-                       *(uint64_t *)feature_mask &=
-                                       ~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
-                                         | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
-                                         | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
-                       *(uint64_t *)feature_mask &=
-                                       ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
-               }
-       }
+       if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+       /* DPM UCLK enablement should be skipped for navi10 A0 secure board */
+       if (!(is_asic_secure(smu) &&
+            (adev->asic_type == CHIP_NAVI10) &&
+            (adev->rev_id == 0)) &&
+           (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
+               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
+                               | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
+                               | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
+
+       /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
+       if (is_asic_secure(smu) &&
+           (adev->asic_type == CHIP_NAVI10) &&
+           (adev->rev_id == 0))
+               *(uint64_t *)feature_mask &=
+                               ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
 
        return 0;
 }
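
The navi10-A0-secure predicate now appears twice in the function above; a minimal sketch of factoring it out (the helper is illustrative, not part of the patch):

static bool navi10_is_a0_secure_board(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	/* the condition both branches above test inline */
	return is_asic_secure(smu) &&
	       adev->asic_type == CHIP_NAVI10 &&
	       adev->rev_id == 0;
}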
@@ -347,11 +348,9 @@ static int navi10_check_powerplay_table(struct smu_context *smu)
        if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
                smu->dc_controlled_by_gpio = true;
 
-       mutex_lock(&smu_baco->mutex);
        if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
            powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
                smu_baco->platform_support = true;
-       mutex_unlock(&smu_baco->mutex);
 
        table_context->thermal_controller_type =
                powerplay_table->thermal_controller_type;
@@ -1602,57 +1601,43 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 }
 
 static int navi10_set_watermarks_table(struct smu_context *smu,
-                                      struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+                                      struct pp_smu_wm_range_sets *clock_ranges)
 {
        Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
        int i;
 
        if (clock_ranges) {
-               if (clock_ranges->num_wm_dmif_sets > 4 ||
-                   clock_ranges->num_wm_mcif_sets > 4)
+               if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+                   clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
                        return -EINVAL;
 
-               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-                       table->WatermarkRow[1][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+                       table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+                               clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+                               clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+                               clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+                               clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+                       table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
+                               clock_ranges->reader_wm_sets[i].wm_inst;
                }
 
-               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-                       table->WatermarkRow[0][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+                       table->WatermarkRow[WM_SOCCLK][i].MinClock =
+                               clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+                               clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+                               clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+                               clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+                       table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+                               clock_ranges->writer_wm_sets[i].wm_inst;
                }
 
                smu->watermarks_bitmap |= WATERMARKS_EXIST;
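
The same rewrite repeats for sienna_cichlid and renoir below; the mapping it applies, summarized as a hedged sketch (the struct layouts are only partially visible in these hunks):

/*
 * reader sets -> WatermarkRow[WM_DCEFCLK]: drain clk -> Min/MaxClock,
 *                                          fill clk  -> Min/MaxUclk
 * writer sets -> WatermarkRow[WM_SOCCLK]:  fill clk  -> Min/MaxClock,
 *                                          drain clk -> Min/MaxUclk
 *
 * pp_smu_wm_range_sets carries MHz values directly, so the old
 * kHz / 1000 division and cpu_to_le16() conversions are dropped.
 */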
@@ -2196,59 +2181,46 @@ static int navi10_run_btc(struct smu_context *smu)
        return ret;
 }
 
-static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
 {
-       int result = 0;
-
-       if (!enable)
-               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
-       else
-               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
-
-       return result;
-}
+       struct amdgpu_device *adev = smu->adev;
 
-static inline bool navi10_need_umc_cdr_12gbps_workaround(struct amdgpu_device *adev)
-{
-       if (adev->asic_type != CHIP_NAVI10)
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
                return false;
 
-       if (adev->pdev->device == 0x731f &&
-           (adev->pdev->revision == 0xc2 ||
-            adev->pdev->revision == 0xc3 ||
-            adev->pdev->revision == 0xca ||
-            adev->pdev->revision == 0xcb))
+       if (adev->asic_type == CHIP_NAVI10 ||
+           adev->asic_type == CHIP_NAVI14)
                return true;
-       else
-               return false;
+
+       return false;
 }
 
-static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
 {
        uint32_t uclk_count, uclk_min, uclk_max;
-       uint32_t smu_version;
        int ret = 0;
 
-       if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
-               return 0;
-
-       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
-       if (ret)
-               return ret;
-
-       /* This workaround is available only for 42.50 or later SMC firmwares */
-       if (smu_version < 0x2A3200)
+       /* This workaround can be applied only with uclk dpm enabled */
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
                return 0;
 
        ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
        if (ret)
                return ret;
 
-       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
        if (ret)
                return ret;
 
-       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+       /*
+        * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750MHz (0x2EE).
+        * This workaround is needed only when the max uclk frequency is
+        * not greater than that.
+        */
+       if (uclk_max > 0x2EE)
+               return 0;
+
+       ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
        if (ret)
                return ret;
 
@@ -2265,8 +2237,97 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
        /*
         * In this case, SMU already disabled dummy pstate during enablement
         * of UCLK DPM, we have to re-enable it.
-        * */
-       return navi10_dummy_pstate_control(smu, true);
+        */
+       return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+}
+
+static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *dummy_read_table =
+                               &smu_table->dummy_read_1_table;
+       char *dummy_table = dummy_read_table->cpu_addr;
+       int ret = 0;
+       uint32_t i;
+
+       for (i = 0; i < 0x40000; i += 0x1000 * 2) {
+               memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
+               dummy_table += 0x1000;
+               memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
+               dummy_table += 0x1000;
+       }
+
+       amdgpu_asic_flush_hdp(smu->adev, NULL);
+
+       ret = smu_cmn_send_smc_msg_with_param(smu,
+                                             SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
+                                             upper_32_bits(dummy_read_table->mc_address),
+                                             NULL);
+       if (ret)
+               return ret;
+
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
+                                              lower_32_bits(dummy_read_table->mc_address),
+                                              NULL);
+}
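
The fill loop above writes alternating 4 KiB PRBS7 patterns (without and with DBI) across the 256 KiB dummy-read buffer; a compile-time check of that arithmetic, as a sketch (function name hypothetical):

static void __maybe_unused navi10_dummy_table_layout_check(void)
{
	/* 0x40000 bytes == 32 iterations of one NoDbiPrbs7 page plus one DbiPrbs7 page */
	BUILD_BUG_ON(0x40000 % (2 * 0x1000) != 0);
	BUILD_BUG_ON(0x40000 / (2 * 0x1000) != 32);
}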
+
+static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint8_t umc_fw_greater_than_v136 = false;
+       uint8_t umc_fw_disable_cdr = false;
+       uint32_t pmfw_version;
+       uint32_t param;
+       int ret = 0;
+
+       if (!navi10_need_umc_cdr_workaround(smu))
+               return 0;
+
+       ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+       if (ret) {
+               dev_err(adev->dev, "Failed to get smu version!\n");
+               return ret;
+       }
+
+       /*
+        * The messages below are only supported by Navi10 42.53.0 and later
+        * PMFWs and Navi14 53.29.0 and later PMFWs.
+        * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
+        * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
+        * - PPSMC_MSG_GetUMCFWWA
+        */
+       if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
+           ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_GET_UMC_FW_WA,
+                                                     0,
+                                                     &param);
+               if (ret)
+                       return ret;
+
+               /* First bit indicates if the UMC f/w is above v136 */
+               umc_fw_greater_than_v136 = param & 0x1;
+
+               /* Second bit indicates if hybrid-cdr is disabled */
+               umc_fw_disable_cdr = param & 0x2;
+
+               /* w/a only allowed if UMC f/w is <= 136 */
+               if (umc_fw_greater_than_v136)
+                       return 0;
+
+               if (umc_fw_disable_cdr) {
+                       if (adev->asic_type == CHIP_NAVI10)
+                               return navi10_umc_hybrid_cdr_workaround(smu);
+               } else {
+                       return navi10_set_dummy_pstates_table_location(smu);
+               }
+       } else {
+               if (adev->asic_type == CHIP_NAVI10)
+                       return navi10_umc_hybrid_cdr_workaround(smu);
+       }
+
+       return 0;
 }
 
 static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
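
The PMFW version constants gate-checked above decode to the versions named in the comment; a sketch of the assumed packing (helper hypothetical, consistent with 0x2A3200 meaning 42.50 in the removed code):

static inline void pmfw_version_unpack(uint32_t v, uint8_t *major,
				       uint8_t *minor, uint8_t *patch)
{
	*major = (v >> 16) & 0xff;	/* 0x2a3500 -> 42, 0x351D00 -> 53 */
	*minor = (v >> 8) & 0xff;	/* 0x2a3500 -> 53, 0x351D00 -> 29 */
	*patch = v & 0xff;		/* 0 in both cases */
}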
@@ -2275,8 +2336,6 @@ static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
        req->I2CcontrollerPort = 0;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
@@ -2314,6 +2373,12 @@ static int navi10_i2c_read_data(struct i2c_adapter *control,
        struct smu_table_context *smu_table = &adev->smu.smu_table;
        struct smu_table *table = &smu_table->driver_table;
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        navi10_fill_i2c_req(&req, false, address, numbytes, data);
 
@@ -2350,6 +2415,12 @@ static int navi10_i2c_write_data(struct i2c_adapter *control,
        SwI2cRequest_t req;
        struct amdgpu_device *adev = to_amdgpu_device(control);
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        navi10_fill_i2c_req(&req, true, address, numbytes, data);
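
With the BUG_ON() replaced by -EINVAL in both the read and write paths, a caller needing a larger transfer would have to split it; a hedged sketch (helper hypothetical, the navi10_i2c_read_data signature assumed from this hunk, and whether chunking is acceptable depends on the slave device's protocol):

static int navi10_i2c_read_chunked(struct i2c_adapter *control,
				   uint8_t address, uint8_t *data,
				   uint32_t numbytes)
{
	uint32_t done = 0;
	int ret;

	while (done < numbytes) {
		uint32_t n = min_t(uint32_t, numbytes - done,
				   MAX_SW_I2C_COMMANDS);

		ret = navi10_i2c_read_data(control, address, data + done, n);
		if (ret)
			return ret;
		done += n;
	}

	return 0;
}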
 
@@ -2578,6 +2649,39 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
                                               NULL);
 }
 
+static int navi10_post_smu_init(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
+       ret = navi10_run_umc_cdr_workaround(smu);
+       if (ret) {
+               dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
+               return ret;
+       }
+
+       if (!smu->dc_controlled_by_gpio) {
+               /*
+                * For Navi1X, manually switch it to AC mode as PMFW
+                * may boot it in DC mode.
+                */
+               ret = smu_v11_0_set_power_source(smu,
+                                                adev->pm.ac_power ?
+                                                SMU_POWER_SOURCE_AC :
+                                                SMU_POWER_SOURCE_DC);
+               if (ret) {
+                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
+                                       adev->pm.ac_power ? "AC" : "DC");
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
        .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
        .set_default_dpm_table = navi10_set_default_dpm_table,
@@ -2652,7 +2756,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .set_default_od_settings = navi10_set_default_od_settings,
        .od_edit_dpm_table = navi10_od_edit_dpm_table,
        .run_btc = navi10_run_btc,
-       .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
        .set_power_source = smu_v11_0_set_power_source,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
@@ -2661,6 +2764,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = navi10_get_fan_parameters,
+       .post_init = navi10_post_smu_init,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
index 5c22611..c27806f 100644 (file)
@@ -298,11 +298,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
                table_context->power_play_table;
        struct smu_baco_context *smu_baco = &smu->smu_baco;
 
-       mutex_lock(&smu_baco->mutex);
        if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
            powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
                smu_baco->platform_support = true;
-       mutex_unlock(&smu_baco->mutex);
 
        table_context->thermal_controller_type =
                powerplay_table->thermal_controller_type;
@@ -1409,58 +1407,43 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
 }
 
 static int sienna_cichlid_set_watermarks_table(struct smu_context *smu,
-                                              struct dm_pp_wm_sets_with_clock_ranges_soc15
-                                              *clock_ranges)
+                                              struct pp_smu_wm_range_sets *clock_ranges)
 {
        Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
        int i;
 
        if (clock_ranges) {
-               if (clock_ranges->num_wm_dmif_sets > 4 ||
-                   clock_ranges->num_wm_mcif_sets > 4)
+               if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+                   clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
                        return -EINVAL;
 
-               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-                       table->WatermarkRow[1][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+                       table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+                               clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+                               clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+                               clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+                       table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+                               clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+                       table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
+                               clock_ranges->reader_wm_sets[i].wm_inst;
                }
 
-               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-                       table->WatermarkRow[0][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                               1000));
-                       table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+                       table->WatermarkRow[WM_SOCCLK][i].MinClock =
+                               clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+                               clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+                               clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+                       table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+                               clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+                       table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+                               clock_ranges->writer_wm_sets[i].wm_inst;
                }
 
                smu->watermarks_bitmap |= WATERMARKS_EXIST;
@@ -2291,11 +2274,6 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
        dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
        dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
        dev_info(smu->adev->dev, "SkuReserved[8] = 0x%x\n", pptable->SkuReserved[8]);
-       dev_info(smu->adev->dev, "SkuReserved[9] = 0x%x\n", pptable->SkuReserved[9]);
-       dev_info(smu->adev->dev, "SkuReserved[10] = 0x%x\n", pptable->SkuReserved[10]);
-       dev_info(smu->adev->dev, "SkuReserved[11] = 0x%x\n", pptable->SkuReserved[11]);
-       dev_info(smu->adev->dev, "SkuReserved[12] = 0x%x\n", pptable->SkuReserved[12]);
-       dev_info(smu->adev->dev, "SkuReserved[13] = 0x%x\n", pptable->SkuReserved[13]);
 
        dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
        dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
@@ -2444,8 +2422,6 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
        req->I2CcontrollerPort = 0;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
@@ -2483,6 +2459,12 @@ static int sienna_cichlid_i2c_read_data(struct i2c_adapter *control,
        struct smu_table_context *smu_table = &adev->smu.smu_table;
        struct smu_table *table = &smu_table->driver_table;
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        sienna_cichlid_fill_i2c_req(&req, false, address, numbytes, data);
 
@@ -2519,6 +2501,12 @@ static int sienna_cichlid_i2c_write_data(struct i2c_adapter *control,
        SwI2cRequest_t req;
        struct amdgpu_device *adev = to_amdgpu_device(control);
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        sienna_cichlid_fill_i2c_req(&req, true, address, numbytes, data);
 
@@ -2806,6 +2794,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = sienna_cichlid_get_fan_parameters,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
index f5aeb0b..2380759 100644 (file)
@@ -322,39 +322,42 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
        void *table;
        uint16_t version_major, version_minor;
 
-       hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
-       version_major = le16_to_cpu(hdr->header.header_version_major);
-       version_minor = le16_to_cpu(hdr->header.header_version_minor);
-       if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER) {
-               dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
-               switch (version_minor) {
-               case 0:
-                       ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
-                       break;
-               case 1:
-                       ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
-                                                        smu->smu_table.boot_values.pp_table_id);
-                       break;
-               default:
-                       ret = -EINVAL;
-                       break;
+       if (!amdgpu_sriov_vf(adev)) {
+               hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+               version_major = le16_to_cpu(hdr->header.header_version_major);
+               version_minor = le16_to_cpu(hdr->header.header_version_minor);
+               if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
+                       adev->asic_type == CHIP_NAVY_FLOUNDER) {
+                       dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
+                       switch (version_minor) {
+                       case 0:
+                               ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
+                               break;
+                       case 1:
+                               ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
+                                                               smu->smu_table.boot_values.pp_table_id);
+                               break;
+                       default:
+                               ret = -EINVAL;
+                               break;
+                       }
+                       if (ret)
+                               return ret;
+                       goto out;
                }
-               if (ret)
-                       return ret;
+       }
 
-       } else {
-               dev_info(adev->dev, "use vbios provided pptable\n");
-               index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                                                   powerplayinfo);
+       dev_info(adev->dev, "use vbios provided pptable\n");
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                                               powerplayinfo);
 
-               ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
-                                             (uint8_t **)&table);
-               if (ret)
-                       return ret;
-               size = atom_table_size;
-       }
+       ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+                                               (uint8_t **)&table);
+       if (ret)
+               return ret;
+       size = atom_table_size;
 
+out:
        if (!smu->smu_table.power_play_table)
                smu->smu_table.power_play_table = table;
        if (!smu->smu_table.power_play_table_size)
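
The reshuffled pptable selection above reduces to a simple decision, sketched here in comment form (the SR-IOV exclusion presumably reflects that the SMC firmware image, and hence its embedded pptable, is not available to a guest VF; that rationale is an assumption, not stated in the hunk):

/*
 * bare metal with a v2 header and pp_table_id > 0, or Navy Flounder:
 *         use the driver-provided pptable, then goto out;
 * everything else (including all SR-IOV VFs):
 *         fall through to the vbios-provided pptable.
 */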
@@ -453,9 +456,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (smu_power->power_context || smu_power->power_context_size != 0)
-               return -EINVAL;
-
        smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                           GFP_KERNEL);
        if (!smu_power->power_context)
@@ -469,9 +469,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
 
-       if (!smu_power->power_context || smu_power->power_context_size == 0)
-               return -EINVAL;
-
        kfree(smu_power->power_context);
        smu_power->power_context = NULL;
        smu_power->power_context_size = 0;
@@ -700,18 +697,16 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 
 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 {
-       int ret = 0;
        struct amdgpu_device *adev = smu->adev;
 
        /* Navy_Flounder does not support changing the display count currently */
        if (adev->asic_type == CHIP_NAVY_FLOUNDER)
                return 0;
 
-       if (!smu->pm_enabled)
-               return ret;
-
-       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-       return ret;
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_NumOfDisplays,
+                                              count,
+                                              NULL);
 }
 
 
@@ -721,7 +716,6 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
 
-       mutex_lock(&feature->mutex);
        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
                goto failed;
 
@@ -738,7 +732,6 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
                goto failed;
 
 failed:
-       mutex_unlock(&feature->mutex);
        return ret;
 }
 
@@ -775,9 +768,6 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 {
        int ret = 0;
 
-       if (!smu->pm_enabled)
-               return ret;
-
        if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -947,12 +937,45 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
        return 0;
 }
 
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+       return smu_cmn_send_smc_msg(smu,
+                               SMU_MSG_ReenableAcDcInterrupt,
+                               NULL);
+}
+
+static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
+{
+       int ret = 0;
+
+       if (smu->dc_controlled_by_gpio &&
+           smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
+               ret = smu_v11_0_ack_ac_dc_interrupt(smu);
+
+       return ret;
+}
+
+void smu_v11_0_interrupt_work(struct smu_context *smu)
+{
+       if (smu_v11_0_ack_ac_dc_interrupt(smu))
+               dev_err(smu->adev->dev, "Ack AC/DC interrupt failed!\n");
+}
+
 int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
 {
-       if (smu->smu_table.thermal_controller_type)
-               return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+       int ret = 0;
 
-       return 0;
+       if (smu->smu_table.thermal_controller_type) {
+               ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * After init there may be missed interrupts triggered before the
+        * driver registered for them (e.g. AC/DC).
+        */
+       return smu_v11_0_process_pending_interrupt(smu);
 }
 
 int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
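
smu_v11_0_interrupt_work() above, together with the schedule_work() calls in the irq_process hunk below, moves the AC/DC ack out of hard-IRQ context, since acking means sending an SMU message. A generic sketch of the wiring this relies on (the INIT_WORK side lives outside the shown hunks; the wrapper name here is hypothetical):

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	/* process context: safe to send SMU messages */
	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

/* at init time, presumably: INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); */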
@@ -1177,12 +1200,10 @@ int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
                                     uint32_t pstate)
 {
-       int ret = 0;
-       ret = smu_cmn_send_smc_msg_with_param(smu,
-                                         SMU_MSG_SetXgmiMode,
-                                         pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_SetXgmiMode,
+                                              pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
                                          NULL);
-       return ret;
 }
 
 static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
@@ -1250,13 +1271,6 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
        return 0;
 }
 
-static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
-{
-       return smu_cmn_send_smc_msg(smu,
-                               SMU_MSG_ReenableAcDcInterrupt,
-                               NULL);
-}
-
 #define THM_11_0__SRCID__THM_DIG_THERM_L2H             0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
 #define THM_11_0__SRCID__THM_DIG_THERM_H2L             1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
 
@@ -1312,11 +1326,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                        switch (ctxid) {
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
-                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               schedule_work(&smu->interrupt_work);
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
-                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               schedule_work(&smu->interrupt_work);
                                break;
                        case 0x7:
                                /*
@@ -1413,11 +1427,7 @@ int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
 
 int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
 {
-       int ret = 0;
-
-       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
-
-       return ret;
+       return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
 }
 
 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
@@ -1428,13 +1438,8 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
 bool smu_v11_0_baco_is_support(struct smu_context *smu)
 {
        struct smu_baco_context *smu_baco = &smu->smu_baco;
-       bool baco_support;
 
-       mutex_lock(&smu_baco->mutex);
-       baco_support = smu_baco->platform_support;
-       mutex_unlock(&smu_baco->mutex);
-
-       if (!baco_support)
+       if (!smu_baco->platform_support)
                return false;
 
        /* Arcturus does not support this bit mask */
@@ -1521,13 +1526,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu)
 
 int smu_v11_0_baco_exit(struct smu_context *smu)
 {
-       int ret = 0;
-
-       ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
-       if (ret)
-               return ret;
-
-       return ret;
+       return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
 }
 
 int smu_v11_0_mode1_reset(struct smu_context *smu)
index 3b9ac72..66c1026 100644 (file)
@@ -222,14 +222,16 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu,
                        *sclk_mask = 0;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
                if (mclk_mask)
-                       *mclk_mask = 0;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
                if (sclk_mask)
                        /* The sclk is used as gfxclk and has three levels: max/min/current */
                        *sclk_mask = 3 - 1;
 
                if (mclk_mask)
-                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = 0;
 
                if (soc_mask)
                        *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
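
Renoir's memory-clock table is ordered highest-first, so index 0 is the fastest level and NUM_MEMCLK_DPM_LEVELS - 1 the slowest, which is what the swapped indices above and in the next hunk encode; the same rule as a one-line sketch (helper hypothetical):

static inline uint32_t renoir_mclk_level(bool want_min)
{
	/* mclk levels are in reverse order: index 0 = max, last = min */
	return want_min ? NUM_MEMCLK_DPM_LEVELS - 1 : 0;
}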
@@ -323,7 +325,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
                case SMU_UCLK:
                case SMU_FCLK:
                case SMU_MCLK:
-                       ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min);
+                       ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);
                        if (ret)
                                goto failed;
                        break;
@@ -832,9 +834,59 @@ static int renoir_set_performance_level(struct smu_context *smu,
                ret = renoir_force_dpm_limit_value(smu, false);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
-       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                ret = renoir_unforce_dpm_levels(smu);
                break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetHardMinGfxClk,
+                                                     RENOIR_UMD_PSTATE_GFXCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetHardMinFclkByFreq,
+                                                     RENOIR_UMD_PSTATE_FCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetHardMinSocclkByFreq,
+                                                     RENOIR_UMD_PSTATE_SOCCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetHardMinVcn,
+                                                     RENOIR_UMD_PSTATE_VCNCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetSoftMaxGfxClk,
+                                                     RENOIR_UMD_PSTATE_GFXCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetSoftMaxFclkByFreq,
+                                                     RENOIR_UMD_PSTATE_FCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetSoftMaxSocclkByFreq,
+                                                     RENOIR_UMD_PSTATE_SOCCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_SetSoftMaxVcn,
+                                                     RENOIR_UMD_PSTATE_VCNCLK,
+                                                     NULL);
+               if (ret)
+                       return ret;
+               break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                ret = renoir_get_profiling_clk_mask(smu, level,
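
The new PROFILE_STANDARD case sends eight (message, parameter) pairs with identical error handling; a hedged sketch of a table-driven equivalent (the enum type is assumed from the rest of the driver, the table name is hypothetical):

static const struct {
	enum smu_message_type msg;
	uint32_t param;
} renoir_std_pstate_msgs[] = {
	{ SMU_MSG_SetHardMinGfxClk,       RENOIR_UMD_PSTATE_GFXCLK },
	{ SMU_MSG_SetHardMinFclkByFreq,   RENOIR_UMD_PSTATE_FCLK   },
	{ SMU_MSG_SetHardMinSocclkByFreq, RENOIR_UMD_PSTATE_SOCCLK },
	{ SMU_MSG_SetHardMinVcn,          RENOIR_UMD_PSTATE_VCNCLK },
	{ SMU_MSG_SetSoftMaxGfxClk,       RENOIR_UMD_PSTATE_GFXCLK },
	{ SMU_MSG_SetSoftMaxFclkByFreq,   RENOIR_UMD_PSTATE_FCLK   },
	{ SMU_MSG_SetSoftMaxSocclkByFreq, RENOIR_UMD_PSTATE_SOCCLK },
	{ SMU_MSG_SetSoftMaxVcn,          RENOIR_UMD_PSTATE_VCNCLK },
};

/* in the case body: */
for (i = 0; i < ARRAY_SIZE(renoir_std_pstate_msgs); i++) {
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      renoir_std_pstate_msgs[i].msg,
					      renoir_std_pstate_msgs[i].param,
					      NULL);
	if (ret)
		return ret;
}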
@@ -863,50 +915,48 @@ static int renoir_set_performance_level(struct smu_context *smu,
  */
 static int renoir_set_watermarks_table(
                struct smu_context *smu,
-               struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+               struct pp_smu_wm_range_sets *clock_ranges)
 {
        Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
        int i;
 
        if (clock_ranges) {
-               if (clock_ranges->num_wm_dmif_sets > 4 ||
-                               clock_ranges->num_wm_mcif_sets > 4)
+               if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+                   clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
                        return -EINVAL;
 
                /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr */
-               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+               for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
                        table->WatermarkRow[WM_DCFCLK][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+                               clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+                               clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MinMclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+                               clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
                        table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
-                       table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+                               clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+                       table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+                               clock_ranges->reader_wm_sets[i].wm_inst;
+                       table->WatermarkRow[WM_DCFCLK][i].WmType =
+                               clock_ranges->reader_wm_sets[i].wm_type;
                }
 
-               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+               for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
                        table->WatermarkRow[WM_SOCCLK][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+                               clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+                               clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MinMclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+                               clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
                        table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
-                               cpu_to_le16((uint16_t)
-                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
-                       table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
-                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+                               clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+                       table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+                               clock_ranges->writer_wm_sets[i].wm_inst;
+                       table->WatermarkRow[WM_SOCCLK][i].WmType =
+                               clock_ranges->writer_wm_sets[i].wm_type;
                }
 
                smu->watermarks_bitmap |= WATERMARKS_EXIST;
index 8c3f004..11c3c22 100644 (file)
@@ -29,5 +29,6 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
 #define RENOIR_UMD_PSTATE_GFXCLK       700
 #define RENOIR_UMD_PSTATE_SOCCLK       678
 #define RENOIR_UMD_PSTATE_FCLK         800
+#define RENOIR_UMD_PSTATE_VCNCLK       0x022D01D8
 
 #endif
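
Unlike the decimal MHz constants above it, RENOIR_UMD_PSTATE_VCNCLK is hex and appears to pack two 16-bit fields: 0x022D = 557 in the upper half and 0x01D8 = 472 in the lower half, presumably VCLK and DCLK in MHz. Treating that interpretation as an assumption, a sketch:

/* Assumption: SMU_MSG_SetHardMinVcn / SMU_MSG_SetSoftMaxVcn take
 * (vclk_mhz << 16) | dclk_mhz.
 */
#define EXAMPLE_VCN_PSTATE(vclk_mhz, dclk_mhz) \
	(((uint32_t)(vclk_mhz) << 16) | (uint32_t)(dclk_mhz))

/* EXAMPLE_VCN_PSTATE(557, 472) == 0x022D01D8 == RENOIR_UMD_PSTATE_VCNCLK */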
index a58ea08..c30d333 100644 (file)
@@ -112,6 +112,9 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;
 
+       if (smu->adev->in_pci_err_recovery)
+               return 0;
+
        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
@@ -343,9 +346,9 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
        return ret;
 }
 
-static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
-                                              uint64_t feature_mask,
-                                              bool enabled)
+int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+                                       uint64_t feature_mask,
+                                       bool enabled)
 {
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
index 6d00ad7..ab577be 100644 (file)
@@ -52,6 +52,10 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint32_t *feature_mask,
                             uint32_t num);
 
+int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+                                       uint64_t feature_mask,
+                                       bool enabled);
+
 int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable);
index 38c1017..c5adbe4 100644 (file)
@@ -42,6 +42,7 @@
 #define smu_check_fw_version(smu)                                      smu_ppt_funcs(check_fw_version, 0, smu)
 #define smu_write_pptable(smu)                                         smu_ppt_funcs(write_pptable, 0, smu)
 #define smu_set_min_dcef_deep_sleep(smu, clk)                          smu_ppt_funcs(set_min_dcef_deep_sleep, 0, smu, clk)
+#define smu_set_active_display_count(smu, count)                       smu_ppt_funcs(set_active_display_count, 0, smu, count)
 #define smu_set_driver_table_location(smu)                             smu_ppt_funcs(set_driver_table_location, 0, smu)
 #define smu_set_tool_table_location(smu)                               smu_ppt_funcs(set_tool_table_location, 0, smu)
 #define smu_notify_memory_pool_location(smu)                           smu_ppt_funcs(notify_memory_pool_location, 0, smu)
@@ -83,7 +84,6 @@
 #define smu_asic_set_performance_level(smu, level)                     smu_ppt_funcs(set_performance_level, -EINVAL, smu, level)
 #define smu_dump_pptable(smu)                                          smu_ppt_funcs(dump_pptable, 0, smu)
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap)  smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
-#define smu_disable_umc_cdr_12gbps_workaround(smu)                     smu_ppt_funcs(disable_umc_cdr_12gbps_workaround, 0, smu)
 #define smu_set_power_source(smu, power_src)                           smu_ppt_funcs(set_power_source, 0, smu, power_src)
 #define smu_i2c_init(smu, control)                                     smu_ppt_funcs(i2c_init, 0, smu, control)
 #define smu_i2c_fini(smu, control)                                     smu_ppt_funcs(i2c_fini, 0, smu, control)
@@ -95,6 +95,7 @@
 #define smu_gfx_ulv_control(smu, enablement)                           smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
 #define smu_deep_sleep_control(smu, enablement)                                smu_ppt_funcs(deep_sleep_control, 0, smu, enablement)
 #define smu_get_fan_parameters(smu)                                    smu_ppt_funcs(get_fan_parameters, 0, smu)
+#define smu_post_init(smu)                                             smu_ppt_funcs(post_init, 0, smu)
 
 #endif
 #endif
index ecf8a55..6654bcc 100644 (file)
@@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
-       int i, num;
+       int i;
 
        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
@@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
                mapping = dobj->obj.filp->f_mapping;
 
-               for_each_sg(sgt->sgl, sg, count, i) {
+               for_each_sgtable_sg(sgt, sg, i) {
                        struct page *page;
 
                        page = shmem_read_mapping_page(mapping, i);
-                       if (IS_ERR(page)) {
-                               num = i;
+                       if (IS_ERR(page))
                                goto release;
-                       }
 
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }
 
-               if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
-                       num = sgt->nents;
+               if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto release;
-               }
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
@@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
 
-               if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+               if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
@@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        return sgt;
 
  release:
-       for_each_sg(sgt->sgl, sg, num, i)
-               put_page(sg_page(sg));
+       for_each_sgtable_sg(sgt, sg, i)
+               if (sg_page(sg))
+                       put_page(sg_page(sg));
  free_table:
        sg_free_table(sgt);
  free_sgt:
@@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        int i;
 
        if (!dobj->linear)
-               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+               dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 
        if (dobj->obj.filp) {
                struct scatterlist *sg;
-               for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
+               for_each_sgtable_sg(sgt, sg, i)
                        put_page(sg_page(sg));
        }
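
These armada hunks migrate from the raw scatterlist helpers to the sgtable helpers; the contract change they rely on, as a hedged sketch:

/*
 * Old: dma_map_sg() returns the number of mapped entries, 0 on failure,
 *      and the caller tracks counts itself (the removed 'num' variable).
 * New: dma_map_sgtable() returns 0 on success or a negative errno and
 *      records the mapped count in the sg_table; the for_each_sgtable_*()
 *      iterators then pick the right count (orig_nents for CPU-side
 *      walks, nents for DMA-side walks).
 */
if (dma_map_sgtable(attach->dev, sgt, dir, 0))	/* non-zero means failure */
	goto release;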
 
index 3e11af4..ef91646 100644 (file)
@@ -241,6 +241,8 @@ source "drivers/gpu/drm/bridge/analogix/Kconfig"
 
 source "drivers/gpu/drm/bridge/adv7511/Kconfig"
 
+source "drivers/gpu/drm/bridge/cadence/Kconfig"
+
 source "drivers/gpu/drm/bridge/synopsys/Kconfig"
 
 endmenu
index c589a6a..2b3aff1 100644 (file)
@@ -25,4 +25,5 @@ obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
 obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
 
 obj-y += analogix/
+obj-y += cadence/
 obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
new file mode 100644 (file)
index 0000000..511d67b
--- /dev/null
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CDNS_MHDP8546
+       tristate "Cadence DPI/DP bridge"
+       select DRM_KMS_HELPER
+       select DRM_PANEL_BRIDGE
+       depends on OF
+       help
+         Support for the Cadence DPI to DP bridge. This is an internal
+         bridge and is meant to be directly embedded in a SoC.
+         It takes a DPI stream as input and outputs it encoded
+         in DP format.
+
+if DRM_CDNS_MHDP8546
+
+config DRM_CDNS_MHDP8546_J721E
+       depends on ARCH_K3_J721E_SOC || COMPILE_TEST
+       bool "J721E Cadence DPI/DP wrapper support"
+       default y
+       help
+         Support for the J721E Cadence DPI/DP wrapper. This is a wrapper
+         which adds support for J721E related platform ops. It
+         initializes the J721E Display Port and sets up the
+         clock and data muxes.
+endif
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
new file mode 100644 (file)
index 0000000..8f64799
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
+cdns-mhdp8546-y := cdns-mhdp8546-core.o
+cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
new file mode 100644 (file)
index 0000000..d0c6561
--- /dev/null
@@ -0,0 +1,2532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
+ *          Swapnil Jakhade <sjakhade@cadence.com>
+ *          Yuti Amonkar <yamonkar@cadence.com>
+ *          Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *          Jyri Sarha <jsarha@ti.com>
+ *
+ * TODO:
+ *     - Implement optimized mailbox communication using mailbox interrupts
+ *     - Add support for power management
+ *     - Add support for features like audio, MST and fast link training
+ *     - Implement request_fw_cancel to handle HW_STATE
+ *     - Fix the asynchronous firmware loading implementation
+ *     - Add DRM helper function for cdns_mhdp_lower_link_rate
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <asm/unaligned.h>
+
+#include "cdns-mhdp8546-core.h"
+
+#include "cdns-mhdp8546-j721e.h"
+
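+/*
+ * Mailbox helpers.
+ *
+ * The firmware mailbox is a byte-wide FIFO: every message starts with a
+ * four-byte header (opcode, module id, big-endian 16-bit payload size)
+ * followed by the payload. The helpers below poll the EMPTY/FULL status
+ * registers with a timeout and transfer one byte at a time; callers
+ * must hold mbox_mutex around a complete request/response exchange.
+ */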
+static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
+{
+       int ret, empty;
+
+       WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+       ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
+                                empty, !empty, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
+}
+
+static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
+{
+       int ret, full;
+
+       WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+       ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
+                                full, !full, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
+                                        u8 module_id, u8 opcode,
+                                        u16 req_size)
+{
+       u32 mbox_size, i;
+       u8 header[4];
+       int ret;
+
+       /* read the header of the message */
+       for (i = 0; i < sizeof(header); i++) {
+               ret = cdns_mhdp_mailbox_read(mhdp);
+               if (ret < 0)
+                       return ret;
+
+               header[i] = ret;
+       }
+
+       mbox_size = get_unaligned_be16(header + 2);
+
+       if (opcode != header[0] || module_id != header[1] ||
+           req_size != mbox_size) {
+               /*
+                * If the message in the mailbox is not the one we
+                * expect, clear the mailbox by draining its contents.
+                */
+               for (i = 0; i < mbox_size; i++)
+                       if (cdns_mhdp_mailbox_read(mhdp) < 0)
+                               break;
+
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
+                                      u8 *buff, u16 buff_size)
+{
+       u32 i;
+       int ret;
+
+       for (i = 0; i < buff_size; i++) {
+               ret = cdns_mhdp_mailbox_read(mhdp);
+               if (ret < 0)
+                       return ret;
+
+               buff[i] = ret;
+       }
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
+                                 u8 opcode, u16 size, u8 *message)
+{
+       u8 header[4];
+       int ret, i;
+
+       header[0] = opcode;
+       header[1] = module_id;
+       put_unaligned_be16(size, header + 2);
+
+       for (i = 0; i < sizeof(header); i++) {
+               ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < size; i++) {
+               ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
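+/*
+ * Register access helpers built on the mailbox. A GENERAL_REGISTER_READ
+ * response carries eight bytes: the requested address echoed back as a
+ * big-endian 32-bit value, followed by the register value itself. The
+ * echoed address is compared against the request to catch out-of-sync
+ * mailbox traffic.
+ */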
+static
+int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
+{
+       u8 msg[4], resp[8];
+       int ret;
+
+       put_unaligned_be32(addr, msg);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
+                                    GENERAL_REGISTER_READ,
+                                    sizeof(msg), msg);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
+                                           GENERAL_REGISTER_READ,
+                                           sizeof(resp));
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
+       if (ret)
+               goto out;
+
+       /* Returned address value should be the same as requested */
+       if (memcmp(msg, resp, sizeof(msg))) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       *value = get_unaligned_be32(resp + 4);
+
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+       if (ret) {
+               dev_err(mhdp->dev, "Failed to read register\n");
+               *value = 0;
+       }
+
+       return ret;
+}
+
+static
+int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
+{
+       u8 msg[6];
+       int ret;
+
+       put_unaligned_be16(addr, msg);
+       put_unaligned_be32(val, msg + 2);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_REGISTER, sizeof(msg), msg);
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
+                           u8 start_bit, u8 bits_no, u32 val)
+{
+       u8 field[8];
+       int ret;
+
+       put_unaligned_be16(addr, field);
+       field[2] = start_bit;
+       field[3] = bits_no;
+       put_unaligned_be32(val, field + 4);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_FIELD, sizeof(field), field);
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
+                       u32 addr, u8 *data, u16 len)
+{
+       u8 msg[5], reg[5];
+       int ret;
+
+       put_unaligned_be16(len, msg);
+       put_unaligned_be24(addr, msg + 2);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_READ_DPCD, sizeof(msg), msg);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                           DPTX_READ_DPCD,
+                                           sizeof(reg) + len);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
+
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
+{
+       u8 msg[6], reg[5];
+       int ret;
+
+       put_unaligned_be16(1, msg);
+       put_unaligned_be24(addr, msg + 2);
+       msg[5] = value;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_DPCD, sizeof(msg), msg);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                           DPTX_WRITE_DPCD, sizeof(reg));
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+       if (ret)
+               goto out;
+
+       if (addr != get_unaligned_be24(reg + 2))
+               ret = -EINVAL;
+
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
+       return ret;
+}
+
+static
+int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
+{
+       u8 msg[5];
+       int ret, i;
+
+       msg[0] = GENERAL_MAIN_CONTROL;
+       msg[1] = MB_MODULE_ID_GENERAL;
+       msg[2] = 0;
+       msg[3] = 1;
+       msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       for (i = 0; i < sizeof(msg); i++) {
+               ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
+               if (ret)
+                       goto out;
+       }
+
+       /* read the firmware state */
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
+       if (ret)
+               goto out;
+
+       ret = 0;
+
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret < 0)
+               dev_err(mhdp->dev, "set firmware active failed\n");
+       return ret;
+}
+
+static
+int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
+{
+       u8 status;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_HPD_STATE, 0, NULL);
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                           DPTX_HPD_STATE,
+                                           sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
+               status ? "" : "un");
+
+       return status;
+
+err_get_hpd:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
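+/*
+ * Read one EDID block through the firmware. DPTX_GET_EDID addresses the
+ * EDID by segment (block / 2) and block within the segment (block % 2);
+ * the two-byte response header echoes the length and segment, which is
+ * checked below to validate the transfer. The whole exchange is retried
+ * up to four times.
+ */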
+static
+int cdns_mhdp_get_edid_block(void *data, u8 *edid,
+                            unsigned int block, size_t length)
+{
+       struct cdns_mhdp_device *mhdp = data;
+       u8 msg[2], reg[2], i;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       for (i = 0; i < 4; i++) {
+               msg[0] = block / 2;
+               msg[1] = block % 2;
+
+               ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                            DPTX_GET_EDID, sizeof(msg), msg);
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                                   DPTX_GET_EDID,
+                                                   sizeof(reg) + length);
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
+               if (ret)
+                       continue;
+
+               if (reg[0] == length && reg[1] == block / 2)
+                       break;
+       }
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
+                       block, ret);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
+{
+       u8 event = 0;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_READ_EVENT, 0, NULL);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                           DPTX_READ_EVENT, sizeof(event));
+       if (ret < 0)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
+               (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
+               (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
+               (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
+               (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
+
+       return event;
+}
+
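+/*
+ * Ask the firmware to apply link-training adjustments. The payload is
+ * the lane count, a big-endian 16-bit delay in microseconds (the AUX
+ * read interval) and one training byte per lane. The firmware replies
+ * with a DPCD read response for DP_LANE0_1_STATUS carrying the updated
+ * link status.
+ */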
+static
+int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
+                       unsigned int udelay, const u8 *lanes_data,
+                       u8 link_status[DP_LINK_STATUS_SIZE])
+{
+       u8 payload[7];
+       u8 hdr[5]; /* For DPCD read response header */
+       u32 addr;
+       int ret;
+
+       if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
+               dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       payload[0] = nlanes;
+       put_unaligned_be16(udelay, payload + 1);
+       memcpy(payload + 3, lanes_data, nlanes);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_ADJUST_LT,
+                                    sizeof(payload), payload);
+       if (ret)
+               goto out;
+
+       /*
+        * The firmware replies to DPTX_ADJUST_LT with a DPCD read
+        * response, so receive the answer as one.
+        */
+       ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+                                           DPTX_READ_DPCD,
+                                           sizeof(hdr) + DP_LINK_STATUS_SIZE);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
+       if (ret)
+               goto out;
+
+       addr = get_unaligned_be24(hdr + 2);
+       if (addr != DP_LANE0_1_STATUS) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
+                                         DP_LINK_STATUS_SIZE);
+
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
+
+       return ret;
+}
+
+/**
+ * cdns_mhdp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
+{
+       u8 value;
+       int err;
+
+       /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D0;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       /*
+        * According to the DP 1.1 specification, a "Sink Device must
+        * exit the power saving state within 1 ms" (Section 2.5.3.1,
+        * Table 5-52, "Sink Control Field" (register 0x600)).
+        */
+       usleep_range(1000, 2000);
+
+       return 0;
+}
+
+/**
+ * cdns_mhdp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
+                             struct cdns_mhdp_link *link)
+{
+       u8 value;
+       int err;
+
+       /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D3;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/**
+ * cdns_mhdp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
+                            struct cdns_mhdp_link *link)
+{
+       u8 values[2];
+       int err;
+
+       values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+       values[1] = link->num_lanes;
+
+       if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+               values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+       err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
+{
+       return min(mhdp->host.link_rate, mhdp->sink.link_rate);
+}
+
+static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
+{
+       return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
+}
+
+static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
+{
+       return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
+}
+
+static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
+{
+       /* Check if SSC is supported by both sides */
+       return mhdp->host.ssc && mhdp->sink.ssc;
+}
+
+static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
+{
+       dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
+
+       if (mhdp->plugged)
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
+}
+
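+/*
+ * Decode the firmware version from the VER/LIB registers. Newer
+ * firmware (major number 2 onwards) packs the version into fw_ver as
+ * major * 10000 + minor * 100 + revision; older 1.2.x firmware is
+ * recognized by known repository revision values instead.
+ */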
+static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
+{
+       u32 major_num, minor_num, revision;
+       u32 fw_ver, lib_ver;
+
+       fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
+              | readl(mhdp->regs + CDNS_VER_L);
+
+       lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
+               | readl(mhdp->regs + CDNS_LIB_L_ADDR);
+
+       if (lib_ver < 33984) {
+               /*
+                * Older FW versions with major number 1 used to store the
+                * FW version by writing the repository revision number to
+                * these registers. Identify those versions here.
+                */
+               major_num = 1;
+               minor_num = 2;
+               if (fw_ver == 26098) {
+                       revision = 15;
+               } else if (lib_ver == 0 && fw_ver == 0) {
+                       revision = 17;
+               } else {
+                       dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
+                               fw_ver, lib_ver);
+                       return -ENODEV;
+               }
+       } else {
+               /*
+                * Newer FW versions, major number 2 onwards, encode the
+                * version number directly.
+                */
+               major_num = fw_ver / 10000;
+               minor_num = (fw_ver / 100) % 100;
+               revision = (fw_ver % 10000) % 100;
+       }
+
+       dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
+               revision);
+       return 0;
+}
+
+static int cdns_mhdp_fw_activate(const struct firmware *fw,
+                                struct cdns_mhdp_device *mhdp)
+{
+       unsigned int reg;
+       int ret;
+
+       /* Release uCPU reset and stall it. */
+       writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
+
+       memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
+
+       /* Leave debug mode, release stall */
+       writel(0, mhdp->regs + CDNS_APB_CTRL);
+
+       /*
+        * Wait for the KEEP_ALIVE "message" on the first 8 bits.
+        * It is updated on each scheduler "tick" (~2 ms).
+        */
+       ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
+                                reg & CDNS_KEEP_ALIVE_MASK, 500,
+                                CDNS_KEEP_ALIVE_TIMEOUT);
+       if (ret) {
+               dev_err(mhdp->dev,
+                       "device didn't show any sign of life: reg %d\n", reg);
+               return ret;
+       }
+
+       ret = cdns_mhdp_check_fw_version(mhdp);
+       if (ret)
+               return ret;
+
+       /* Init events to 0: the FW clears them on read, not at boot */
+       readl(mhdp->regs + CDNS_SW_EVENT0);
+       readl(mhdp->regs + CDNS_SW_EVENT1);
+       readl(mhdp->regs + CDNS_SW_EVENT2);
+       readl(mhdp->regs + CDNS_SW_EVENT3);
+
+       /* Activate uCPU */
+       ret = cdns_mhdp_set_firmware_active(mhdp, true);
+       if (ret)
+               return ret;
+
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->hw_state = MHDP_HW_READY;
+
+       /*
+        * Here we must keep the lock while enabling the interrupts,
+        * since it would otherwise be possible for the interrupt-enable
+        * code to run after the bridge has been detached. A similar
+        * situation cannot occur in the attach()/detach() callbacks,
+        * since the hw_state change from MHDP_HW_READY to
+        * MHDP_HW_STOPPED happens only on driver removal, when the
+        * bridge should already be detached.
+        */
+       if (mhdp->bridge_attached)
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+                      mhdp->regs + CDNS_APB_INT_MASK);
+
+       spin_unlock(&mhdp->start_lock);
+
+       wake_up(&mhdp->fw_load_wq);
+       dev_dbg(mhdp->dev, "DP FW activated\n");
+
+       return 0;
+}
+
+static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
+{
+       struct cdns_mhdp_device *mhdp = context;
+       bool bridge_attached;
+       int ret;
+
+       dev_dbg(mhdp->dev, "firmware callback\n");
+
+       if (!fw || !fw->data) {
+               dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
+               return;
+       }
+
+       ret = cdns_mhdp_fw_activate(fw, mhdp);
+
+       release_firmware(fw);
+
+       if (ret)
+               return;
+
+       /*
+        *  XXX how to make sure the bridge is still attached when
+        *      calling drm_kms_helper_hotplug_event() after releasing
+        *      the lock? We should not hold the spin lock when
+        *      calling drm_kms_helper_hotplug_event() since it may
+        *      cause a deadlock: the fbdev console calls detect() from
+        *      the same thread, further down the call stack started here.
+        */
+       spin_lock(&mhdp->start_lock);
+       bridge_attached = mhdp->bridge_attached;
+       spin_unlock(&mhdp->start_lock);
+       if (bridge_attached) {
+               if (mhdp->connector.dev)
+                       drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+               else
+                       drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
+       }
+}
+
+static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
+{
+       int ret;
+
+       ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
+                                     GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
+       if (ret) {
+               dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
+                       FW_NAME, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
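+/*
+ * drm_dp_aux transfer hook, implemented on top of the firmware DPCD
+ * read/write mailbox commands. Only native AUX transactions are
+ * supported; I2C-over-AUX requests are rejected with -EOPNOTSUPP.
+ */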
+static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
+                                 struct drm_dp_aux_msg *msg)
+{
+       struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
+       int ret;
+
+       if (msg->request != DP_AUX_NATIVE_WRITE &&
+           msg->request != DP_AUX_NATIVE_READ)
+               return -EOPNOTSUPP;
+
+       if (msg->request == DP_AUX_NATIVE_WRITE) {
+               const u8 *buf = msg->buffer;
+               unsigned int i;
+
+               for (i = 0; i < msg->size; ++i) {
+                       ret = cdns_mhdp_dpcd_write(mhdp,
+                                                  msg->address + i, buf[i]);
+                       if (!ret)
+                               continue;
+
+                       dev_err(mhdp->dev,
+                               "Failed to write DPCD addr %u\n",
+                               msg->address + i);
+
+                       return ret;
+               }
+       } else {
+               ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
+                                         msg->buffer, msg->size);
+               if (ret) {
+                       dev_err(mhdp->dev,
+                               "Failed to read DPCD addr %u\n",
+                               msg->address);
+
+                       return ret;
+               }
+       }
+
+       return msg->size;
+}
+
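+/*
+ * Prepare for the clock-recovery phase: disable any training pattern in
+ * the sink, reset the PHY training configuration, program the link rate
+ * and lane count on both the PHY and the sink, then enable TPS1 with
+ * scrambling disabled.
+ */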
+static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
+{
+       union phy_configure_opts phy_cfg;
+       u32 reg32;
+       int ret;
+
+       drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+                          DP_TRAINING_PATTERN_DISABLE);
+
+       /* Reset PHY configuration */
+       reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+       if (!mhdp->host.scrambler)
+               reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
+                           mhdp->sink.enhanced & mhdp->host.enhanced);
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
+                           CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
+
+       cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
+       phy_cfg.dp.link_rate = mhdp->link.rate / 100;
+       phy_cfg.dp.lanes = mhdp->link.num_lanes;
+
+       memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
+       memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
+
+       phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+       phy_cfg.dp.set_lanes = true;
+       phy_cfg.dp.set_rate = true;
+       phy_cfg.dp.set_voltages = true;
+       ret = phy_configure(mhdp->phy,  &phy_cfg);
+       if (ret) {
+               dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
+                           CDNS_PHY_COMMON_CONFIG |
+                           CDNS_PHY_TRAINING_EN |
+                           CDNS_PHY_TRAINING_TYPE(1) |
+                           CDNS_PHY_SCRAMBLER_BYPASS);
+
+       drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+                          DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
+
+       return 0;
+}
+
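+/*
+ * Translate the sink's ADJUST_REQUEST values into per-lane training
+ * bytes and PHY settings, clamped to the host limits. The voltage swing
+ * and pre-emphasis levels of a lane may not sum to more than 3, so the
+ * pre-emphasis is kept and the voltage swing reduced when the requested
+ * combination would exceed that.
+ */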
+static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
+                                      u8 link_status[DP_LINK_STATUS_SIZE],
+                                      u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
+                                      union phy_configure_opts *phy_cfg)
+{
+       u8 adjust, max_pre_emph, max_volt_swing;
+       u8 set_volt, set_pre;
+       unsigned int i;
+
+       max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
+                          << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+       max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+
+       for (i = 0; i < mhdp->link.num_lanes; i++) {
+               /* Check if Voltage swing and pre-emphasis are within limits */
+               adjust = drm_dp_get_adjust_request_voltage(link_status, i);
+               set_volt = min(adjust, max_volt_swing);
+
+               adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+               set_pre = min(adjust, max_pre_emph)
+                         >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+               /*
+                * If the requested voltage swing and pre-emphasis
+                * combination is not allowed, keep the pre-emphasis
+                * as-is and lower the voltage swing.
+                */
+               if (set_volt + set_pre > 3)
+                       set_volt = 3 - set_pre;
+
+               phy_cfg->dp.voltage[i] = set_volt;
+               lanes_data[i] = set_volt;
+
+               if (set_volt == max_volt_swing)
+                       lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
+
+               phy_cfg->dp.pre[i] = set_pre;
+               lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+               if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
+                       lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       }
+}
+
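+/*
+ * The two helpers below rewrite the ADJUST_REQUEST fields inside a
+ * cached link_status buffer, letting the driver force or clamp the
+ * requested voltage swing and pre-emphasis so that the next training
+ * iteration uses the overridden values.
+ */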
+static
+void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+                                         unsigned int lane, u8 volt)
+{
+       unsigned int s = ((lane & 1) ?
+                         DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+                         DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+       unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
+
+       link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
+       link_status[idx] |= volt << s;
+}
+
+static
+void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+                                              unsigned int lane, u8 pre_emphasis)
+{
+       unsigned int s = ((lane & 1) ?
+                         DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+                         DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+       unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
+
+       link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
+       link_status[idx] |= pre_emphasis << s;
+}
+
+static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
+                                         u8 link_status[DP_LINK_STATUS_SIZE])
+{
+       u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+       u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+       unsigned int i;
+       u8 volt, pre;
+
+       for (i = 0; i < mhdp->link.num_lanes; i++) {
+               volt = drm_dp_get_adjust_request_voltage(link_status, i);
+               pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+               if (volt + pre > 3)
+                       cdns_mhdp_set_adjust_request_voltage(link_status, i,
+                                                            3 - pre);
+               if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
+                       cdns_mhdp_set_adjust_request_voltage(link_status, i,
+                                                            max_volt);
+               if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
+                       cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
+                                                                 i, max_pre);
+       }
+}
+
+static void cdns_mhdp_print_lt_status(const char *prefix,
+                                     struct cdns_mhdp_device *mhdp,
+                                     union phy_configure_opts *phy_cfg)
+{
+       char vs[8] = "0/0/0/0";
+       char pe[8] = "0/0/0/0";
+       unsigned int i;
+
+       for (i = 0; i < mhdp->link.num_lanes; i++) {
+               vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
+               pe[i * 2] = '0' + phy_cfg->dp.pre[i];
+       }
+
+       vs[i * 2 - 1] = '\0';
+       pe[i * 2 - 1] = '\0';
+
+       dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
+               prefix,
+               mhdp->link.num_lanes, mhdp->link.rate / 100,
+               vs, pe);
+}
+
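+/*
+ * Channel-equalization phase: enable the highest commonly supported
+ * training pattern (scrambling stays enabled only for TPS4), then loop
+ * applying the requested adjustments until both clock recovery and
+ * channel equalization report OK, giving up after five failed attempts
+ * or as soon as clock recovery is lost.
+ */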
+static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
+                                              u8 eq_tps,
+                                              unsigned int training_interval)
+{
+       u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+       union phy_configure_opts phy_cfg;
+       u32 reg32;
+       int ret;
+       bool r;
+
+       dev_dbg(mhdp->dev, "Starting EQ phase\n");
+
+       /* Enable link training TPS[eq_tps] in PHY */
+       reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
+               CDNS_PHY_TRAINING_TYPE(eq_tps);
+       if (eq_tps != 4)
+               reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+       drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+                          (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
+                          CDNS_DP_TRAINING_PATTERN_4);
+
+       drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
+
+       do {
+               cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
+                                          &phy_cfg);
+               phy_cfg.dp.lanes = mhdp->link.num_lanes;
+               phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+               phy_cfg.dp.set_lanes = false;
+               phy_cfg.dp.set_rate = false;
+               phy_cfg.dp.set_voltages = true;
+               ret = phy_configure(mhdp->phy,  &phy_cfg);
+               if (ret) {
+                       dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                               __func__, ret);
+                       goto err;
+               }
+
+               cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
+                                   training_interval, lanes_data, link_status);
+
+               r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
+               if (!r)
+                       goto err;
+
+               if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
+                       cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
+                                                 &phy_cfg);
+                       return true;
+               }
+
+               fail_counter_short++;
+
+               cdns_mhdp_adjust_requested_eq(mhdp, link_status);
+       } while (fail_counter_short < 5);
+
+err:
+       cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
+
+       return false;
+}
+
+static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
+                                         u8 link_status[DP_LINK_STATUS_SIZE],
+                                         u8 *req_volt, u8 *req_pre)
+{
+       const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+       const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+       unsigned int i;
+
+       for (i = 0; i < mhdp->link.num_lanes; i++) {
+               u8 val;
+
+               val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
+                     max_volt : req_volt[i];
+               cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
+
+               val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
+                     max_pre : req_pre[i];
+               cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
+       }
+}
+
+static
+void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
+                          bool *same_before_adjust, bool *max_swing_reached,
+                          u8 before_cr[CDNS_DP_MAX_NUM_LANES],
+                          u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
+                          u8 *req_pre)
+{
+       const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+       const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+       bool same_pre, same_volt;
+       unsigned int i;
+       u8 adjust;
+
+       *same_before_adjust = false;
+       *max_swing_reached = false;
+       *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
+
+       for (i = 0; i < mhdp->link.num_lanes; i++) {
+               adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
+               req_volt[i] = min(adjust, max_volt);
+
+               adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
+                     DP_TRAIN_PRE_EMPHASIS_SHIFT;
+               req_pre[i] = min(adjust, max_pre);
+
+               same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
+                          req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+               same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+                           req_volt[i];
+               if (same_pre && same_volt)
+                       *same_before_adjust = true;
+
+               /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
+               if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
+                       *max_swing_reached = true;
+                       return;
+               }
+       }
+}
+
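+/*
+ * Clock-recovery phase: loop adjusting voltage swing and pre-emphasis
+ * with a 100 us training interval until every lane reports CR_DONE.
+ * Gives up after five consecutive attempts with unchanged settings, ten
+ * attempts in total, or when the maximum voltage swing is reached.
+ */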
+static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
+{
+       u8 lanes_data[CDNS_DP_MAX_NUM_LANES];
+       u8 fail_counter_short = 0, fail_counter_cr_long = 0;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+       bool cr_done;
+       union phy_configure_opts phy_cfg;
+       int ret;
+
+       dev_dbg(mhdp->dev, "Starting CR phase\n");
+
+       ret = cdns_mhdp_link_training_init(mhdp);
+       if (ret)
+               goto err;
+
+       drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
+
+       do {
+               u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
+               u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
+               bool same_before_adjust, max_swing_reached;
+
+               cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
+                                          &phy_cfg);
+               phy_cfg.dp.lanes = mhdp->link.num_lanes;
+               phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+               phy_cfg.dp.set_lanes = false;
+               phy_cfg.dp.set_rate = false;
+               phy_cfg.dp.set_voltages = true;
+               ret = phy_configure(mhdp->phy,  &phy_cfg);
+               if (ret) {
+                       dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                               __func__, ret);
+                       goto err;
+               }
+
+               cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
+                                   lanes_data, link_status);
+
+               cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
+                                     &max_swing_reached, lanes_data,
+                                     link_status,
+                                     requested_adjust_volt_swing,
+                                     requested_adjust_pre_emphasis);
+
+               if (max_swing_reached) {
+                       dev_err(mhdp->dev, "CR: max swing reached\n");
+                       goto err;
+               }
+
+               if (cr_done) {
+                       cdns_mhdp_print_lt_status("CR phase ok", mhdp,
+                                                 &phy_cfg);
+                       return true;
+               }
+
+               /* Not all CR_DONE bits set */
+               fail_counter_cr_long++;
+
+               if (same_before_adjust) {
+                       fail_counter_short++;
+                       continue;
+               }
+
+               fail_counter_short = 0;
+               /*
+                * Voltage swing/pre-emphasis adjust requested
+                * during CR phase
+                */
+               cdns_mhdp_adjust_requested_cr(mhdp, link_status,
+                                             requested_adjust_volt_swing,
+                                             requested_adjust_pre_emphasis);
+       } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
+
+err:
+       cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
+
+       return false;
+}
+
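+/*
+ * Step down one link rate: 8.1 -> 5.4 -> 2.7 -> 1.62 Gb/s. Callers
+ * check for RBR (1.62 Gb/s) first, as it has no lower rate to fall
+ * back to.
+ */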
+static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
+{
+       switch (drm_dp_link_rate_to_bw_code(link->rate)) {
+       case DP_LINK_BW_2_7:
+               link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
+               break;
+       case DP_LINK_BW_5_4:
+               link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
+               break;
+       case DP_LINK_BW_8_1:
+               link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+               break;
+       }
+}
+
+static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
+                                  unsigned int training_interval)
+{
+       u32 reg32;
+       const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
+       int ret;
+
+       while (1) {
+               if (!cdns_mhdp_link_training_cr(mhdp)) {
+                       if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
+                           DP_LINK_BW_1_62) {
+                               dev_dbg(mhdp->dev,
+                                       "Reducing link rate during CR phase\n");
+                               cdns_mhdp_lower_link_rate(&mhdp->link);
+
+                               continue;
+                       } else if (mhdp->link.num_lanes > 1) {
+                               dev_dbg(mhdp->dev,
+                                       "Reducing lanes number during CR phase\n");
+                               mhdp->link.num_lanes >>= 1;
+                               mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
+
+                               continue;
+                       }
+
+                       dev_err(mhdp->dev,
+                               "Link training failed during CR phase\n");
+                       goto err;
+               }
+
+               if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
+                                                      training_interval))
+                       break;
+
+               if (mhdp->link.num_lanes > 1) {
+                       dev_dbg(mhdp->dev,
+                               "Reducing lanes number during EQ phase\n");
+                       mhdp->link.num_lanes >>= 1;
+
+                       continue;
+               } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
+                          DP_LINK_BW_1_62) {
+                       dev_dbg(mhdp->dev,
+                               "Reducing link rate during EQ phase\n");
+                       cdns_mhdp_lower_link_rate(&mhdp->link);
+                       mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
+
+                       continue;
+               }
+
+               dev_err(mhdp->dev, "Link training failed during EQ phase\n");
+               goto err;
+       }
+
+       dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
+               mhdp->link.num_lanes, mhdp->link.rate / 100);
+
+       drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+                          mhdp->host.scrambler ? 0 :
+                          DP_LINK_SCRAMBLING_DISABLE);
+
+       ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
+       if (ret < 0) {
+               dev_err(mhdp->dev,
+                       "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+                       ret);
+               return ret;
+       }
+       reg32 &= ~GENMASK(1, 0);
+       reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
+       reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
+       reg32 |= CDNS_DP_FRAMER_EN;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
+
+       /* Reset PHY config */
+       reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+       if (!mhdp->host.scrambler)
+               reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+       return 0;
+err:
+       /* Reset PHY config */
+       reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+       if (!mhdp->host.scrambler)
+               reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+       drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+                          DP_TRAINING_PATTERN_DISABLE);
+
+       return -EIO;
+}
+
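+/*
+ * Convert the DPCD TRAINING_AUX_RD_INTERVAL field to microseconds:
+ * 0 maps to 400 us and values 1-4 map to 4000 << (interval - 1) us,
+ * which is at least as long as the interval the DPCD value mandates.
+ */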
+static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
+                                             u32 interval)
+{
+       if (interval == 0)
+               return 400;
+       if (interval < 5)
+               return 4000 << (interval - 1);
+       dev_err(mhdp->dev,
+               "wrong training interval returned by DPCD: %d\n", interval);
+       return 0;
+}
+
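+/*
+ * Derive the host (source) capabilities from the PHY attributes,
+ * defaulting to four lanes and an 8.1 Gb/s link rate when the PHY does
+ * not report bus_width or max_link_rate.
+ */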
+static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
+{
+       unsigned int link_rate;
+
+       /* Get source capabilities based on PHY attributes */
+
+       mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
+       if (!mhdp->host.lanes_cnt)
+               mhdp->host.lanes_cnt = 4;
+
+       link_rate = mhdp->phy->attrs.max_link_rate;
+       if (!link_rate)
+               link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
+       else
+               /* PHY uses Mb/s, DRM uses tens of kb/s. */
+               link_rate *= 100;
+
+       mhdp->host.link_rate = link_rate;
+       mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
+       mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
+       mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
+                                 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
+                                 CDNS_SUPPORT_TPS(4);
+       mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
+       mhdp->host.fast_link = false;
+       mhdp->host.enhanced = true;
+       mhdp->host.scrambler = true;
+       mhdp->host.ssc = false;
+}
+
+static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
+                                    u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       mhdp->sink.link_rate = mhdp->link.rate;
+       mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
+       mhdp->sink.enhanced = !!(mhdp->link.capabilities &
+                                DP_LINK_CAP_ENHANCED_FRAMING);
+
+       /* Set SSC support */
+       mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
+                                 DP_MAX_DOWNSPREAD_0_5);
+
+       /* Set TPS support */
+       mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
+       if (drm_dp_tps3_supported(dpcd))
+               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
+       if (drm_dp_tps4_supported(dpcd))
+               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
+
+       /* Set fast link support */
+       mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
+                                 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
+{
+       u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
+       u32 resp, interval, interval_us;
+       u8 ext_cap_chk = 0;
+       unsigned int addr;
+       int err;
+
+       WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
+
+       drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
+                         &ext_cap_chk);
+
+       if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
+               addr = DP_DP13_DPCD_REV;
+       else
+               addr = DP_DPCD_REV;
+
+       err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
+       if (err < 0) {
+               dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
+               return err;
+       }
+
+       mhdp->link.revision = dpcd[0];
+       mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
+       mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+
+       if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
+               mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+       dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
+       cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+
+       cdns_mhdp_fill_sink_caps(mhdp, dpcd);
+
+       mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
+       mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
+
+       /* Disable framer for link training */
+       err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
+       if (err < 0) {
+               dev_err(mhdp->dev,
+                       "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+                       err);
+               return err;
+       }
+
+       resp &= ~CDNS_DP_FRAMER_EN;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
+
+       /* Spread AMP if required, enable 8b/10b coding */
+       amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
+       amp[1] = DP_SET_ANSI_8B10B;
+       drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
+
+       if (mhdp->host.fast_link && mhdp->sink.fast_link) {
+               dev_err(mhdp->dev, "fastlink not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
+       interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
+       if (!interval_us ||
+           cdns_mhdp_link_training(mhdp, interval_us)) {
+               dev_err(mhdp->dev, "Link training failed. Exiting.\n");
+               return -EIO;
+       }
+
+       mhdp->link_up = true;
+
+       return 0;
+}
+
+static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
+{
+       WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
+
+       if (mhdp->plugged)
+               cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+
+       mhdp->link_up = false;
+}
+
+static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
+                                      struct drm_connector *connector)
+{
+       if (!mhdp->plugged)
+               return NULL;
+
+       return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
+}
+
+static int cdns_mhdp_get_modes(struct drm_connector *connector)
+{
+       struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
+       struct edid *edid;
+       int num_modes;
+
+       if (!mhdp->plugged)
+               return 0;
+
+       edid = cdns_mhdp_get_edid(mhdp, connector);
+       if (!edid) {
+               dev_err(mhdp->dev, "Failed to read EDID\n");
+               return 0;
+       }
+
+       drm_connector_update_edid_property(connector, edid);
+       num_modes = drm_add_edid_modes(connector, edid);
+       kfree(edid);
+
+       /*
+        * HACK: Warn about unsupported display formats until we deal
+        *       with them correctly.
+        */
+       if (connector->display_info.color_formats &&
+           !(connector->display_info.color_formats &
+             mhdp->display_fmt.color_format))
+               dev_warn(mhdp->dev,
+                        "%s: No supported color_format found (0x%08x)\n",
+                        __func__, connector->display_info.color_formats);
+
+       if (connector->display_info.bpc &&
+           connector->display_info.bpc < mhdp->display_fmt.bpc)
+               dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
+                        __func__, connector->display_info.bpc,
+                        mhdp->display_fmt.bpc);
+
+       return num_modes;
+}
+
+static int cdns_mhdp_connector_detect(struct drm_connector *conn,
+                                     struct drm_modeset_acquire_ctx *ctx,
+                                     bool force)
+{
+       struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+       return cdns_mhdp_detect(mhdp);
+}
+
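+/*
+ * Bits per pixel for the configured format: three components per pixel
+ * for RGB and YCbCr 4:4:4, two for 4:2:2, an effective 1.5 for 4:2:0,
+ * and a single component in Y-only mode.
+ */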
+static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
+{
+       u32 bpp;
+
+       if (fmt->y_only)
+               return fmt->bpc;
+
+       switch (fmt->color_format) {
+       case DRM_COLOR_FORMAT_RGB444:
+       case DRM_COLOR_FORMAT_YCRCB444:
+               bpp = fmt->bpc * 3;
+               break;
+       case DRM_COLOR_FORMAT_YCRCB422:
+               bpp = fmt->bpc * 2;
+               break;
+       case DRM_COLOR_FORMAT_YCRCB420:
+               bpp = fmt->bpc * 3 / 2;
+               break;
+       default:
+               bpp = fmt->bpc * 3;
+               WARN_ON(1);
+       }
+       return bpp;
+}
+
+static
+bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
+                           const struct drm_display_mode *mode,
+                           unsigned int lanes, unsigned int rate)
+{
+       u32 max_bw, req_bw, bpp;
+
+       /*
+        * mode->clock is expressed in kHz. Multiplying by bpp and dividing
+        * by 8 gives the number of kB/s. DisplayPort applies 8b/10b
+        * encoding, so the value equals the bandwidth in 10 kb/s units,
+        * which matches the units of the rate parameter.
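+        *
+        * For example, a 1920x1080@60 RGB 8-bpc mode has a pixel clock of
+        * 148500 kHz and bpp = 24, giving req_bw = 445500, while four
+        * lanes at 2.7 Gb/s (rate = 270000) give max_bw = 1080000.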
+        */
+
+       bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+       req_bw = mode->clock * bpp / 8;
+       max_bw = lanes * rate;
+       if (req_bw > max_bw) {
+               dev_dbg(mhdp->dev,
+                       "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
+                       mode->name, req_bw, max_bw);
+
+               return false;
+       }
+
+       return true;
+}
+
+static
+enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
+                                         struct drm_display_mode *mode)
+{
+       struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+       mutex_lock(&mhdp->link_mutex);
+
+       if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+                                   mhdp->link.rate)) {
+               mutex_unlock(&mhdp->link_mutex);
+               return MODE_CLOCK_HIGH;
+       }
+
+       mutex_unlock(&mhdp->link_mutex);
+       return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
+       .detect_ctx = cdns_mhdp_connector_detect,
+       .get_modes = cdns_mhdp_get_modes,
+       .mode_valid = cdns_mhdp_mode_valid,
+};
+
+static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .reset = drm_atomic_helper_connector_reset,
+       .destroy = drm_connector_cleanup,
+};
+
+static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
+{
+       u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
+       struct drm_connector *conn = &mhdp->connector;
+       struct drm_bridge *bridge = &mhdp->bridge;
+       int ret;
+
+       if (!bridge->encoder) {
+               dev_err(mhdp->dev, "Parent encoder object not found\n");
+               return -ENODEV;
+       }
+
+       conn->polled = DRM_CONNECTOR_POLL_HPD;
+
+       ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
+                                DRM_MODE_CONNECTOR_DisplayPort);
+       if (ret) {
+               dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
+               return ret;
+       }
+
+       drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
+
+       ret = drm_display_info_set_bus_formats(&conn->display_info,
+                                              &bus_format, 1);
+       if (ret)
+               return ret;
+
+       ret = drm_connector_attach_encoder(conn, bridge->encoder);
+       if (ret) {
+               dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int cdns_mhdp_attach(struct drm_bridge *bridge,
+                           enum drm_bridge_attach_flags flags)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+       bool hw_ready;
+       int ret;
+
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+               ret = cdns_mhdp_connector_init(mhdp);
+               if (ret)
+                       return ret;
+       }
+
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->bridge_attached = true;
+       hw_ready = mhdp->hw_state == MHDP_HW_READY;
+
+       spin_unlock(&mhdp->start_lock);
+
+       /* Enable SW event interrupts */
+       if (hw_ready)
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+                      mhdp->regs + CDNS_APB_INT_MASK);
+
+       return 0;
+}
+
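+/*
+ * Program the DP framer and Main Stream Attribute (MSA) registers for
+ * the given mode: pixel encoding and bit depth, sync polarities,
+ * interlace handling, porches and the per-line byte count.
+ */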
+static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
+                                     const struct drm_display_mode *mode)
+{
+       unsigned int dp_framer_sp = 0, msa_horizontal_1,
+               msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
+               misc0 = 0, misc1 = 0, pxl_repr,
+               front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
+               dp_vertical_1;
+       u8 stream_id = mhdp->stream_id;
+       u32 bpp, bpc, pxlfmt, framer;
+       int ret;
+
+       pxlfmt = mhdp->display_fmt.color_format;
+       bpc = mhdp->display_fmt.bpc;
+
+       /*
+        * If YCbCr is supported and the stream is not SD, use ITU709.
+        * The ITU version for YCbCr 4:2:0 still needs to be handled once
+        * it is supported.
+        */
+       if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
+            pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
+               misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
+
+       bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+
+       switch (pxlfmt) {
+       case DRM_COLOR_FORMAT_RGB444:
+               pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
+               misc0 |= DP_COLOR_FORMAT_RGB;
+               break;
+       case DRM_COLOR_FORMAT_YCRCB444:
+               pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
+               misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
+               break;
+       case DRM_COLOR_FORMAT_YCRCB422:
+               pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
+               misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
+               break;
+       case DRM_COLOR_FORMAT_YCRCB420:
+               pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
+               break;
+       default:
+               pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
+       }
+
+       switch (bpc) {
+       case 6:
+               misc0 |= DP_TEST_BIT_DEPTH_6;
+               pxl_repr |= CDNS_DP_FRAMER_6_BPC;
+               break;
+       case 8:
+               misc0 |= DP_TEST_BIT_DEPTH_8;
+               pxl_repr |= CDNS_DP_FRAMER_8_BPC;
+               break;
+       case 10:
+               misc0 |= DP_TEST_BIT_DEPTH_10;
+               pxl_repr |= CDNS_DP_FRAMER_10_BPC;
+               break;
+       case 12:
+               misc0 |= DP_TEST_BIT_DEPTH_12;
+               pxl_repr |= CDNS_DP_FRAMER_12_BPC;
+               break;
+       case 16:
+               misc0 |= DP_TEST_BIT_DEPTH_16;
+               pxl_repr |= CDNS_DP_FRAMER_16_BPC;
+               break;
+       }
+
+       bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
+
+       cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
+                           bnd_hsync2vsync);
+
+       hsync2vsync_pol_ctrl = 0;
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
+       cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
+                           hsync2vsync_pol_ctrl);
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
+
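+       /*
+        * Worked example (illustrative CEA 1080p60 timings): with
+        * crtc_hdisplay = 1920, crtc_hsync_start = 2008, crtc_hsync_end = 2052
+        * and crtc_htotal = 2200, front_porch = 2008 - 1920 = 88 and
+        * back_porch = 2200 - 2052 = 148.
+        */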
+       front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
+       back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
+                           CDNS_DP_FRONT_PORCH(front_porch) |
+                           CDNS_DP_BACK_PORCH(back_porch));
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
+                           mode->crtc_hdisplay * bpp / 8);
+
+       msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
+                           CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
+                           CDNS_DP_MSAH0_HSYNC_START(msa_h0));
+
+       hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+       msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
+                          CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
+                           msa_horizontal_1);
+
+       msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
+                           CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
+                           CDNS_DP_MSAV0_VSYNC_START(msa_v0));
+
+       vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+       msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
+                        CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
+                           msa_vertical_1);
+
+       if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+           mode->crtc_vtotal % 2 == 0)
+               misc1 = DP_TEST_INTERLACED;
+       if (mhdp->display_fmt.y_only)
+               misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
+       /* Use VSC SDP for Y420 */
+       if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
+               misc1 = CDNS_DP_TEST_VSC_SDP;
+
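+       /* misc0 fills bits 7:0 and misc1 bits 15:8 of the MSA MISC word */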
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
+                           misc0 | (misc1 << 8));
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
+                           CDNS_DP_H_HSYNC_WIDTH(hsync) |
+                           CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
+                           CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
+                           CDNS_DP_V0_VSTART(msa_v0));
+
+       dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
+       if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+           mode->crtc_vtotal % 2 == 0)
+               dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
+
+       cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
+                               (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+                               CDNS_DP_VB_ID_INTERLACED : 0);
+
+       ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
+       if (ret < 0) {
+               dev_err(mhdp->dev,
+                       "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+                       ret);
+               return;
+       }
+       framer |= CDNS_DP_FRAMER_EN;
+       framer &= ~CDNS_DP_NO_VIDEO_MODE;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
+}
+
+static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
+                                const struct drm_display_mode *mode)
+{
+       u32 rate, vs, required_bandwidth, available_bandwidth;
+       s32 line_thresh1, line_thresh2, line_thresh = 0;
+       int pxlclock = mode->crtc_clock;
+       u32 tu_size = 64;
+       u32 bpp;
+
+       /* Get rate in MSymbols per second per lane */
+       rate = mhdp->link.rate / 1000;
+
+       bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+
+       required_bandwidth = pxlclock * bpp / 8;
+       available_bandwidth = mhdp->link.num_lanes * rate;
+
+       vs = tu_size * required_bandwidth / available_bandwidth;
+       vs /= 1000;
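+       /*
+        * Worked example (illustrative): a 148500 kHz pixel clock at
+        * bpp = 24 on 4 lanes at HBR (link.rate = 270000, so rate = 270)
+        * gives required_bandwidth = 148500 * 24 / 8 = 445500,
+        * available_bandwidth = 4 * 270 = 1080, and thus
+        * vs = 64 * 445500 / 1080 / 1000 = 26 valid symbols per 64-symbol TU.
+        */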
+
+       if (vs == tu_size)
+               vs = tu_size - 1;
+
+       line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
+       line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
+       line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
+       line_thresh = (line_thresh >> 5) + 2;
+
+       mhdp->stream_id = 0;
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
+                           CDNS_DP_FRAMER_TU_VS(vs) |
+                           CDNS_DP_FRAMER_TU_SIZE(tu_size) |
+                           CDNS_DP_FRAMER_TU_CNT_RST_EN);
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
+                           line_thresh & GENMASK(5, 0));
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
+                           CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
+                                                  0 : tu_size - vs));
+
+       cdns_mhdp_configure_video(mhdp, mode);
+}
+
+static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+                                   struct drm_bridge_state *bridge_state)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+       struct drm_atomic_state *state = bridge_state->base.state;
+       struct cdns_mhdp_bridge_state *mhdp_state;
+       struct drm_crtc_state *crtc_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
+       struct drm_bridge_state *new_state;
+       const struct drm_display_mode *mode;
+       u32 resp;
+       int ret = 0;
+
+       dev_dbg(mhdp->dev, "bridge enable\n");
+
+       mutex_lock(&mhdp->link_mutex);
+
+       if (mhdp->plugged && !mhdp->link_up) {
+               ret = cdns_mhdp_link_up(mhdp);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
+               mhdp->info->ops->enable(mhdp);
+
+       /* Enable VIF clock for stream 0 */
+       ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
+       if (ret < 0) {
+               dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
+               goto out;
+       }
+
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
+                           resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
+
+       connector = drm_atomic_get_new_connector_for_encoder(state,
+                                                            bridge->encoder);
+       if (WARN_ON(!connector))
+               goto out;
+
+       conn_state = drm_atomic_get_new_connector_state(state, connector);
+       if (WARN_ON(!conn_state))
+               goto out;
+
+       crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+       if (WARN_ON(!crtc_state))
+               goto out;
+
+       mode = &crtc_state->adjusted_mode;
+
+       new_state = drm_atomic_get_new_bridge_state(state, bridge);
+       if (WARN_ON(!new_state))
+               goto out;
+
+       if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+                                   mhdp->link.rate)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cdns_mhdp_sst_enable(mhdp, mode);
+
+       mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+
+       mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+       drm_mode_set_name(mhdp_state->current_mode);
+
+       dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
+
+       mhdp->bridge_enabled = true;
+
+out:
+       mutex_unlock(&mhdp->link_mutex);
+       if (ret < 0)
+               schedule_work(&mhdp->modeset_retry_work);
+}
+
+static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
+                                    struct drm_bridge_state *bridge_state)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+       u32 resp;
+
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       mutex_lock(&mhdp->link_mutex);
+
+       mhdp->bridge_enabled = false;
+       cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
+       resp &= ~CDNS_DP_FRAMER_EN;
+       resp |= CDNS_DP_NO_VIDEO_MODE;
+       cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
+
+       cdns_mhdp_link_down(mhdp);
+
+       /* Disable VIF clock for stream 0 */
+       cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
+       cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
+                           resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
+
+       if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
+               mhdp->info->ops->disable(mhdp);
+
+       mutex_unlock(&mhdp->link_mutex);
+}
+
+static void cdns_mhdp_detach(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->bridge_attached = false;
+
+       spin_unlock(&mhdp->start_lock);
+
+       writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static struct drm_bridge_state *
+cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_bridge_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+       return &state->base;
+}
+
+static void
+cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+                                     struct drm_bridge_state *state)
+{
+       struct cdns_mhdp_bridge_state *cdns_mhdp_state;
+
+       cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
+
+       if (cdns_mhdp_state->current_mode) {
+               drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
+               cdns_mhdp_state->current_mode = NULL;
+       }
+
+       kfree(cdns_mhdp_state);
+}
+
+static struct drm_bridge_state *
+cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_bridge_state *cdns_mhdp_state;
+
+       cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
+       if (!cdns_mhdp_state)
+               return NULL;
+
+       __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
+
+       return &cdns_mhdp_state->base;
+}
+
+static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
+                                 struct drm_bridge_state *bridge_state,
+                                 struct drm_crtc_state *crtc_state,
+                                 struct drm_connector_state *conn_state)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+       const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+
+       mutex_lock(&mhdp->link_mutex);
+
+       if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+                                   mhdp->link.rate)) {
+               dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
+                       __func__, mode->name, mhdp->link.num_lanes,
+                       mhdp->link.rate / 100);
+               mutex_unlock(&mhdp->link_mutex);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&mhdp->link_mutex);
+       return 0;
+}
+
+static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       return cdns_mhdp_detect(mhdp);
+}
+
+static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
+                                             struct drm_connector *connector)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       return cdns_mhdp_get_edid(mhdp, connector);
+}
+
+static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       /* Enable SW event interrupts */
+       if (mhdp->bridge_attached)
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+                      mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
+       .atomic_enable = cdns_mhdp_atomic_enable,
+       .atomic_disable = cdns_mhdp_atomic_disable,
+       .atomic_check = cdns_mhdp_atomic_check,
+       .attach = cdns_mhdp_attach,
+       .detach = cdns_mhdp_detach,
+       .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
+       .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
+       .atomic_reset = cdns_mhdp_bridge_atomic_reset,
+       .detect = cdns_mhdp_bridge_detect,
+       .get_edid = cdns_mhdp_bridge_get_edid,
+       .hpd_enable = cdns_mhdp_bridge_hpd_enable,
+       .hpd_disable = cdns_mhdp_bridge_hpd_disable,
+};
+
+static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
+{
+       int hpd_event, hpd_status;
+
+       *hpd_pulse = false;
+
+       hpd_event = cdns_mhdp_read_hpd_event(mhdp);
+
+       /* Getting event bits failed, bail out */
+       if (hpd_event < 0) {
+               dev_warn(mhdp->dev, "%s: read event failed: %d\n",
+                        __func__, hpd_event);
+               return false;
+       }
+
+       hpd_status = cdns_mhdp_get_hpd_status(mhdp);
+       if (hpd_status < 0) {
+               dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
+                        __func__, hpd_status);
+               return false;
+       }
+
+       if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
+               *hpd_pulse = true;
+
+       return !!hpd_status;
+}
+
+static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
+{
+       struct cdns_mhdp_bridge_state *cdns_bridge_state;
+       struct drm_display_mode *current_mode;
+       bool old_plugged = mhdp->plugged;
+       struct drm_bridge_state *state;
+       u8 status[DP_LINK_STATUS_SIZE];
+       bool hpd_pulse;
+       int ret = 0;
+
+       mutex_lock(&mhdp->link_mutex);
+
+       mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
+
+       if (!mhdp->plugged) {
+               cdns_mhdp_link_down(mhdp);
+               mhdp->link.rate = mhdp->host.link_rate;
+               mhdp->link.num_lanes = mhdp->host.lanes_cnt;
+               goto out;
+       }
+
+       /*
+        * If we get an HPD pulse event and we were and still are connected,
+        * check the link status. If link status is ok, there's nothing to do
+        * as we don't handle DP interrupts. If link status is bad, continue
+        * with full link setup.
+        */
+       if (hpd_pulse && old_plugged == mhdp->plugged) {
+               ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
+
+               /*
+                * If everything looks fine, just return, as we don't handle
+                * DP IRQs.
+                */
+               if (ret > 0 &&
+                   drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
+                   drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
+                       goto out;
+
+               /* If link is bad, mark link as down so that we do a new LT */
+               mhdp->link_up = false;
+       }
+
+       if (!mhdp->link_up) {
+               ret = cdns_mhdp_link_up(mhdp);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if (mhdp->bridge_enabled) {
+               state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
+               if (!state) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
+               if (!cdns_bridge_state) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               current_mode = cdns_bridge_state->current_mode;
+               if (!current_mode) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
+                                           mhdp->link.rate)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
+                       current_mode->name);
+
+               cdns_mhdp_sst_enable(mhdp, current_mode);
+       }
+out:
+       mutex_unlock(&mhdp->link_mutex);
+       return ret;
+}
+
+static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
+{
+       struct cdns_mhdp_device *mhdp;
+       struct drm_connector *conn;
+
+       mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
+
+       conn = &mhdp->connector;
+
+       /* Grab the lock before changing the connector property */
+       mutex_lock(&conn->dev->mode_config.mutex);
+
+       /*
+        * Set connector link status to BAD and send a Uevent to notify
+        * userspace to do a modeset.
+        */
+       drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
+       mutex_unlock(&conn->dev->mode_config.mutex);
+
+       /* Send Hotplug uevent so userspace can reprobe */
+       drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+}
+
+static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
+{
+       struct cdns_mhdp_device *mhdp = data;
+       u32 apb_stat, sw_ev0;
+       bool bridge_attached;
+       int ret;
+
+       apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
+       if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
+               return IRQ_NONE;
+
+       sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
+
+       /*
+        * Calling drm_kms_helper_hotplug_event() when not attached
+        * to a drm device causes an oops because drm_bridge->dev
+        * is NULL. See the cdns_mhdp_fw_cb() comments for details about
+        * the problems related to the drm_kms_helper_hotplug_event() call.
+        */
+       spin_lock(&mhdp->start_lock);
+       bridge_attached = mhdp->bridge_attached;
+       spin_unlock(&mhdp->start_lock);
+
+       if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
+               ret = cdns_mhdp_update_link_status(mhdp);
+               if (mhdp->connector.dev) {
+                       if (ret < 0)
+                               schedule_work(&mhdp->modeset_retry_work);
+                       else
+                               drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+               } else {
+                       drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int cdns_mhdp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct cdns_mhdp_device *mhdp;
+       unsigned long rate;
+       struct clk *clk;
+       int ret;
+       int irq;
+
+       mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
+       if (!mhdp)
+               return -ENOMEM;
+
+       clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
+               return PTR_ERR(clk);
+       }
+
+       mhdp->clk = clk;
+       mhdp->dev = dev;
+       mutex_init(&mhdp->mbox_mutex);
+       mutex_init(&mhdp->link_mutex);
+       spin_lock_init(&mhdp->start_lock);
+
+       drm_dp_aux_init(&mhdp->aux);
+       mhdp->aux.dev = dev;
+       mhdp->aux.transfer = cdns_mhdp_transfer;
+
+       mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mhdp->regs)) {
+               dev_err(dev, "Failed to get memory resource\n");
+               return PTR_ERR(mhdp->regs);
+       }
+
+       mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
+       if (IS_ERR(mhdp->phy)) {
+               dev_err(dev, "no PHY configured\n");
+               return PTR_ERR(mhdp->phy);
+       }
+
+       platform_set_drvdata(pdev, mhdp);
+
+       mhdp->info = of_device_get_match_data(dev);
+
+       clk_prepare_enable(clk);
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               dev_err(dev, "pm_runtime_get_sync failed\n");
+               pm_runtime_disable(dev);
+               goto clk_disable;
+       }
+
+       if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
+               ret = mhdp->info->ops->init(mhdp);
+               if (ret != 0) {
+                       dev_err(dev, "MHDP platform initialization failed: %d\n",
+                               ret);
+                       goto runtime_put;
+               }
+       }
+
+       rate = clk_get_rate(clk);
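+       /*
+        * The functional clock rate is handed to the controller split into
+        * MHz and remainder-Hz halves, e.g. (illustrative) a 125 MHz clock
+        * yields CDNS_SW_CLK_H = 125 and CDNS_SW_CLK_L = 0.
+        */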
+       writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
+       writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
+
+       dev_dbg(dev, "func clk rate %lu Hz\n", rate);
+
+       writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
+
+       irq = platform_get_irq(pdev, 0);
+       ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
+                                       cdns_mhdp_irq_handler, IRQF_ONESHOT,
+                                       "mhdp8546", mhdp);
+       if (ret) {
+               dev_err(dev, "cannot install IRQ %d\n", irq);
+               ret = -EIO;
+               goto plat_fini;
+       }
+
+       cdns_mhdp_fill_host_caps(mhdp);
+
+       /* Initialize link rate and num of lanes to host values */
+       mhdp->link.rate = mhdp->host.link_rate;
+       mhdp->link.num_lanes = mhdp->host.lanes_cnt;
+
+       /* The only currently supported format */
+       mhdp->display_fmt.y_only = false;
+       mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
+       mhdp->display_fmt.bpc = 8;
+
+       mhdp->bridge.of_node = pdev->dev.of_node;
+       mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
+       mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+                          DRM_BRIDGE_OP_HPD;
+       mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+       if (mhdp->info)
+               mhdp->bridge.timings = mhdp->info->timings;
+
+       ret = phy_init(mhdp->phy);
+       if (ret) {
+               dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
+               goto plat_fini;
+       }
+
+       /* Initialize the work for modeset in case of link train failure */
+       INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
+
+       init_waitqueue_head(&mhdp->fw_load_wq);
+
+       ret = cdns_mhdp_load_firmware(mhdp);
+       if (ret)
+               goto phy_exit;
+
+       drm_bridge_add(&mhdp->bridge);
+
+       return 0;
+
+phy_exit:
+       phy_exit(mhdp->phy);
+plat_fini:
+       if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
+               mhdp->info->ops->exit(mhdp);
+runtime_put:
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
+clk_disable:
+       clk_disable_unprepare(mhdp->clk);
+
+       return ret;
+}
+
+static int cdns_mhdp_remove(struct platform_device *pdev)
+{
+       struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
+       unsigned long timeout = msecs_to_jiffies(100);
+       bool stop_fw = false;
+       int ret;
+
+       drm_bridge_remove(&mhdp->bridge);
+
+       ret = wait_event_timeout(mhdp->fw_load_wq,
+                                mhdp->hw_state == MHDP_HW_READY,
+                                timeout);
+       if (ret == 0)
+               dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+                       __func__);
+       else
+               stop_fw = true;
+
+       spin_lock(&mhdp->start_lock);
+       mhdp->hw_state = MHDP_HW_STOPPED;
+       spin_unlock(&mhdp->start_lock);
+
+       if (stop_fw)
+               ret = cdns_mhdp_set_firmware_active(mhdp, false);
+
+       phy_exit(mhdp->phy);
+
+       if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
+               mhdp->info->ops->exit(mhdp);
+
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       cancel_work_sync(&mhdp->modeset_retry_work);
+       flush_scheduled_work();
+
+       clk_disable_unprepare(mhdp->clk);
+
+       return ret;
+}
+
+static const struct of_device_id mhdp_ids[] = {
+       { .compatible = "cdns,mhdp8546", },
+#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
+       { .compatible = "ti,j721e-mhdp8546",
+         .data = &(const struct cdns_mhdp_platform_info) {
+                 .timings = &mhdp_ti_j721e_bridge_timings,
+                 .ops = &mhdp_ti_j721e_ops,
+         },
+       },
+#endif
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mhdp_ids);
+
+static struct platform_driver mhdp_driver = {
+       .driver = {
+               .name           = "cdns-mhdp8546",
+               .of_match_table = of_match_ptr(mhdp_ids),
+       },
+       .probe  = cdns_mhdp_probe,
+       .remove = cdns_mhdp_remove,
+};
+module_platform_driver(mhdp_driver);
+
+MODULE_FIRMWARE(FW_NAME);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
+MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cdns-mhdp8546");
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
new file mode 100644 (file)
index 0000000..5897a85
--- /dev/null
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ * Author: Quentin Schulz <quentin.schulz@free-electrons.com>
+ *         Swapnil Jakhade <sjakhade@cadence.com>
+ */
+
+#ifndef CDNS_MHDP8546_CORE_H
+#define CDNS_MHDP8546_CORE_H
+
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_dp_helper.h>
+
+struct clk;
+struct device;
+struct phy;
+
+/* Register offsets */
+#define CDNS_APB_CTRL                          0x00000
+#define CDNS_CPU_STALL                         BIT(3)
+
+#define CDNS_MAILBOX_FULL                      0x00008
+#define CDNS_MAILBOX_EMPTY                     0x0000c
+#define CDNS_MAILBOX_TX_DATA                   0x00010
+#define CDNS_MAILBOX_RX_DATA                   0x00014
+#define CDNS_KEEP_ALIVE                                0x00018
+#define CDNS_KEEP_ALIVE_MASK                   GENMASK(7, 0)
+
+#define CDNS_VER_L                             0x0001C
+#define CDNS_VER_H                             0x00020
+#define CDNS_LIB_L_ADDR                                0x00024
+#define CDNS_LIB_H_ADDR                                0x00028
+
+#define CDNS_MB_INT_MASK                       0x00034
+#define CDNS_MB_INT_STATUS                     0x00038
+
+#define CDNS_SW_CLK_L                          0x0003c
+#define CDNS_SW_CLK_H                          0x00040
+
+#define CDNS_SW_EVENT0                         0x00044
+#define CDNS_DPTX_HPD                          BIT(0)
+
+#define CDNS_SW_EVENT1                         0x00048
+#define CDNS_SW_EVENT2                         0x0004c
+#define CDNS_SW_EVENT3                         0x00050
+
+#define CDNS_APB_INT_MASK                      0x0006C
+#define CDNS_APB_INT_MASK_MAILBOX_INT          BIT(0)
+#define CDNS_APB_INT_MASK_SW_EVENT_INT         BIT(1)
+
+#define CDNS_APB_INT_STATUS                    0x00070
+
+#define CDNS_DPTX_CAR                          0x00904
+#define CDNS_VIF_CLK_EN                                BIT(0)
+#define CDNS_VIF_CLK_RSTN                      BIT(1)
+
+#define CDNS_SOURCE_VIDEO_IF(s)                        (0x00b00 + ((s) * 0x20))
+#define CDNS_BND_HSYNC2VSYNC(s)                        (CDNS_SOURCE_VIDEO_IF(s) + \
+                                                0x00)
+#define CDNS_IP_DTCT_WIN                       GENMASK(11, 0)
+#define CDNS_IP_DET_INTERLACE_FORMAT           BIT(12)
+#define CDNS_IP_BYPASS_V_INTERFACE             BIT(13)
+
+#define CDNS_HSYNC2VSYNC_POL_CTRL(s)           (CDNS_SOURCE_VIDEO_IF(s) + \
+                                                0x10)
+#define CDNS_H2V_HSYNC_POL_ACTIVE_LOW          BIT(1)
+#define CDNS_H2V_VSYNC_POL_ACTIVE_LOW          BIT(2)
+
+#define CDNS_DPTX_PHY_CONFIG                   0x02000
+#define CDNS_PHY_TRAINING_EN                   BIT(0)
+#define CDNS_PHY_TRAINING_TYPE(x)              (((x) & GENMASK(3, 0)) << 1)
+#define CDNS_PHY_SCRAMBLER_BYPASS              BIT(5)
+#define CDNS_PHY_ENCODER_BYPASS                        BIT(6)
+#define CDNS_PHY_SKEW_BYPASS                   BIT(7)
+#define CDNS_PHY_TRAINING_AUTO                 BIT(8)
+#define CDNS_PHY_LANE0_SKEW(x)                 (((x) & GENMASK(2, 0)) << 9)
+#define CDNS_PHY_LANE1_SKEW(x)                 (((x) & GENMASK(2, 0)) << 12)
+#define CDNS_PHY_LANE2_SKEW(x)                 (((x) & GENMASK(2, 0)) << 15)
+#define CDNS_PHY_LANE3_SKEW(x)                 (((x) & GENMASK(2, 0)) << 18)
+#define CDNS_PHY_COMMON_CONFIG                 (CDNS_PHY_LANE1_SKEW(1) | \
+                                               CDNS_PHY_LANE2_SKEW(2) |  \
+                                               CDNS_PHY_LANE3_SKEW(3))
+#define CDNS_PHY_10BIT_EN                      BIT(21)
+
+#define CDNS_DP_FRAMER_GLOBAL_CONFIG           0x02200
+#define CDNS_DP_NUM_LANES(x)                   ((x) - 1)
+#define CDNS_DP_MST_EN                         BIT(2)
+#define CDNS_DP_FRAMER_EN                      BIT(3)
+#define CDNS_DP_RATE_GOVERNOR_EN               BIT(4)
+#define CDNS_DP_NO_VIDEO_MODE                  BIT(5)
+#define CDNS_DP_DISABLE_PHY_RST                        BIT(6)
+#define CDNS_DP_WR_FAILING_EDGE_VSYNC          BIT(7)
+
+#define CDNS_DP_FRAMER_TU                      0x02208
+#define CDNS_DP_FRAMER_TU_SIZE(x)              (((x) & GENMASK(6, 0)) << 8)
+#define CDNS_DP_FRAMER_TU_VS(x)                        ((x) & GENMASK(5, 0))
+#define CDNS_DP_FRAMER_TU_CNT_RST_EN           BIT(15)
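+/* e.g. TU_SIZE(64) | TU_VS(26) programs a 64-symbol TU with 26 valid symbols */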
+
+#define CDNS_DP_MTPH_CONTROL                   0x02264
+#define CDNS_DP_MTPH_ECF_EN                    BIT(0)
+#define CDNS_DP_MTPH_ACT_EN                    BIT(1)
+#define CDNS_DP_MTPH_LVP_EN                    BIT(2)
+
+#define CDNS_DP_MTPH_STATUS                    0x0226C
+#define CDNS_DP_MTPH_ACT_STATUS                        BIT(0)
+
+#define CDNS_DP_LANE_EN                                0x02300
+#define CDNS_DP_LANE_EN_LANES(x)               GENMASK((x) - 1, 0)
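+/* e.g. CDNS_DP_LANE_EN_LANES(4) = GENMASK(3, 0) = 0xf, enabling lanes 0-3 */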
+
+#define CDNS_DP_ENHNCD                         0x02304
+
+#define CDNS_DPTX_STREAM(s)                    (0x03000 + (s) * 0x80)
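+/* Per-stream register blocks are 0x80 bytes apart, e.g. stream 1 at 0x03080 */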
+#define CDNS_DP_MSA_HORIZONTAL_0(s)            (CDNS_DPTX_STREAM(s) + 0x00)
+#define CDNS_DP_MSAH0_H_TOTAL(x)               (x)
+#define CDNS_DP_MSAH0_HSYNC_START(x)           ((x) << 16)
+
+#define CDNS_DP_MSA_HORIZONTAL_1(s)            (CDNS_DPTX_STREAM(s) + 0x04)
+#define CDNS_DP_MSAH1_HSYNC_WIDTH(x)           (x)
+#define CDNS_DP_MSAH1_HSYNC_POL_LOW            BIT(15)
+#define CDNS_DP_MSAH1_HDISP_WIDTH(x)           ((x) << 16)
+
+#define CDNS_DP_MSA_VERTICAL_0(s)              (CDNS_DPTX_STREAM(s) + 0x08)
+#define CDNS_DP_MSAV0_V_TOTAL(x)               (x)
+#define CDNS_DP_MSAV0_VSYNC_START(x)           ((x) << 16)
+
+#define CDNS_DP_MSA_VERTICAL_1(s)              (CDNS_DPTX_STREAM(s) + 0x0c)
+#define CDNS_DP_MSAV1_VSYNC_WIDTH(x)           (x)
+#define CDNS_DP_MSAV1_VSYNC_POL_LOW            BIT(15)
+#define CDNS_DP_MSAV1_VDISP_WIDTH(x)           ((x) << 16)
+
+#define CDNS_DP_MSA_MISC(s)                    (CDNS_DPTX_STREAM(s) + 0x10)
+#define CDNS_DP_STREAM_CONFIG(s)               (CDNS_DPTX_STREAM(s) + 0x14)
+#define CDNS_DP_STREAM_CONFIG_2(s)             (CDNS_DPTX_STREAM(s) + 0x2c)
+#define CDNS_DP_SC2_TU_VS_DIFF(x)              ((x) << 8)
+
+#define CDNS_DP_HORIZONTAL(s)                  (CDNS_DPTX_STREAM(s) + 0x30)
+#define CDNS_DP_H_HSYNC_WIDTH(x)               (x)
+#define CDNS_DP_H_H_TOTAL(x)                   ((x) << 16)
+
+#define CDNS_DP_VERTICAL_0(s)                  (CDNS_DPTX_STREAM(s) + 0x34)
+#define CDNS_DP_V0_VHEIGHT(x)                  (x)
+#define CDNS_DP_V0_VSTART(x)                   ((x) << 16)
+
+#define CDNS_DP_VERTICAL_1(s)                  (CDNS_DPTX_STREAM(s) + 0x38)
+#define CDNS_DP_V1_VTOTAL(x)                   (x)
+#define CDNS_DP_V1_VTOTAL_EVEN                 BIT(16)
+
+#define CDNS_DP_MST_SLOT_ALLOCATE(s)           (CDNS_DPTX_STREAM(s) + 0x44)
+#define CDNS_DP_S_ALLOC_START_SLOT(x)          (x)
+#define CDNS_DP_S_ALLOC_END_SLOT(x)            ((x) << 8)
+
+#define CDNS_DP_RATE_GOVERNING(s)              (CDNS_DPTX_STREAM(s) + 0x48)
+#define CDNS_DP_RG_TARG_AV_SLOTS_Y(x)          (x)
+#define CDNS_DP_RG_TARG_AV_SLOTS_X(x)          ((x) << 4)
+#define CDNS_DP_RG_ENABLE                      BIT(10)
+
+#define CDNS_DP_FRAMER_PXL_REPR(s)             (CDNS_DPTX_STREAM(s) + 0x4c)
+#define CDNS_DP_FRAMER_6_BPC                   BIT(0)
+#define CDNS_DP_FRAMER_8_BPC                   BIT(1)
+#define CDNS_DP_FRAMER_10_BPC                  BIT(2)
+#define CDNS_DP_FRAMER_12_BPC                  BIT(3)
+#define CDNS_DP_FRAMER_16_BPC                  BIT(4)
+#define CDNS_DP_FRAMER_PXL_FORMAT              0x8
+#define CDNS_DP_FRAMER_RGB                     BIT(0)
+#define CDNS_DP_FRAMER_YCBCR444                        BIT(1)
+#define CDNS_DP_FRAMER_YCBCR422                        BIT(2)
+#define CDNS_DP_FRAMER_YCBCR420                        BIT(3)
+#define CDNS_DP_FRAMER_Y_ONLY                  BIT(4)
+
+#define CDNS_DP_FRAMER_SP(s)                   (CDNS_DPTX_STREAM(s) + 0x50)
+#define CDNS_DP_FRAMER_VSYNC_POL_LOW           BIT(0)
+#define CDNS_DP_FRAMER_HSYNC_POL_LOW           BIT(1)
+#define CDNS_DP_FRAMER_INTERLACE               BIT(2)
+
+#define CDNS_DP_LINE_THRESH(s)                 (CDNS_DPTX_STREAM(s) + 0x64)
+#define CDNS_DP_ACTIVE_LINE_THRESH(x)          (x)
+
+#define CDNS_DP_VB_ID(s)                       (CDNS_DPTX_STREAM(s) + 0x68)
+#define CDNS_DP_VB_ID_INTERLACED               BIT(2)
+#define CDNS_DP_VB_ID_COMPRESSED               BIT(6)
+
+#define CDNS_DP_FRONT_BACK_PORCH(s)            (CDNS_DPTX_STREAM(s) + 0x78)
+#define CDNS_DP_BACK_PORCH(x)                  (x)
+#define CDNS_DP_FRONT_PORCH(x)                 ((x) << 16)
+
+#define CDNS_DP_BYTE_COUNT(s)                  (CDNS_DPTX_STREAM(s) + 0x7c)
+#define CDNS_DP_BYTE_COUNT_BYTES_IN_CHUNK_SHIFT        16
+
+/* mailbox */
+#define MAILBOX_RETRY_US                       1000
+#define MAILBOX_TIMEOUT_US                     2000000
+
+#define MB_OPCODE_ID                           0
+#define MB_MODULE_ID                           1
+#define MB_SIZE_MSB_ID                         2
+#define MB_SIZE_LSB_ID                         3
+#define MB_DATA_ID                             4
+
+#define MB_MODULE_ID_DP_TX                     0x01
+#define MB_MODULE_ID_HDCP_TX                   0x07
+#define MB_MODULE_ID_HDCP_RX                   0x08
+#define MB_MODULE_ID_HDCP_GENERAL              0x09
+#define MB_MODULE_ID_GENERAL                   0x0a
+
+/* firmware and opcodes */
+#define FW_NAME                                        "cadence/mhdp8546.bin"
+#define CDNS_MHDP_IMEM                         0x10000
+
+#define GENERAL_MAIN_CONTROL                   0x01
+#define GENERAL_TEST_ECHO                      0x02
+#define GENERAL_BUS_SETTINGS                   0x03
+#define GENERAL_TEST_ACCESS                    0x04
+#define GENERAL_REGISTER_READ                  0x07
+
+#define DPTX_SET_POWER_MNG                     0x00
+#define DPTX_GET_EDID                          0x02
+#define DPTX_READ_DPCD                         0x03
+#define DPTX_WRITE_DPCD                                0x04
+#define DPTX_ENABLE_EVENT                      0x05
+#define DPTX_WRITE_REGISTER                    0x06
+#define DPTX_READ_REGISTER                     0x07
+#define DPTX_WRITE_FIELD                       0x08
+#define DPTX_READ_EVENT                                0x0a
+#define DPTX_GET_LAST_AUX_STAUS                        0x0e
+#define DPTX_HPD_STATE                         0x11
+#define DPTX_ADJUST_LT                         0x12
+
+#define FW_STANDBY                             0
+#define FW_ACTIVE                              1
+
+/* HPD */
+#define DPTX_READ_EVENT_HPD_TO_HIGH             BIT(0)
+#define DPTX_READ_EVENT_HPD_TO_LOW              BIT(1)
+#define DPTX_READ_EVENT_HPD_PULSE               BIT(2)
+#define DPTX_READ_EVENT_HPD_STATE               BIT(3)
+
+/* general */
+#define CDNS_DP_TRAINING_PATTERN_4             0x7
+
+#define CDNS_KEEP_ALIVE_TIMEOUT                        2000
+
+#define CDNS_VOLT_SWING(x)                     ((x) & GENMASK(1, 0))
+#define CDNS_FORCE_VOLT_SWING                  BIT(2)
+
+#define CDNS_PRE_EMPHASIS(x)                   ((x) & GENMASK(1, 0))
+#define CDNS_FORCE_PRE_EMPHASIS                        BIT(2)
+
+#define CDNS_SUPPORT_TPS(x)                    BIT((x) - 1)
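+/* e.g. CDNS_SUPPORT_TPS(3) = BIT(2) advertises DP training pattern 3 */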
+
+#define CDNS_FAST_LINK_TRAINING                        BIT(0)
+
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x)     ((x) & GENMASK(1, 0))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x)     ((x) & GENMASK(3, 2))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x)     ((x) & GENMASK(5, 4))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x)     ((x) & GENMASK(7, 6))
+#define CDNS_LANE_MAPPING_NORMAL               0xe4
+#define CDNS_LANE_MAPPING_FLIPPED              0x1b
+
+#define CDNS_DP_MAX_NUM_LANES                  4
+#define CDNS_DP_TEST_VSC_SDP                   BIT(6) /* 1.3+ */
+#define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY   BIT(7)
+
+#define CDNS_MHDP_MAX_STREAMS                  4
+
+#define DP_LINK_CAP_ENHANCED_FRAMING           BIT(0)
+
+struct cdns_mhdp_link {
+       unsigned char revision;
+       unsigned int rate;
+       unsigned int num_lanes;
+       unsigned long capabilities;
+};
+
+struct cdns_mhdp_host {
+       unsigned int link_rate;
+       u8 lanes_cnt;
+       u8 volt_swing;
+       u8 pre_emphasis;
+       u8 pattern_supp;
+       u8 lane_mapping;
+       bool fast_link;
+       bool enhanced;
+       bool scrambler;
+       bool ssc;
+};
+
+struct cdns_mhdp_sink {
+       unsigned int link_rate;
+       u8 lanes_cnt;
+       u8 pattern_supp;
+       bool fast_link;
+       bool enhanced;
+       bool ssc;
+};
+
+struct cdns_mhdp_display_fmt {
+       u32 color_format;
+       u32 bpc;
+       bool y_only;
+};
+
+/*
+ * This enum represents the MHDP HW initialization state.
+ * Legal state transitions are:
+ * MHDP_HW_READY <-> MHDP_HW_STOPPED
+ */
+enum mhdp_hw_state {
+       MHDP_HW_READY = 1,      /* HW ready, FW active */
+       MHDP_HW_STOPPED         /* Driver removal: FW to be stopped */
+};
+
+struct cdns_mhdp_device;
+
+struct mhdp_platform_ops {
+       int (*init)(struct cdns_mhdp_device *mhdp);
+       void (*exit)(struct cdns_mhdp_device *mhdp);
+       void (*enable)(struct cdns_mhdp_device *mhdp);
+       void (*disable)(struct cdns_mhdp_device *mhdp);
+};
+
+struct cdns_mhdp_bridge_state {
+       struct drm_bridge_state base;
+       struct drm_display_mode *current_mode;
+};
+
+struct cdns_mhdp_platform_info {
+       const struct drm_bridge_timings *timings;
+       const struct mhdp_platform_ops *ops;
+};
+
+#define to_cdns_mhdp_bridge_state(s) \
+               container_of(s, struct cdns_mhdp_bridge_state, base)
+
+struct cdns_mhdp_device {
+       void __iomem *regs;
+       void __iomem *j721e_regs;
+
+       struct device *dev;
+       struct clk *clk;
+       struct phy *phy;
+
+       const struct cdns_mhdp_platform_info *info;
+
+       /* This is to protect mailbox communications with the firmware */
+       struct mutex mbox_mutex;
+
+       /*
+        * "link_mutex" protects the access to all the link parameters
+        * including the link training process. Link training will be
+        * invoked both from threaded interrupt handler and from atomic
+        * callbacks when link_up is not set. So this mutex protects
+        * flags such as link_up, bridge_enabled, link.num_lanes,
+        * link.rate etc.
+        */
+       struct mutex link_mutex;
+
+       struct drm_connector connector;
+       struct drm_bridge bridge;
+
+       struct cdns_mhdp_link link;
+       struct drm_dp_aux aux;
+
+       struct cdns_mhdp_host host;
+       struct cdns_mhdp_sink sink;
+       struct cdns_mhdp_display_fmt display_fmt;
+       u8 stream_id;
+
+       bool link_up;
+       bool plugged;
+
+       /*
+        * "start_lock" protects the access to bridge_attached and
+        * hw_state data members that control the delayed firmware
+        * loading and attaching the bridge. They are accessed from
+        * both the DRM core and cdns_mhdp_fw_cb(). In most cases just
+        * protecting the data members is enough, but the irq mask
+        * setting needs to be protected when enabling the FW.
+        */
+       spinlock_t start_lock;
+       bool bridge_attached;
+       bool bridge_enabled;
+       enum mhdp_hw_state hw_state;
+       wait_queue_head_t fw_load_wq;
+
+       /* Work struct to schedule a uevent on link train failure */
+       struct work_struct modeset_retry_work;
+};
+
+#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector)
+#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge)
+
+#endif
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
new file mode 100644 (file)
index 0000000..dfe1b59
--- /dev/null
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence MHDP8546 DP wrapper
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "cdns-mhdp8546-j721e.h"
+
+#define        REVISION                        0x00
+#define        DPTX_IPCFG                      0x04
+#define        ECC_MEM_CFG                     0x08
+#define        DPTX_DSC_CFG                    0x0c
+#define        DPTX_SRC_CFG                    0x10
+#define        DPTX_VIF_SECURE_MODE_CFG        0x14
+#define        DPTX_VIF_CONN_STATUS            0x18
+#define        PHY_CLK_STATUS                  0x1c
+
+#define DPTX_SRC_AIF_EN                        BIT(16)
+#define DPTX_SRC_VIF_3_IN30B           BIT(11)
+#define DPTX_SRC_VIF_2_IN30B           BIT(10)
+#define DPTX_SRC_VIF_1_IN30B           BIT(9)
+#define DPTX_SRC_VIF_0_IN30B           BIT(8)
+#define DPTX_SRC_VIF_3_SEL_DPI5                BIT(7)
+#define DPTX_SRC_VIF_3_SEL_DPI3                0
+#define DPTX_SRC_VIF_2_SEL_DPI4                BIT(6)
+#define DPTX_SRC_VIF_2_SEL_DPI2                0
+#define DPTX_SRC_VIF_1_SEL_DPI3                BIT(5)
+#define DPTX_SRC_VIF_1_SEL_DPI1                0
+#define DPTX_SRC_VIF_0_SEL_DPI2                BIT(4)
+#define DPTX_SRC_VIF_0_SEL_DPI0                0
+#define DPTX_SRC_VIF_3_EN              BIT(3)
+#define DPTX_SRC_VIF_2_EN              BIT(2)
+#define DPTX_SRC_VIF_1_EN              BIT(1)
+#define DPTX_SRC_VIF_0_EN              BIT(0)
+
+/* TODO: handle DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */
+
+static int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp)
+{
+       struct platform_device *pdev = to_platform_device(mhdp->dev);
+
+       mhdp->j721e_regs = devm_platform_ioremap_resource(pdev, 1);
+       return PTR_ERR_OR_ZERO(mhdp->j721e_regs);
+}
+
+static void cdns_mhdp_j721e_enable(struct cdns_mhdp_device *mhdp)
+{
+       /*
+        * Enable VIF_0 and select DPI2 as its input. DSS0 DPI0 is connected
+        * to eDP DPI2. This is the only supported SST configuration on
+        * J721E.
+        */
+       writel(DPTX_SRC_VIF_0_EN | DPTX_SRC_VIF_0_SEL_DPI2,
+              mhdp->j721e_regs + DPTX_SRC_CFG);
+}
+
+static void cdns_mhdp_j721e_disable(struct cdns_mhdp_device *mhdp)
+{
+       /* Put everything back to defaults */
+       writel(0, mhdp->j721e_regs + DPTX_DSC_CFG);
+}
+
+const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
+       .init = cdns_mhdp_j721e_init,
+       .enable = cdns_mhdp_j721e_enable,
+       .disable = cdns_mhdp_j721e_disable,
+};
+
+const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = {
+       .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+                          DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
+                          DRM_BUS_FLAG_DE_HIGH,
+};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
new file mode 100644 (file)
index 0000000..97d20d1
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI j721e Cadence MHDP8546 DP wrapper
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef CDNS_MHDP8546_J721E_H
+#define CDNS_MHDP8546_J721E_H
+
+#include "cdns-mhdp8546-core.h"
+
+struct mhdp_platform_ops;
+
+extern const struct mhdp_platform_ops mhdp_ti_j721e_ops;
+extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings;
+
+#endif /* !CDNS_MHDP8546_J721E_H */
index f19d9f7..f52ccff 100644 (file)
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 
 #include <drm/drm_bridge.h>
 #include <drm/drm_panel.h>
 
 struct lvds_codec {
+       struct device *dev;
        struct drm_bridge bridge;
        struct drm_bridge *panel_bridge;
+       struct regulator *vcc;
        struct gpio_desc *powerdown_gpio;
        u32 connector_type;
 };
@@ -38,6 +41,14 @@ static int lvds_codec_attach(struct drm_bridge *bridge,
 static void lvds_codec_enable(struct drm_bridge *bridge)
 {
        struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+       int ret;
+
+       ret = regulator_enable(lvds_codec->vcc);
+       if (ret) {
+               dev_err(lvds_codec->dev,
+                       "Failed to enable regulator \"vcc\": %d\n", ret);
+               return;
+       }
 
        if (lvds_codec->powerdown_gpio)
                gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -46,9 +57,15 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
 static void lvds_codec_disable(struct drm_bridge *bridge)
 {
        struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+       int ret;
 
        if (lvds_codec->powerdown_gpio)
                gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
+
+       ret = regulator_disable(lvds_codec->vcc);
+       if (ret)
+               dev_err(lvds_codec->dev,
+                       "Failed to disable regulator \"vcc\": %d\n", ret);
 }
 
 static const struct drm_bridge_funcs funcs = {
@@ -63,12 +80,24 @@ static int lvds_codec_probe(struct platform_device *pdev)
        struct device_node *panel_node;
        struct drm_panel *panel;
        struct lvds_codec *lvds_codec;
+       int ret;
 
        lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
        if (!lvds_codec)
                return -ENOMEM;
 
+       lvds_codec->dev = &pdev->dev;
        lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
+
+       lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power");
+       if (IS_ERR(lvds_codec->vcc)) {
+               ret = PTR_ERR(lvds_codec->vcc);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(lvds_codec->dev,
+                               "Unable to get \"vcc\" supply: %d\n", ret);
+               return ret;
+       }
+
        lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
                                                             GPIOD_OUT_HIGH);
        if (IS_ERR(lvds_codec->powerdown_gpio))
index 03e01b0..0fe3c49 100644 (file)
@@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
                struct sg_page_iter sg_iter;
 
                mb(); /*CLFLUSH is ordered only by using memory barriers*/
-               for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+               for_each_sgtable_page(st, &sg_iter, 0)
                        drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb(); /*Make sure that all cache line entry is flushed*/
 
index 1e7c638..90807a6 100644 (file)
@@ -363,6 +363,66 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 }
 EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
 
+static bool is_edid_digital_input_dp(const struct edid *edid)
+{
+       return edid && edid->revision >= 4 &&
+               edid->input & DRM_EDID_INPUT_DIGITAL &&
+               (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP;
+}
+
+/**
+ * drm_dp_downstream_is_type() - is the downstream facing port of a certain type?
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Caveat: Only works with DPCD 1.1+ port caps.
+ *
+ * Returns: whether the downstream facing port matches the type.
+ */
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                              const u8 port_cap[4], u8 type)
+{
+       return drm_dp_is_branch(dpcd) &&
+               dpcd[DP_DPCD_REV] >= 0x11 &&
+               (port_cap[0] & DP_DS_PORT_TYPE_MASK) == type;
+}
+EXPORT_SYMBOL(drm_dp_downstream_is_type);
+
+/**
+ * drm_dp_downstream_is_tmds() - is the downstream facing port TMDS?
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @edid: EDID
+ *
+ * Returns: whether the downstream facing port is TMDS (HDMI/DVI).
+ */
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                              const u8 port_cap[4],
+                              const struct edid *edid)
+{
+       if (dpcd[DP_DPCD_REV] < 0x11) {
+               switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+               case DP_DWN_STRM_PORT_TYPE_TMDS:
+                       return true;
+               default:
+                       return false;
+               }
+       }
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               if (is_edid_digital_input_dp(edid))
+                       return false;
+               fallthrough;
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+               return true;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_is_tmds);
+
 /**
  * drm_dp_send_real_edid_checksum() - send back real edid checksum value
  * @aux: DisplayPort AUX channel
@@ -545,79 +605,191 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
        ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
        if (ret < 0)
                return ret;
+       if (ret != len)
+               return -EIO;
+
+       DRM_DEBUG_KMS("%s: DPCD DFP: %*ph\n",
+                     aux->name, len, downstream_ports);
 
-       return ret == len ? 0 : -EIO;
+       return 0;
 }
 EXPORT_SYMBOL(drm_dp_read_downstream_info);
 
 /**
- * drm_dp_downstream_max_clock() - extract branch device max
- *                                 pixel rate for legacy VGA
- *                                 converter or max TMDS clock
- *                                 rate for others
+ * drm_dp_downstream_max_dotclock() - extract downstream facing port max dot clock
  * @dpcd: DisplayPort configuration data
  * @port_cap: port capabilities
  *
- * See also:
- * drm_dp_read_downstream_info()
- * drm_dp_downstream_max_bpc()
- *
- * Returns: Max clock in kHz on success or 0 if max clock not defined
+ * Returns: Downstream facing port max dot clock in kHz on success,
+ * or 0 if max clock not defined
  */
-int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                               const u8 port_cap[4])
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                  const u8 port_cap[4])
 {
-       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
-       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-               DP_DETAILED_CAP_INFO_AVAILABLE;
+       if (!drm_dp_is_branch(dpcd))
+               return 0;
 
-       if (!detailed_cap_info)
+       if (dpcd[DP_DPCD_REV] < 0x11)
                return 0;
 
-       switch (type) {
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
        case DP_DS_PORT_TYPE_VGA:
-               return port_cap[1] * 8 * 1000;
-       case DP_DS_PORT_TYPE_DVI:
-       case DP_DS_PORT_TYPE_HDMI:
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return 0;
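+               /*
+                * port_cap[1] holds the max dotclock in 8 MHz units, so
+                * e.g. a (hypothetical) value of 30 means 30 * 8000 = 240000 kHz.
+                */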
+               return port_cap[1] * 8000;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_dotclock);
+
+/**
+ * drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @edid: EDID
+ *
+ * Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success,
+ * or 0 if max TMDS clock not defined
+ */
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                    const u8 port_cap[4],
+                                    const struct edid *edid)
+{
+       if (!drm_dp_is_branch(dpcd))
+               return 0;
+
+       if (dpcd[DP_DPCD_REV] < 0x11) {
+               switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+               case DP_DWN_STRM_PORT_TYPE_TMDS:
+                       return 165000;
+               default:
+                       return 0;
+               }
+       }
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
        case DP_DS_PORT_TYPE_DP_DUALMODE:
+               if (is_edid_digital_input_dp(edid))
+                       return 0;
+               /*
+                * It's left up to the driver to check the
+                * DP dual mode adapter's max TMDS clock.
+                *
+                * Unfortunately it looks like branch devices
+                * may not forward the DP dual mode i2c
+                * access, so we usually just get an i2c NAK :(
+                */
+               fallthrough;
+       case DP_DS_PORT_TYPE_HDMI:
+               /*
+                * We should perhaps assume 165 MHz when detailed cap
+                * info is not available. But it looks like many typical
+                * branch devices fall into that category and so we'd
+                * probably end up with users complaining that they can't
+                * get high resolution modes with their favorite dongle.
+                *
+                * So let's limit to 300 MHz instead, since DPCD 1.4
+                * HDMI 2.0 DFPs are required to have the detailed cap
+                * info. So it's more likely we're dealing with an HDMI 1.4
+                * compatible device here.
+                */
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return 300000;
+               return port_cap[1] * 2500;
+       case DP_DS_PORT_TYPE_DVI:
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return 165000;
+               /* FIXME what to do about DVI dual link? */
                return port_cap[1] * 2500;
        default:
                return 0;
        }
 }
-EXPORT_SYMBOL(drm_dp_downstream_max_clock);
+EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock);
 
 /**
- * drm_dp_downstream_max_bpc() - extract branch device max
- *                               bits per component
+ * drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock
  * @dpcd: DisplayPort configuration data
  * @port_cap: port capabilities
+ * @edid: EDID
  *
- * See also:
- * drm_dp_read_downstream_info()
- * drm_dp_downstream_max_clock()
+ * Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success,
+ * or 0 if min TMDS clock not defined
+ */
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                    const u8 port_cap[4],
+                                    const struct edid *edid)
+{
+       if (!drm_dp_is_branch(dpcd))
+               return 0;
+
+       if (dpcd[DP_DPCD_REV] < 0x11) {
+               switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+               case DP_DWN_STRM_PORT_TYPE_TMDS:
+                       return 25000;
+               default:
+                       return 0;
+               }
+       }
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               if (is_edid_digital_input_dp(edid))
+                       return 0;
+               fallthrough;
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+               /*
+                * Unclear whether the protocol converter could
+                * utilize pixel replication. Assume it won't.
+                */
+               return 25000;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock);
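
A hedged sketch of how a driver might use the two TMDS clock helpers from its
mode_valid hook (illustration only; "tmds_clock" is assumed to be the TMDS
character rate of the candidate mode in kHz):

        static enum drm_mode_status
        example_dfp_mode_valid(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                               const u8 port_cap[4],
                               const struct edid *edid, int tmds_clock)
        {
                int max = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
                int min = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid);

                /* 0 from either helper means "no limit known" */
                if (max && tmds_clock > max)
                        return MODE_CLOCK_HIGH;
                if (min && tmds_clock < min)
                        return MODE_CLOCK_LOW;
                return MODE_OK;
        }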
+
+/**
+ * drm_dp_downstream_max_bpc() - extract downstream facing port max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @edid: EDID
  *
  * Returns: Max bpc on success or 0 if max bpc not defined
  */
 int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                             const u8 port_cap[4])
+                             const u8 port_cap[4],
+                             const struct edid *edid)
 {
-       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
-       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-               DP_DETAILED_CAP_INFO_AVAILABLE;
-       int bpc;
-
-       if (!detailed_cap_info)
+       if (!drm_dp_is_branch(dpcd))
                return 0;
 
-       switch (type) {
-       case DP_DS_PORT_TYPE_VGA:
-       case DP_DS_PORT_TYPE_DVI:
-       case DP_DS_PORT_TYPE_HDMI:
+       if (dpcd[DP_DPCD_REV] < 0x11) {
+               switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+               case DP_DWN_STRM_PORT_TYPE_DP:
+                       return 0;
+               default:
+                       return 8;
+               }
+       }
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_DP:
+               return 0;
        case DP_DS_PORT_TYPE_DP_DUALMODE:
-               bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+               if (is_edid_digital_input_dp(edid))
+                       return 0;
+               fallthrough;
+       case DP_DS_PORT_TYPE_HDMI:
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_VGA:
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return 8;
 
-               switch (bpc) {
+               switch (port_cap[2] & DP_DS_MAX_BPC_MASK) {
                case DP_DS_8BPC:
                        return 8;
                case DP_DS_10BPC:
@@ -626,15 +798,131 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                        return 12;
                case DP_DS_16BPC:
                        return 16;
+               default:
+                       return 8;
                }
-               fallthrough;
+               break;
        default:
-               return 0;
+               return 8;
        }
 }
 EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
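
For illustration, a driver could cap its pipe bpp with the value returned
above; a return of 0 means the DFP imposes no limit. A sketch, assuming
hypothetical locals dpcd/port_cap/edid/pipe_bpp:

        int dfp_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);

        if (dfp_bpc)
                pipe_bpp = min(pipe_bpp, 3 * dfp_bpc);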
 
 /**
+ * drm_dp_downstream_420_passthrough() - determine downstream facing port
+ *                                       YCbCr 4:2:0 pass-through capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ *
+ * Returns: whether the downstream facing port can pass through YCbCr 4:2:0
+ */
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                      const u8 port_cap[4])
+{
+       if (!drm_dp_is_branch(dpcd))
+               return false;
+
+       if (dpcd[DP_DPCD_REV] < 0x13)
+               return false;
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_DP:
+               return true;
+       case DP_DS_PORT_TYPE_HDMI:
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return false;
+
+               return port_cap[3] & DP_DS_HDMI_YCBCR420_PASS_THROUGH;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_420_passthrough);
+
+/**
+ * drm_dp_downstream_444_to_420_conversion() - determine downstream facing port
+ *                                             YCbCr 4:4:4->4:2:0 conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ *
+ * Returns: whether the downstream facing port can convert YCbCr 4:4:4 to 4:2:0
+ */
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                            const u8 port_cap[4])
+{
+       if (!drm_dp_is_branch(dpcd))
+               return false;
+
+       if (dpcd[DP_DPCD_REV] < 0x13)
+               return false;
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_HDMI:
+               if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+                       return false;
+
+               return port_cap[3] & DP_DS_HDMI_YCBCR444_TO_420_CONV;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
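
Taken together, the two predicates above let a source decide how YCbCr 4:2:0
can reach an HDMI sink behind the branch. A hedged sketch (illustration only,
assuming dpcd/port_cap in scope):

        if (drm_dp_downstream_420_passthrough(dpcd, port_cap)) {
                /* transmit YCbCr 4:2:0 on the DP link as-is */
        } else if (drm_dp_downstream_444_to_420_conversion(dpcd, port_cap)) {
                /* transmit YCbCr 4:4:4 and let the DFP downsample to 4:2:0 */
        } else {
                /* 4:2:0-only modes cannot be driven through this DFP */
        }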
+
+/**
+ * drm_dp_downstream_mode() - return a mode for downstream facing port
+ * drm_dp_downstream_mode() - return a mode for downstream facing port
+ * @dev: DRM device
+ * @dpcd: DisplayPort configuration data
+ *
+ * Provides a suitable mode for downstream facing ports without EDID.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_dp_downstream_mode(struct drm_device *dev,
+                      const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                      const u8 port_cap[4])
+{
+       u8 vic;
+
+       if (!drm_dp_is_branch(dpcd))
+               return NULL;
+
+       if (dpcd[DP_DPCD_REV] < 0x11)
+               return NULL;
+
+       switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+       case DP_DS_PORT_TYPE_NON_EDID:
+               switch (port_cap[0] & DP_DS_NON_EDID_MASK) {
+               case DP_DS_NON_EDID_720x480i_60:
+                       vic = 6;
+                       break;
+               case DP_DS_NON_EDID_720x480i_50:
+                       vic = 21;
+                       break;
+               case DP_DS_NON_EDID_1920x1080i_60:
+                       vic = 5;
+                       break;
+               case DP_DS_NON_EDID_1920x1080i_50:
+                       vic = 20;
+                       break;
+               case DP_DS_NON_EDID_1280x720_60:
+                       vic = 4;
+                       break;
+               case DP_DS_NON_EDID_1280x720_50:
+                       vic = 19;
+                       break;
+               default:
+                       return NULL;
+               }
+               return drm_display_mode_from_cea_vic(dev, vic);
+       default:
+               return NULL;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_mode);
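
A minimal sketch of the intended caller, e.g. a connector get_modes hook
falling back to the DFP's fixed mode when the sink provides no EDID
(illustration only):

        static int example_get_modes(struct drm_connector *connector,
                                     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                     const u8 port_cap[4])
        {
                struct drm_display_mode *mode;

                mode = drm_dp_downstream_mode(connector->dev, dpcd, port_cap);
                if (!mode)
                        return 0;

                drm_mode_probed_add(connector, mode);
                return 1;       /* number of probed modes */
        }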
+
+/**
  * drm_dp_downstream_id() - identify branch device
  * @aux: DisplayPort AUX channel
  * @id: DisplayPort branch device id
@@ -652,12 +940,15 @@ EXPORT_SYMBOL(drm_dp_downstream_id);
  * @m: pointer for debugfs file
  * @dpcd: DisplayPort configuration data
  * @port_cap: port capabilities
+ * @edid: EDID
  * @aux: DisplayPort AUX channel
  *
  */
 void drm_dp_downstream_debug(struct seq_file *m,
                             const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                            const u8 port_cap[4], struct drm_dp_aux *aux)
+                            const u8 port_cap[4],
+                            const struct edid *edid,
+                            struct drm_dp_aux *aux)
 {
        bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                                 DP_DETAILED_CAP_INFO_AVAILABLE;
@@ -715,16 +1006,19 @@ void drm_dp_downstream_debug(struct seq_file *m,
                seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
 
        if (detailed_cap_info) {
-               clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+               clk = drm_dp_downstream_max_dotclock(dpcd, port_cap);
+               if (clk > 0)
+                       seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
 
-               if (clk > 0) {
-                       if (type == DP_DS_PORT_TYPE_VGA)
-                               seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
-                       else
-                               seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
-               }
+               clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
+               if (clk > 0)
+                       seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+
+               clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid);
+               if (clk > 0)
+                       seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk);
 
-               bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+               bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);
 
                if (bpc > 0)
                        seq_printf(m, "\t\tMax bpc: %d\n", bpc);
index b9c5a98..e875425 100644 (file)
  * OF THIS SOFTWARE.
  */
 
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/iopoll.h>
@@ -423,6 +425,22 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                idx += req->u.i2c_write.num_bytes;
                break;
+       case DP_QUERY_STREAM_ENC_STATUS: {
+               const struct drm_dp_query_stream_enc_status *msg;
+
+               msg = &req->u.enc_status;
+               buf[idx] = msg->stream_id;
+               idx++;
+               memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
+               idx += sizeof(msg->client_id);
+               buf[idx] = 0;
+               buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
+               buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
+               buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
+               buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
+               idx++;
+               }
+               break;
        }
        raw->cur_len = idx;
 }
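
For readers unfamiliar with the bitfield helpers used above: the byte packed
after the client_id carries stream_event in bits 1:0, valid_stream_event in
bit 2, stream_behavior in bits 4:3 and valid_stream_behavior in bit 5. A tiny
example of the helpers themselves:

        /* FIELD_PREP places a value into the bits selected by the mask,
         * FIELD_GET extracts it again.
         */
        u8 v = FIELD_PREP(GENMASK(4, 3), 2);    /* v == 0x10 */
        u8 b = FIELD_GET(GENMASK(4, 3), v);     /* b == 2 */
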
@@ -551,6 +569,20 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
                                return -ENOMEM;
                }
                break;
+       case DP_QUERY_STREAM_ENC_STATUS:
+               req->u.enc_status.stream_id = buf[idx++];
+               for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
+                       req->u.enc_status.client_id[i] = buf[idx++];
+
+               req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
+                                                          buf[idx]);
+               req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
+                                                                buf[idx]);
+               req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
+                                                             buf[idx]);
+               req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
+                                                                   buf[idx]);
+               break;
        }
 
        return 0;
@@ -629,6 +661,16 @@ drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req
                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
                  req->u.i2c_write.bytes);
                break;
+       case DP_QUERY_STREAM_ENC_STATUS:
+               P("stream_id=%u client_id=%*ph stream_event=%x "
+                 "valid_event=%d stream_behavior=%x valid_behavior=%d",
+                 req->u.enc_status.stream_id,
+                 (int)ARRAY_SIZE(req->u.enc_status.client_id),
+                 req->u.enc_status.client_id, req->u.enc_status.stream_event,
+                 req->u.enc_status.valid_stream_event,
+                 req->u.enc_status.stream_behavior,
+                 req->u.enc_status.valid_stream_behavior);
+               break;
        default:
                P("???\n");
                break;
@@ -936,6 +978,42 @@ static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_ms
        return true;
 }
 
+static bool
+drm_dp_sideband_parse_query_stream_enc_status(
+                               struct drm_dp_sideband_msg_rx *raw,
+                               struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+       struct drm_dp_query_stream_enc_status_ack_reply *reply;
+
+       reply = &repmsg->u.enc_status;
+
+       reply->stream_id = raw->msg[3];
+
+       reply->reply_signed = raw->msg[2] & BIT(0);
+
+       /*
+        * NOTE: It's my impression from reading the spec that the below parsing
+        * is correct. However I noticed while testing with an HDCP 1.4 display
+        * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
+        * would expect both bits to be set. So keep the parsing following the
+        * spec, but beware reality might not match the spec (at least for some
+        * configurations).
+        */
+       reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
+       reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
+
+       reply->query_capable_device_present = raw->msg[2] & BIT(5);
+       reply->legacy_device_present = raw->msg[2] & BIT(6);
+       reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
+
+       reply->auth_completed = !!(raw->msg[1] & BIT(3));
+       reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
+       reply->repeater_present = !!(raw->msg[1] & BIT(5));
+       reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
+
+       return true;
+}
+
 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                        struct drm_dp_sideband_msg_reply_body *msg)
 {
@@ -972,6 +1050,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        case DP_CLEAR_PAYLOAD_ID_TABLE:
                return true; /* since there's nothing to parse */
+       case DP_QUERY_STREAM_ENC_STATUS:
+               return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
        default:
                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
@@ -1123,6 +1203,25 @@ static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
        msg->path_msg = true;
 }
 
+static int
+build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
+                             u8 *q_id)
+{
+       struct drm_dp_sideband_msg_req_body req;
+
+       req.req_type = DP_QUERY_STREAM_ENC_STATUS;
+       req.u.enc_status.stream_id = stream_id;
+       memcpy(req.u.enc_status.client_id, q_id,
+              sizeof(req.u.enc_status.client_id));
+       req.u.enc_status.stream_event = 0;
+       req.u.enc_status.valid_stream_event = false;
+       req.u.enc_status.stream_behavior = 0;
+       req.u.enc_status.valid_stream_behavior = false;
+
+       drm_dp_encode_sideband_req(&req, msg);
+       return 0;
+}
+
 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_vcpi *vcpi)
 {
@@ -3155,6 +3254,57 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 }
 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
 
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+               struct drm_dp_mst_port *port,
+               struct drm_dp_query_stream_enc_status_ack_reply *status)
+{
+       struct drm_dp_sideband_msg_tx *txmsg;
+       u8 nonce[7];
+       int len, ret;
+
+       txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+       if (!txmsg)
+               return -ENOMEM;
+
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
+       if (!port) {
+               ret = -EINVAL;
+               goto out_get_port;
+       }
+
+       get_random_bytes(nonce, sizeof(nonce));
+
+       /*
+        * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
+        *  transaction at the MST Branch device directly connected to the
+        *  Source"
+        */
+       txmsg->dst = mgr->mst_primary;
+
+       len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
+
+       drm_dp_queue_down_tx(mgr, txmsg);
+
+       ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
+       if (ret < 0) {
+               goto out;
+       } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+               drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       ret = 0;
+       memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
+
+out:
+       drm_dp_mst_topology_put_port(port);
+out_get_port:
+       kfree(txmsg);
+       return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
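
A hedged usage sketch, e.g. from a driver's periodic HDCP link check
(illustration only; mgr and port are assumed to be the topology manager and
the stream's MST port, and the policy on failure is the driver's):

        struct drm_dp_query_stream_enc_status_ack_reply reply;
        int ret;

        ret = drm_dp_send_query_stream_enc_status(mgr, port, &reply);
        if (ret)
                return ret;

        if (!reply.auth_completed || !reply.encryption_enabled)
                return -EINVAL;         /* stream is no longer protected */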
+
 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
                                       int id,
                                       struct drm_dp_payload *payload)
index 6840f05..a82f37d 100644 (file)
@@ -3738,6 +3738,34 @@ drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
        bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
 }
 
+/**
+ * drm_display_mode_from_cea_vic() - return a mode for CEA VIC
+ * @dev: DRM device
+ * @video_code: CEA VIC of the mode
+ *
+ * Creates a new mode matching the specified CEA VIC.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_display_mode_from_cea_vic(struct drm_device *dev,
+                             u8 video_code)
+{
+       const struct drm_display_mode *cea_mode;
+       struct drm_display_mode *newmode;
+
+       cea_mode = cea_mode_for_vic(video_code);
+       if (!cea_mode)
+               return NULL;
+
+       newmode = drm_mode_duplicate(dev, cea_mode);
+       if (!newmode)
+               return NULL;
+
+       return newmode;
+}
+EXPORT_SYMBOL(drm_display_mode_from_cea_vic);
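
A minimal usage sketch (illustration only): callers that know a CEA VIC but
have no EDID can synthesize the mode directly, as drm_dp_downstream_mode()
does above.

        struct drm_display_mode *mode;

        mode = drm_display_mode_from_cea_vic(dev, 16);  /* VIC 16: 1920x1080@60 */
        if (mode)
                drm_mode_probed_add(connector, mode);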
+
 static int
 do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 {
index 822edea..59b9ca2 100644 (file)
@@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
 {
        struct drm_gem_cma_object *cma_obj;
 
-       if (sgt->nents != 1) {
-               /* check if the entries in the sg_table are contiguous */
-               dma_addr_t next_addr = sg_dma_address(sgt->sgl);
-               struct scatterlist *s;
-               unsigned int i;
-
-               for_each_sg(sgt->sgl, s, sgt->nents, i) {
-                       /*
-                        * sg_dma_address(s) is only valid for entries
-                        * that have sg_dma_len(s) != 0
-                        */
-                       if (!sg_dma_len(s))
-                               continue;
-
-                       if (sg_dma_address(s) != next_addr)
-                               return ERR_PTR(-EINVAL);
-
-                       next_addr = sg_dma_address(s) + sg_dma_len(s);
-               }
-       }
+       /* check if the entries in the sg_table are contiguous */
+       if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
+               return ERR_PTR(-EINVAL);
 
        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
index 0a952f2..d77c9f8 100644 (file)
@@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
                if (shmem->sgt) {
-                       dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
-                                    shmem->sgt->nents, DMA_BIDIRECTIONAL);
+                       dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
+                                         DMA_BIDIRECTIONAL, 0);
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
@@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
 
        WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
 
-       dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
-                    shmem->sgt->nents, DMA_BIDIRECTIONAL);
+       dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(shmem->sgt);
        kfree(shmem->sgt);
        shmem->sgt = NULL;
@@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
-       dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+       ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret)
+               goto err_free_sgt;
 
        shmem->sgt = sgt;
 
        return sgt;
 
+err_free_sgt:
+       sg_free_table(sgt);
+       kfree(sgt);
 err_put_pages:
        drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
index 8a6a3c9..11fe9ff 100644 (file)
@@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 {
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;
+       int ret;
 
        if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);
@@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
        else
                sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
-       if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC)) {
+       ret = dma_map_sgtable(attach->dev, sgt, dir,
+                             DMA_ATTR_SKIP_CPU_SYNC);
+       if (ret) {
                sg_free_table(sgt);
                kfree(sgt);
-               sgt = ERR_PTR(-ENOMEM);
+               sgt = ERR_PTR(ret);
        }
 
        return sgt;
@@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
        if (!sgt)
                return;
 
-       dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                          DMA_ATTR_SKIP_CPU_SYNC);
+       dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
 }
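
The scatterlist conversions throughout this series all follow the same
pattern; as a before/after sketch (illustration only, free variables assumed
in scope):

        /* before: a zero return means failure, error code chosen by hand */
        if (!dma_map_sg(dev, sgt->sgl, sgt->nents, dir))
                return -ENOMEM;

        /* after: returns an errno and records the mapped nents in the
         * sg_table, so the unmap side takes just the table
         */
        ret = dma_map_sgtable(dev, sgt, dir, 0);
        if (ret)
                return ret;
        /* ... use the mapping ... */
        dma_unmap_sgtable(dev, sgt, dir, 0);
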
@@ -833,6 +834,37 @@ out:
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
 /**
+ * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
+ * @sgt: sg_table describing the buffer to check
+ *
+ * This helper calculates the contiguous size in the DMA address space
+ * of the buffer described by the provided sg_table.
+ *
+ * This is useful for implementing
+ * &drm_gem_object_funcs.gem_prime_import_sg_table.
+ */
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
+{
+       dma_addr_t expected = sg_dma_address(sgt->sgl);
+       struct scatterlist *sg;
+       unsigned long size = 0;
+       int i;
+
+       for_each_sgtable_dma_sg(sgt, sg, i) {
+               unsigned int len = sg_dma_len(sg);
+
+               if (!len)
+                       break;
+               if (sg_dma_address(sg) != expected)
+                       break;
+               expected += len;
+               size += len;
+       }
+       return size;
+}
+EXPORT_SYMBOL(drm_prime_get_contiguous_size);
+
+/**
  * drm_gem_prime_export - helper library implementation of the export callback
  * @obj: GEM object to export
  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
@@ -966,45 +998,26 @@ EXPORT_SYMBOL(drm_gem_prime_import);
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_entries)
 {
-       unsigned count;
-       struct scatterlist *sg;
-       struct page *page;
-       u32 page_len, page_index;
-       dma_addr_t addr;
-       u32 dma_len, dma_index;
-
-       /*
-        * Scatterlist elements contains both pages and DMA addresses, but
-        * one shoud not assume 1:1 relation between them. The sg->length is
-        * the size of the physical memory chunk described by the sg->page,
-        * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
-        * described by the sg_dma_address(sg).
-        */
-       page_index = 0;
-       dma_index = 0;
-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-               page_len = sg->length;
-               page = sg_page(sg);
-               dma_len = sg_dma_len(sg);
-               addr = sg_dma_address(sg);
-
-               while (pages && page_len > 0) {
-                       if (WARN_ON(page_index >= max_entries))
+       struct sg_dma_page_iter dma_iter;
+       struct sg_page_iter page_iter;
+       struct page **p = pages;
+       dma_addr_t *a = addrs;
+
+       if (pages) {
+               for_each_sgtable_page(sgt, &page_iter, 0) {
+                       if (WARN_ON(p - pages >= max_entries))
                                return -1;
-                       pages[page_index] = page;
-                       page++;
-                       page_len -= PAGE_SIZE;
-                       page_index++;
+                       *p++ = sg_page_iter_page(&page_iter);
                }
-               while (addrs && dma_len > 0) {
-                       if (WARN_ON(dma_index >= max_entries))
+       }
+       if (addrs) {
+               for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+                       if (WARN_ON(a - addrs >= max_entries))
                                return -1;
-                       addrs[dma_index] = addr;
-                       addr += PAGE_SIZE;
-                       dma_len -= PAGE_SIZE;
-                       dma_index++;
+                       *a++ = sg_page_iter_dma_address(&dma_iter);
                }
        }
+
        return 0;
 }
 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
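
A hedged sketch of a typical caller, expanding an imported sg_table into a
page array sized in PAGE_SIZE units (illustration only; "size" is assumed to
be the object size in bytes):

        struct page **pages;
        int npages = size >> PAGE_SHIFT;
        int ret;

        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
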
index ea19f1d..d1533bd 100644 (file)
@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-               dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+               dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-               dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+               dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* called with etnaviv_obj->lock held */
@@ -405,9 +405,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
        }
 
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
-               dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
-                                   etnaviv_obj->sgt->nents,
-                                   etnaviv_op_to_dma_dir(op));
+               dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+                                        etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }
 
@@ -422,8 +421,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
-               dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
-                       etnaviv_obj->sgt->nents,
+               dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }
index 3607d34..15d9fa3 100644 (file)
@@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
 {
        struct scatterlist *sg;
        unsigned int da = iova;
-       unsigned int i, j;
+       unsigned int i;
        int ret;
 
        if (!context || !sgt)
                return -EINVAL;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+       for_each_sgtable_dma_sg(sgt, sg, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;
 
@@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
        return 0;
 
 fail:
-       da = iova;
-
-       for_each_sg(sgt->sgl, sg, i, j) {
-               size_t bytes = sg_dma_len(sg) + sg->offset;
-
-               etnaviv_context_unmap(context, da, bytes);
-               da += bytes;
-       }
+       etnaviv_context_unmap(context, iova, da - iova);
        return ret;
 }
 
@@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
        unsigned int da = iova;
        int i;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+       for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;
 
                etnaviv_context_unmap(context, da, bytes);
index 58b89ec..5887f7f 100644 (file)
 #define EXYNOS_DEV_ADDR_START  0x20000000
 #define EXYNOS_DEV_ADDR_SIZE   0x40000000
 
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
-       if (!dev->dma_parms)
-               dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
-       if (!dev->dma_parms)
-               return -ENOMEM;
-
-       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-       return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
-       kfree(dev->dma_parms);
-       dev->dma_parms = NULL;
-}
-
 /*
  * drm_iommu_attach_device- attach device to iommu mapping
  *
@@ -69,10 +52,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                return -EINVAL;
        }
 
-       ret = configure_dma_max_seg_size(subdrv_dev);
-       if (ret)
-               return ret;
-
+       dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
                /*
                 * Keep the original DMA mapping of the sub-device and
@@ -89,9 +69,6 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                ret = iommu_attach_device(priv->mapping, subdrv_dev);
        }
 
-       if (ret)
-               clear_dma_max_seg_size(subdrv_dev);
-
        return ret;
 }
 
@@ -114,8 +91,6 @@ static void drm_iommu_detach_device(struct drm_device *drm_dev,
                arm_iommu_attach_device(subdrv_dev, *dma_priv);
        } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                iommu_detach_device(priv->mapping, subdrv_dev);
-
-       clear_dma_max_seg_size(subdrv_dev);
 }
 
 int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
index 1a1a285..5b9666f 100644 (file)
@@ -1760,11 +1760,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
        dsi->supplies[1].supply = "vddio";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
                                      dsi->supplies);
-       if (ret) {
-               if (ret != -EPROBE_DEFER)
-                       dev_info(dev, "failed to get regulators: %d\n", ret);
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to get regulators\n");
 
        dsi->clks = devm_kcalloc(dev,
                        dsi->driver_data->num_clks, sizeof(*dsi->clks),
index 03be314..967a5cd 100644 (file)
@@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
                return;
 
 out:
-       dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
-                       g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
+       dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
+                         DMA_BIDIRECTIONAL, 0);
 
        pages = frame_vector_pages(g2d_userptr->vec);
        if (!IS_ERR(pages)) {
@@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
 
        g2d_userptr->sgt = sgt;
 
-       if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
-                               DMA_BIDIRECTIONAL)) {
+       ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
+                             DMA_BIDIRECTIONAL, 0);
+       if (ret) {
                DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
-               ret = -ENOMEM;
                goto err_sg_free_table;
        }
 
index efa4768..1716a02 100644 (file)
@@ -431,27 +431,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 {
        struct exynos_drm_gem *exynos_gem;
 
-       if (sgt->nents < 1)
+       /* check if the entries in the sg_table are contiguous */
+       if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
+               DRM_ERROR("buffer chunks must be mapped contiguously");
                return ERR_PTR(-EINVAL);
-
-       /*
-        * Check if the provided buffer has been mapped as contiguous
-        * into DMA address space.
-        */
-       if (sgt->nents > 1) {
-               dma_addr_t next_addr = sg_dma_address(sgt->sgl);
-               struct scatterlist *s;
-               unsigned int i;
-
-               for_each_sg(sgt->sgl, s, sgt->nents, i) {
-                       if (!sg_dma_len(s))
-                               break;
-                       if (sg_dma_address(s) != next_addr) {
-                               DRM_ERROR("buffer chunks must be mapped contiguously");
-                               return ERR_PTR(-EINVAL);
-                       }
-                       next_addr = sg_dma_address(s) + sg_dma_len(s);
-               }
        }
 
        exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
index c5ba32f..dc01c18 100644 (file)
@@ -1797,11 +1797,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
                hdata->regul_bulk[i].supply = supply[i];
 
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
-       if (ret) {
-               if (ret != -EPROBE_DEFER)
-                       DRM_DEV_ERROR(dev, "failed to get regulators\n");
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to get regulators\n");
 
        hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
 
index bda4c0e..e5574e5 100644 (file)
@@ -234,6 +234,7 @@ i915-y += \
        display/intel_ddi.o \
        display/intel_dp.o \
        display/intel_dp_aux_backlight.o \
+       display/intel_dp_hdcp.o \
        display/intel_dp_link_training.o \
        display/intel_dp_mst.o \
        display/intel_dsi.o \
index f4053dd..520715b 100644 (file)
@@ -1646,6 +1646,7 @@ static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
 };
 
 static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+       .detect = intel_panel_detect,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_connector_destroy,
index 630f49b..86be032 100644 (file)
@@ -527,8 +527,6 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
        intel_atomic_clear_global_state(state);
 
        state->dpll_set = state->modeset = false;
-       state->global_state_changed = false;
-       state->active_pipes = 0;
 }
 
 struct intel_crtc_state *
@@ -542,40 +540,3 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 
        return to_intel_crtc_state(crtc_state);
 }
-
-int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc *crtc;
-
-       state->global_state_changed = true;
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               int ret;
-
-               ret = drm_modeset_lock(&crtc->base.mutex,
-                                      state->base.acquire_ctx);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
-{
-       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_crtc *crtc;
-
-       state->global_state_changed = true;
-
-       for_each_intel_crtc(&dev_priv->drm, crtc) {
-               struct intel_crtc_state *crtc_state;
-
-               crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
-       }
-
-       return 0;
-}
index 1114629..285de07 100644 (file)
@@ -56,8 +56,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
                               struct intel_crtc *intel_crtc,
                               struct intel_crtc_state *crtc_state);
 
-int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
-
-int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
-
 #endif /* __INTEL_ATOMIC_H__ */
index ad4aa66..f7de557 100644 (file)
@@ -958,13 +958,8 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
        if (IS_ERR(cdclk_state))
                return PTR_ERR(cdclk_state);
 
-       cdclk_state->force_min_cdclk_changed = true;
        cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
 
-       ret = intel_atomic_lock_global_state(&cdclk_state->base);
-       if (ret)
-               return ret;
-
        return drm_atomic_commit(&state->base);
 }
 
index a0a41ec..4716484 100644 (file)
@@ -1656,6 +1656,8 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
                [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
                [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
                [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+               [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 },
+               [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
        };
        /*
         * Bspec lists the ports as A, B, C, D - however internally in our
@@ -2133,7 +2135,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 
        INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
 
-       if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
+       if (!HAS_DISPLAY(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Skipping VBT init due to disabled display.\n");
                return;
@@ -2650,6 +2652,12 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
        case DP_AUX_G:
                aux_ch = AUX_CH_G;
                break;
+       case DP_AUX_H:
+               aux_ch = AUX_CH_H;
+               break;
+       case DP_AUX_I:
+               aux_ch = AUX_CH_I;
+               break;
        default:
                MISSING_CASE(info->alternate_aux_channel);
                aux_ch = AUX_CH_A;
index 577c444..cb93f6c 100644 (file)
@@ -2426,7 +2426,6 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
        if (!cdclk_state)
                return NULL;
 
-       cdclk_state->force_min_cdclk_changed = false;
        cdclk_state->pipe = INVALID_PIPE;
 
        return &cdclk_state->base;
@@ -2501,6 +2500,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
                if (ret)
                        return ret;
        } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+                  old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
                   intel_cdclk_changed(&old_cdclk_state->logical,
                                       &new_cdclk_state->logical)) {
                ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
index 6b31fde..b34eb00 100644 (file)
@@ -49,7 +49,6 @@ struct intel_cdclk_state {
 
        /* forced minimum cdclk for glk+ audio w/a */
        int force_min_cdclk;
-       bool force_min_cdclk_changed;
 
        /* bitmask of active pipes */
        u8 active_pipes;
index 5b4510c..4934edd 100644 (file)
@@ -833,6 +833,9 @@ intel_crt_detect(struct drm_connector *connector,
                    connector->base.id, connector->name,
                    force);
 
+       if (!INTEL_DISPLAY_ENABLED(dev_priv))
+               return connector_status_disconnected;
+
        if (dev_priv->params.load_detect_test) {
                wakeref = intel_display_power_get(dev_priv,
                                                  intel_encoder->power_domain);
index 19ac6b2..cdcb7b1 100644 (file)
@@ -572,13 +572,13 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
                                                /* NT mV Trans mV db    */
        { 0xA, 0x33, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
        { 0xA, 0x47, 0x36, 0x00, 0x09 },        /* 350   500      3.1   */
-       { 0xC, 0x64, 0x30, 0x00, 0x0F },        /* 350   700      6.0   */
-       { 0x6, 0x7F, 0x2C, 0x00, 0x13 },        /* 350   900      8.2   */
+       { 0xC, 0x64, 0x34, 0x00, 0x0B },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x30, 0x00, 0x0F },        /* 350   900      8.2   */
        { 0xA, 0x46, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
-       { 0xC, 0x64, 0x36, 0x00, 0x09 },        /* 500   700      2.9   */
-       { 0x6, 0x7F, 0x30, 0x00, 0x0F },        /* 500   900      5.1   */
+       { 0xC, 0x64, 0x38, 0x00, 0x07 },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x32, 0x00, 0x0D },        /* 500   900      5.1   */
        { 0xC, 0x61, 0x3F, 0x00, 0x00 },        /* 650   700      0.6   */
-       { 0x6, 0x7F, 0x37, 0x00, 0x08 },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x38, 0x00, 0x07 },        /* 600   900      3.5   */
        { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
@@ -1074,12 +1074,28 @@ static const struct cnl_ddi_buf_trans *
 ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                        int *n_entries)
 {
-       if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       switch (type) {
+       case INTEL_OUTPUT_HDMI:
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+               return icl_combo_phy_ddi_translations_hdmi;
+       case INTEL_OUTPUT_EDP:
+               if (dev_priv->vbt.edp.low_vswing) {
+                       if (rate > 540000) {
+                               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+                               return icl_combo_phy_ddi_translations_edp_hbr3;
+                       } else {
+                               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+                               return icl_combo_phy_ddi_translations_edp_hbr2;
+                       }
+               }
+               /* fall through */
+       default:
+               /* All combo DP and eDP ports that do not support low_vswing */
                *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
                return ehl_combo_phy_ddi_translations_dp;
        }
-
-       return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
 }
 
 static const struct cnl_ddi_buf_trans *
@@ -1088,30 +1104,44 @@ tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.hobl) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-               if (!intel_dp->hobl_failed && rate <= 540000) {
-                       /* Same table applies to TGL, RKL and DG1 */
-                       *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
-                       return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+       switch (type) {
+       case INTEL_OUTPUT_HDMI:
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+               return icl_combo_phy_ddi_translations_hdmi;
+       case INTEL_OUTPUT_EDP:
+               if (dev_priv->vbt.edp.hobl) {
+                       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+                       if (!intel_dp->hobl_failed && rate <= 540000) {
+                               /* Same table applies to TGL, RKL and DG1 */
+                               *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
+                               return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+                       }
                }
-       }
 
-       if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
-               return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
-       } else if (rate > 270000) {
-               if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
-                       *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
-                       return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+               if (rate > 540000) {
+                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+                       return icl_combo_phy_ddi_translations_edp_hbr3;
+               } else if (dev_priv->vbt.edp.low_vswing) {
+                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+                       return icl_combo_phy_ddi_translations_edp_hbr2;
+               }
+               /* fall through */
+       default:
+               /* All combo DP and eDP ports that do not support low_vswing */
+               if (rate > 270000) {
+                       if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+                               *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
+                               return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+                       }
+
+                       *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+                       return tgl_combo_phy_ddi_translations_dp_hbr2;
                }
 
-               *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
-               return tgl_combo_phy_ddi_translations_dp_hbr2;
+               *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+               return tgl_combo_phy_ddi_translations_dp_hbr;
        }
-
-       *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
-       return tgl_combo_phy_ddi_translations_dp_hbr;
 }
 
 static const struct tgl_dkl_phy_ddi_buf_trans *
@@ -1791,6 +1821,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
 
        ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
+       drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
+
        ctl &= ~TRANS_DDI_FUNC_ENABLE;
 
        if (IS_GEN_RANGE(dev_priv, 8, 10))
@@ -1818,12 +1850,12 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+                                    enum transcoder cpu_transcoder,
                                     bool enable)
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        intel_wakeref_t wakeref;
-       enum pipe pipe = 0;
        int ret = 0;
        u32 tmp;
 
@@ -1832,19 +1864,12 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
        if (drm_WARN_ON(dev, !wakeref))
                return -ENXIO;
 
-       if (drm_WARN_ON(dev,
-                       !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
-               ret = -EIO;
-               goto out;
-       }
-
-       tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
+       tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (enable)
                tmp |= TRANS_DDI_HDCP_SIGNALLING;
        else
                tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
-out:
+       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
        intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
        return ret;
 }
@@ -2717,7 +2742,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
        u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
        int rate = 0;
 
-       if (type == INTEL_OUTPUT_HDMI) {
+       if (type != INTEL_OUTPUT_HDMI) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                rate = intel_dp->link_rate;
@@ -3445,6 +3470,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
        intel_ddi_init_dp_buf_reg(encoder);
        if (!is_mst)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_configure_protocol_converter(intel_dp);
        intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
                                              true);
        intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -3556,19 +3582,17 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
                intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
                                          conn_state);
        } else {
-               struct intel_lspcon *lspcon =
-                               enc_to_intel_lspcon(encoder);
+               struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
                intel_ddi_pre_enable_dp(state, encoder, crtc_state,
                                        conn_state);
-               if (lspcon->active) {
-                       struct intel_digital_port *dig_port =
-                                       enc_to_dig_port(encoder);
 
+               /* FIXME precompute everything properly */
+               /* FIXME how do we turn infoframes off again? */
+               if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
                        dig_port->set_infoframes(encoder,
                                                 crtc_state->has_infoframe,
                                                 crtc_state, conn_state);
-               }
        }
 }
 
@@ -4012,18 +4036,19 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
 
        intel_psr_update(intel_dp, crtc_state, conn_state);
        intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
-       intel_edp_drrs_enable(intel_dp, crtc_state);
+       intel_edp_drrs_update(intel_dp, crtc_state);
 
        intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
 }
 
-static void intel_ddi_update_pipe(struct intel_atomic_state *state,
-                                 struct intel_encoder *encoder,
-                                 const struct intel_crtc_state *crtc_state,
-                                 const struct drm_connector_state *conn_state)
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state)
 {
 
-       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+           !intel_encoder_is_mst(encoder))
                intel_ddi_update_pipe_dp(state, encoder, crtc_state,
                                         conn_state);
 
@@ -4949,6 +4974,57 @@ static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
                 i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
 }
 
+static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       if (port >= PORT_D)
+               return HPD_PORT_TC1 + port - PORT_D;
+       else
+               return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       if (HAS_PCH_TGP(dev_priv))
+               return tgl_hpd_pin(dev_priv, port);
+
+       if (port >= PORT_D)
+               return HPD_PORT_C + port - PORT_D;
+       else
+               return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       if (port >= PORT_C)
+               return HPD_PORT_TC1 + port - PORT_C;
+       else
+               return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       if (port == PORT_D)
+               return HPD_PORT_A;
+
+       if (HAS_PCH_MCC(dev_priv))
+               return icl_hpd_pin(dev_priv, port);
+
+       return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       if (port == PORT_F)
+               return HPD_PORT_E;
+
+       return HPD_PORT_A + port - PORT_A;
+}
+
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
        struct intel_digital_port *dig_port;
@@ -5001,6 +5077,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
                         DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
 
+       mutex_init(&dig_port->hdcp_mutex);
+       dig_port->num_hdcp_streams = 0;
+
        encoder->hotplug = intel_ddi_hotplug;
        encoder->compute_output_type = intel_ddi_compute_output_type;
        encoder->compute_config = intel_ddi_compute_config;
@@ -5022,6 +5101,19 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        encoder->cloneable = 0;
        encoder->pipe_mask = ~0;
 
+       if (IS_ROCKETLAKE(dev_priv))
+               encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
+       else if (INTEL_GEN(dev_priv) >= 12)
+               encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
+       else if (IS_ELKHARTLAKE(dev_priv))
+               encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
+       else if (IS_GEN(dev_priv, 11))
+               encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
+       else if (IS_GEN(dev_priv, 10))
+               encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
+       else
+               encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+
        if (INTEL_GEN(dev_priv) >= 11)
                dig_port->saved_port_bits =
                        intel_de_read(dev_priv, DDI_BUF_CTL(port))
index 077e9db..f5fb62f 100644 (file)
@@ -16,6 +16,7 @@ struct intel_crtc_state;
 struct intel_dp;
 struct intel_dpll_hw_state;
 struct intel_encoder;
+enum transcoder;
 
 void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
                                struct intel_encoder *intel_encoder,
@@ -43,6 +44,7 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 u32 bxt_signal_levels(struct intel_dp *intel_dp);
 u32 ddi_signal_levels(struct intel_dp *intel_dp);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+                                    enum transcoder cpu_transcoder,
                                     bool enable);
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
 
index 345ecee..a1fba7e 100644 (file)
@@ -67,6 +67,7 @@
 #include "intel_bw.h"
 #include "intel_cdclk.h"
 #include "intel_color.h"
+#include "intel_csr.h"
 #include "intel_display_types.h"
 #include "intel_dp_link_training.h"
 #include "intel_fbc.h"
@@ -4092,8 +4093,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
 int skl_check_plane_surface(struct intel_plane_state *plane_state)
 {
        const struct drm_framebuffer *fb = plane_state->hw.fb;
-       int ret;
-       bool needs_aux = false;
+       int ret, i;
 
        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
@@ -4107,7 +4107,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
         * it.
         */
        if (is_ccs_modifier(fb->modifier)) {
-               needs_aux = true;
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
@@ -4115,20 +4114,15 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 
        if (intel_format_info_is_yuv_semiplanar(fb->format,
                                                fb->modifier)) {
-               needs_aux = true;
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        }
 
-       if (!needs_aux) {
-               int i;
-
-               for (i = 1; i < fb->format->num_planes; i++) {
-                       plane_state->color_plane[i].offset = ~0xfff;
-                       plane_state->color_plane[i].x = 0;
-                       plane_state->color_plane[i].y = 0;
-               }
+       for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
+               plane_state->color_plane[i].offset = ~0xfff;
+               plane_state->color_plane[i].x = 0;
+               plane_state->color_plane[i].y = 0;
        }
 
        ret = skl_check_main_surface(plane_state);
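The rewritten loop above drops the needs_aux bookkeeping: instead of poisoning unused color planes only when no aux surface is needed, it unconditionally poisons every entry past the format's plane count. A hedged userspace sketch of that "invalidate the tail of the array" pattern (MAX_PLANES and the struct layout are illustrative):

#include <stdio.h>

#define MAX_PLANES 4

struct color_plane { unsigned int offset; int x, y; };

static void reset_unused_planes(struct color_plane planes[MAX_PLANES],
                                int num_planes)
{
        /* Everything past the format's plane count gets an invalid offset
         * so accidental use trips up fast, mirroring the ~0xfff above. */
        for (int i = num_planes; i < MAX_PLANES; i++) {
                planes[i].offset = ~0xfffu;
                planes[i].x = 0;
                planes[i].y = 0;
        }
}

int main(void)
{
        struct color_plane planes[MAX_PLANES] = { { 0x1000, 0, 0 } };

        reset_unused_planes(planes, 1);
        printf("plane1 offset = 0x%x\n", planes[1].offset);
        return 0;
}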
@@ -7331,6 +7325,10 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
+       case PORT_H:
+               return POWER_DOMAIN_PORT_DDI_H_LANES;
+       case PORT_I:
+               return POWER_DOMAIN_PORT_DDI_I_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
@@ -7356,6 +7354,10 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
+               case AUX_CH_H:
+                       return POWER_DOMAIN_AUX_H_TBT;
+               case AUX_CH_I:
+                       return POWER_DOMAIN_AUX_I_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
@@ -7387,6 +7389,10 @@ intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
+       case AUX_CH_H:
+               return POWER_DOMAIN_AUX_H;
+       case AUX_CH_I:
+               return POWER_DOMAIN_AUX_I;
        default:
                MISSING_CASE(aux_ch);
                return POWER_DOMAIN_AUX_A;
@@ -8155,7 +8161,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * which the devices expect also in synchronous clock mode.
         */
        if (constant_n)
-               *ret_n = 0x8000;
+               *ret_n = DP_LINK_CONSTANT_N_VALUE;
        else
                *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
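DP_LINK_CONSTANT_N_VALUE replaces the bare 0x8000 magic number. The M/N pair encodes the stream-to-link clock ratio, and with N fixed only M needs computing. A simplified sketch of that computation (the real helper also reduces the ratio and clamps its inputs):

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_CONSTANT_N_VALUE 0x8000

static void compute_m_n_sketch(uint32_t m, uint32_t n,
                               uint32_t *ret_m, uint32_t *ret_n)
{
        *ret_n = DP_LINK_CONSTANT_N_VALUE;
        /* Scale m so the m/n ratio is preserved against the fixed N. */
        *ret_m = (uint32_t)(((uint64_t)m * *ret_n) / n);
}

int main(void)
{
        uint32_t m, n;

        /* e.g. a 148.5 MHz stream clock against a 270 MHz link clock */
        compute_m_n_sketch(148500, 270000, &m, &n);
        printf("M = %u, N = 0x%x\n", m, n);
        return 0;
}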
 
@@ -14291,7 +14297,6 @@ verify_crtc_state(struct intel_crtc *crtc,
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
-       bool active;
 
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
@@ -14301,16 +14306,19 @@ verify_crtc_state(struct intel_crtc *crtc,
        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);
 
-       active = dev_priv->display.get_pipe_config(crtc, pipe_config);
+       pipe_config->hw.enable = new_crtc_state->hw.enable;
+
+       pipe_config->hw.active =
+               dev_priv->display.get_pipe_config(crtc, pipe_config);
 
        /* we keep both pipes enabled on 830 */
-       if (IS_I830(dev_priv))
-               active = new_crtc_state->hw.active;
+       if (IS_I830(dev_priv) && pipe_config->hw.active)
+               pipe_config->hw.active = new_crtc_state->hw.active;
 
-       I915_STATE_WARN(new_crtc_state->hw.active != active,
+       I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
-                       new_crtc_state->hw.active, active);
+                       new_crtc_state->hw.active, pipe_config->hw.active);
 
        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
@@ -14319,6 +14327,7 @@ verify_crtc_state(struct intel_crtc *crtc,
 
        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;
+               bool active;
 
                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
@@ -14630,16 +14639,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
 static int intel_modeset_checks(struct intel_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       int ret;
 
        state->modeset = true;
-       state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
-
-       if (state->active_pipes != dev_priv->active_pipes) {
-               ret = _intel_atomic_lock_global_state(state);
-               if (ret)
-                       return ret;
-       }
 
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);
@@ -14783,7 +14784,8 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
                                    bool *need_cdclk_calc)
 {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-       struct intel_cdclk_state *new_cdclk_state;
+       const struct intel_cdclk_state *old_cdclk_state;
+       const struct intel_cdclk_state *new_cdclk_state;
        struct intel_plane_state *plane_state;
        struct intel_bw_state *new_bw_state;
        struct intel_plane *plane;
@@ -14802,9 +14804,11 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
                        return ret;
        }
 
+       old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
        new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
 
-       if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+       if (new_cdclk_state &&
+           old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
                *need_cdclk_calc = true;
 
        ret = dev_priv->display.bw_calc_min_cdclk(state);
@@ -15751,14 +15755,6 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
                                        plane->frontbuffer_bit);
 }
 
-static void assert_global_state_locked(struct drm_i915_private *dev_priv)
-{
-       struct intel_crtc *crtc;
-
-       for_each_intel_crtc(&dev_priv->drm, crtc)
-               drm_modeset_lock_assert_held(&crtc->base.mutex);
-}
-
 static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
@@ -15834,12 +15830,6 @@ static int intel_atomic_commit(struct drm_device *dev,
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);
 
-       if (state->global_state_changed) {
-               assert_global_state_locked(dev_priv);
-
-               dev_priv->active_pipes = state->active_pipes;
-       }
-
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
 
@@ -16886,7 +16876,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
        intel_pps_init(dev_priv);
 
-       if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+       if (!HAS_DISPLAY(dev_priv))
                return;
 
        if (IS_ROCKETLAKE(dev_priv)) {
@@ -17872,6 +17862,27 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
 {
        int ret;
 
+       if (i915_inject_probe_failure(i915))
+               return -ENODEV;
+
+       if (HAS_DISPLAY(i915)) {
+               ret = drm_vblank_init(&i915->drm,
+                                     INTEL_NUM_PIPES(i915));
+               if (ret)
+                       return ret;
+       }
+
+       intel_bios_init(i915);
+
+       ret = intel_vga_register(i915);
+       if (ret)
+               goto cleanup_bios;
+
+       /* FIXME: completely on the wrong abstraction layer */
+       intel_power_domains_init_hw(i915, false);
+
+       intel_csr_ucode_init(i915);
+
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
@@ -17880,15 +17891,15 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
 
        ret = intel_cdclk_init(i915);
        if (ret)
-               return ret;
+               goto cleanup_vga_client_pw_domain_csr;
 
        ret = intel_dbuf_init(i915);
        if (ret)
-               return ret;
+               goto cleanup_vga_client_pw_domain_csr;
 
        ret = intel_bw_init(i915);
        if (ret)
-               return ret;
+               goto cleanup_vga_client_pw_domain_csr;
 
        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
@@ -17899,10 +17910,19 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
        intel_fbc_init(i915);
 
        return 0;
+
+cleanup_vga_client_pw_domain_csr:
+       intel_csr_ucode_fini(i915);
+       intel_power_domains_driver_remove(i915);
+       intel_vga_unregister(i915);
+cleanup_bios:
+       intel_bios_driver_remove(i915);
+
+       return ret;
 }
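The new cleanup_* labels follow the standard kernel goto-unwind idiom: each failure path jumps to the label that tears down everything initialized so far, in reverse order. A minimal userspace sketch of the pattern, with stubbed init/fini functions:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; /* simulate failure */ }
static void fini_a(void) { puts("fini a"); }

static int setup(void)
{
        int ret;

        ret = init_a();
        if (ret)
                return ret;

        ret = init_b();
        if (ret)
                goto cleanup_a;

        return 0;

cleanup_a:
        fini_a();       /* unwind in reverse order of initialization */
        return ret;
}

int main(void)
{
        return setup() ? 1 : 0;
}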
 
-/* part #2: call after irq install */
-int intel_modeset_init(struct drm_i915_private *i915)
+/* part #2: call after irq install, but before gem init */
+int intel_modeset_init_nogem(struct drm_i915_private *i915)
 {
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
@@ -17919,7 +17939,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
 
-       if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+       if (HAS_DISPLAY(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
@@ -18001,6 +18021,30 @@ int intel_modeset_init(struct drm_i915_private *i915)
        return 0;
 }
 
+/* part #3: call after gem init */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+       int ret;
+
+       intel_overlay_setup(i915);
+
+       if (!HAS_DISPLAY(i915))
+               return 0;
+
+       ret = intel_fbdev_init(&i915->drm);
+       if (ret)
+               return ret;
+
+       /* Only enable hotplug handling once the fbdev is fully set up. */
+       intel_hpd_init(i915);
+
+       intel_init_ipc(i915);
+
+       intel_psr_set_force_mode_changed(i915->psr.dp);
+
+       return 0;
+}
+
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -18885,6 +18929,18 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
        intel_fbc_cleanup_cfb(i915);
 }
 
+/* part #3: call after gem init */
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
+{
+       intel_csr_ucode_fini(i915);
+
+       intel_power_domains_driver_remove(i915);
+
+       intel_vga_unregister(i915);
+
+       intel_bios_driver_remove(i915);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
 struct intel_display_error_state {
@@ -18945,7 +19001,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 
        BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
 
-       if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+       if (!HAS_DISPLAY(dev_priv))
                return NULL;
 
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
index e890c8f..d10b7c8 100644 (file)
@@ -272,8 +272,6 @@ enum dpio_phy {
        DPIO_PHY2,
 };
 
-#define I915_NUM_PHYS_VLV 2
-
 enum aux_ch {
        AUX_CH_A,
        AUX_CH_B,
@@ -282,6 +280,8 @@ enum aux_ch {
        AUX_CH_E, /* ICL+ */
        AUX_CH_F,
        AUX_CH_G,
+       AUX_CH_H,
+       AUX_CH_I,
 };
 
 #define aux_ch_name(a) ((a) + 'A')
@@ -629,9 +629,11 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
 /* modesetting */
 void intel_modeset_init_hw(struct drm_i915_private *i915);
 int intel_modeset_init_noirq(struct drm_i915_private *i915);
+int intel_modeset_init_nogem(struct drm_i915_private *i915);
 int intel_modeset_init(struct drm_i915_private *i915);
 void intel_modeset_driver_remove(struct drm_i915_private *i915);
 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
 void intel_display_resume(struct drm_device *dev);
 void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
 
index f549381..0bf31f9 100644 (file)
@@ -601,6 +601,11 @@ static void intel_hdcp_info(struct seq_file *m,
 {
        bool hdcp_cap, hdcp2_cap;
 
+       if (!intel_connector->hdcp.shim) {
+               seq_puts(m, "No Connector Support");
+               goto out;
+       }
+
        hdcp_cap = intel_hdcp_capable(intel_connector);
        hdcp2_cap = intel_hdcp2_capable(intel_connector);
 
@@ -612,6 +617,7 @@ static void intel_hdcp_info(struct seq_file *m,
        if (!hdcp_cap && !hdcp2_cap)
                seq_puts(m, "None");
 
+out:
        seq_puts(m, "\n");
 }
 
@@ -620,6 +626,7 @@ static void intel_dp_info(struct seq_file *m,
 {
        struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+       const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
 
        seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -627,11 +634,7 @@ static void intel_dp_info(struct seq_file *m,
                intel_panel_info(m, &intel_connector->panel);
 
        drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
-                               &intel_dp->aux);
-       if (intel_connector->hdcp.shim) {
-               seq_puts(m, "\tHDCP version: ");
-               intel_hdcp_info(m, intel_connector);
-       }
+                               edid ? edid->data : NULL, &intel_dp->aux);
 }
 
 static void intel_dp_mst_info(struct seq_file *m,
@@ -649,10 +652,6 @@ static void intel_hdmi_info(struct seq_file *m,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
 
        seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
-       if (intel_connector->hdcp.shim) {
-               seq_puts(m, "\tHDCP version: ");
-               intel_hdcp_info(m, intel_connector);
-       }
 }
 
 static void intel_lvds_info(struct seq_file *m,
@@ -708,6 +707,9 @@ static void intel_connector_info(struct seq_file *m,
                break;
        }
 
+       seq_puts(m, "\tHDCP version: ");
+       intel_hdcp_info(m, intel_connector);
+
        seq_printf(m, "\tmodes:\n");
        list_for_each_entry(mode, &connector->modes, head)
                intel_seq_print_mode(m, 2, mode);
@@ -1069,10 +1071,18 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
+               bool supported = false;
+
                if (connector->state->crtc != &intel_crtc->base)
                        continue;
 
                seq_printf(m, "%s:\n", connector->name);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+                   drrs->type == SEAMLESS_DRRS_SUPPORT)
+                       supported = true;
+
+               seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
        }
        drm_connector_list_iter_end(&conn_iter);
 
@@ -1083,7 +1093,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
-               seq_puts(m, "\tDRRS Supported: Yes\n");
+               seq_puts(m, "\tDRRS Enabled: Yes\n");
 
                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
@@ -1118,7 +1128,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
-               seq_puts(m, "\tDRRS Supported : No");
+               seq_puts(m, "\tDRRS Enabled: No");
        }
        seq_puts(m, "\n");
 }
@@ -2029,10 +2039,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
        if (connector->status != connector_status_connected)
                return -ENODEV;
 
-       /* HDCP is supported by connector */
-       if (!intel_connector->hdcp.shim)
-               return -EINVAL;
-
        seq_printf(m, "%s:%d HDCP version: ", connector->name,
                   connector->base.id);
        intel_hdcp_info(m, intel_connector);
index 7946c6a..7277e58 100644 (file)
@@ -5263,7 +5263,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
        unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
        int config, i;
 
-       if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
+       if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
                /* Wa_1409767108: tgl */
                table = wa_1409767108_buddy_page_masks;
        else
index 9349b15..3d4bf9b 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <linux/async.h>
 #include <linux/i2c.h>
+#include <linux/pwm.h>
 #include <linux/sched/clock.h>
 
 #include <drm/drm_atomic.h>
@@ -223,6 +224,7 @@ struct intel_panel {
                bool util_pin_active_low;       /* bxt+ */
                u8 controller;          /* bxt+ only */
                struct pwm_device *pwm;
+               struct pwm_state pwm_state;
 
                /* DPCD backlight */
                u8 pwmgen_bit_count;
@@ -314,10 +316,12 @@ struct intel_hdcp_shim {
 
        /* Enables HDCP signalling on the port */
        int (*toggle_signalling)(struct intel_digital_port *dig_port,
+                                enum transcoder cpu_transcoder,
                                 bool enable);
 
        /* Ensures the link is still protected */
-       bool (*check_link)(struct intel_digital_port *dig_port);
+       bool (*check_link)(struct intel_digital_port *dig_port,
+                          struct intel_connector *connector);
 
        /* Detects panel's hdcp capability. This is optional for HDMI. */
        int (*hdcp_capable)(struct intel_digital_port *dig_port,
@@ -479,8 +483,6 @@ struct intel_atomic_state {
 
        bool dpll_set, modeset;
 
-       u8 active_pipes;
-
        struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
 
        /*
@@ -491,11 +493,6 @@ struct intel_atomic_state {
 
        bool rps_interactive;
 
-       /*
-        * active_pipes
-        */
-       bool global_state_changed;
-
        struct i915_sw_fence commit_ready;
 
        struct llist_node freed;
@@ -1275,6 +1272,7 @@ struct intel_dp {
        u8 sink_count;
        bool link_mst;
        bool link_trained;
+       bool has_hdmi_sink;
        bool has_audio;
        bool reset_link_params;
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1376,6 +1374,14 @@ struct intel_dp {
        /* Displayport compliance testing */
        struct intel_dp_compliance compliance;
 
+       /* Downstream facing port caps */
+       struct {
+               int min_tmds_clock, max_tmds_clock;
+               int max_dotclock;
+               u8 max_bpc;
+               bool ycbcr_444_to_420;
+       } dfp;
+
        /* Display stream compression testing */
        bool force_dsc_en;
 
@@ -1415,6 +1421,11 @@ struct intel_digital_port {
        enum phy_fia tc_phy_fia;
        u8 tc_phy_fia_idx;
 
+       /* protects num_hdcp_streams reference count */
+       struct mutex hdcp_mutex;
+       /* the number of pipes using HDCP signalling out of this port */
+       unsigned int num_hdcp_streams;
+
        void (*write_infoframe)(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                unsigned int type,
@@ -1525,6 +1536,18 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
        }
 }
 
+static inline bool intel_encoder_is_mst(struct intel_encoder *encoder)
+{
+       return encoder->type == INTEL_OUTPUT_DP_MST;
+}
+
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct intel_encoder *encoder)
+{
+       return container_of(&encoder->base, struct intel_dp_mst_encoder,
+                           base.base);
+}
+
 static inline struct intel_digital_port *
 enc_to_dig_port(struct intel_encoder *encoder)
 {
@@ -1533,6 +1556,8 @@ enc_to_dig_port(struct intel_encoder *encoder)
        if (intel_encoder_is_dig_port(intel_encoder))
                return container_of(&encoder->base, struct intel_digital_port,
                                    base.base);
+       else if (intel_encoder_is_mst(intel_encoder))
+               return enc_to_mst(encoder)->primary;
        else
                return NULL;
 }
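enc_to_dig_port() and the relocated enc_to_mst() both lean on container_of() to recover the wrapper object from a pointer to its embedded base encoder. A self-contained sketch of that pattern, with container_of() re-derived from offsetof() for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };
struct digital_port {
        int port;
        struct encoder base;    /* embedded, like intel_encoder::base */
};

int main(void)
{
        struct digital_port dig_port = { .port = 3, .base = { .id = 7 } };
        struct encoder *enc = &dig_port.base;

        /* Walk back from the embedded member to the containing struct. */
        struct digital_port *dp = container_of(enc, struct digital_port, base);

        printf("recovered port = %d\n", dp->port);
        return 0;
}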
@@ -1543,13 +1568,6 @@ intel_attached_dig_port(struct intel_connector *connector)
        return enc_to_dig_port(intel_attached_encoder(connector));
 }
 
-static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct intel_encoder *encoder)
-{
-       return container_of(&encoder->base, struct intel_dp_mst_encoder,
-                           base.base);
-}
-
 static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
 {
        return &enc_to_dig_port(encoder)->dp;
index 284b15f..bf1e9cf 100644 (file)
@@ -38,7 +38,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_hdcp.h>
 #include <drm/drm_probe_helper.h>
 
 #include "i915_debugfs.h"
@@ -248,29 +247,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
        return max_link_clock * max_lanes;
 }
 
-static int
-intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *encoder = &dig_port->base;
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       int max_dotclk = dev_priv->max_dotclk_freq;
-       int ds_max_dotclk;
-
-       int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-
-       if (type != DP_DS_PORT_TYPE_VGA)
-               return max_dotclk;
-
-       ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
-                                                   intel_dp->downstream_ports);
-
-       if (ds_max_dotclk != 0)
-               max_dotclk = min(max_dotclk, ds_max_dotclk);
-
-       return max_dotclk;
-}
-
 static int cnl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -636,6 +612,34 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
 }
 
 static enum drm_mode_status
+intel_dp_mode_valid_downstream(struct intel_connector *connector,
+                              const struct drm_display_mode *mode,
+                              int target_clock)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       const struct drm_display_info *info = &connector->base.display_info;
+       int tmds_clock;
+
+       if (intel_dp->dfp.max_dotclock &&
+           target_clock > intel_dp->dfp.max_dotclock)
+               return MODE_CLOCK_HIGH;
+
+       /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
+       tmds_clock = target_clock;
+       if (drm_mode_is_420_only(info, mode))
+               tmds_clock /= 2;
+
+       if (intel_dp->dfp.min_tmds_clock &&
+           tmds_clock < intel_dp->dfp.min_tmds_clock)
+               return MODE_CLOCK_LOW;
+       if (intel_dp->dfp.max_tmds_clock &&
+           tmds_clock > intel_dp->dfp.max_tmds_clock)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
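intel_dp_mode_valid_downstream() treats a zero limit as "not advertised" and rejects a mode whose TMDS clock (halved for 4:2:0-only modes) falls outside the DFP's window. A standalone sketch of that check with illustrative clock values:

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_LOW, MODE_CLOCK_HIGH };

static enum mode_status check_dfp_clock(int tmds_clock,
                                        int min_tmds, int max_tmds)
{
        if (min_tmds && tmds_clock < min_tmds)
                return MODE_CLOCK_LOW;
        if (max_tmds && tmds_clock > max_tmds)
                return MODE_CLOCK_HIGH;
        return MODE_OK;
}

int main(void)
{
        /* 4:2:0 halves the TMDS clock relative to the dotclock */
        int dotclock = 594000, tmds = dotclock / 2;

        printf("status = %d\n", check_dfp_clock(tmds, 25000, 340000));
        return 0;
}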
+static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
@@ -645,15 +649,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
-       int max_dotclk;
+       int max_dotclk = dev_priv->max_dotclk_freq;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;
+       enum drm_mode_status status;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
-       max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
-
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
@@ -709,6 +712,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;
 
+       status = intel_dp_mode_valid_downstream(intel_connector,
+                                               mode, target_clock);
+       if (status != MODE_OK)
+               return status;
+
        return intel_mode_valid_max_plane_size(dev_priv, mode);
 }
 
@@ -1563,6 +1571,20 @@ intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
        txbuf[3] = msg->size - 1;
 }
 
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+       /*
+        * If we're trying to send the HDCP Aksv, we need to set the Aksv
+        * select bit to inform the hardware to send the Aksv after our header
+        * since we can't access that data from software.
+        */
+       if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+           msg->address == DP_AUX_HDCP_AKSV)
+               return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+       return 0;
+}
+
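intel_dp_aux_header(), partially visible further up, packs a request into the 4-byte DP AUX header: a 4-bit command, a 20-bit address, and the length minus one. A hedged sketch of that layout (the request and address values below are illustrative):

#include <stdint.h>
#include <stdio.h>

static void pack_aux_header(uint8_t txbuf[4], uint8_t request,
                            uint32_t address, uint8_t size)
{
        txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
        txbuf[1] = (address >> 8) & 0xff;
        txbuf[2] = address & 0xff;
        txbuf[3] = size - 1;    /* AUX encodes the length as size - 1 */
}

int main(void)
{
        uint8_t hdr[4];

        pack_aux_header(hdr, 0x8 /* native write */, 0x68000, 16);
        printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
        return 0;
}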
 static ssize_t
 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
@@ -1570,6 +1592,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
+       u32 flags = intel_dp_aux_xfer_flags(msg);
        int ret;
 
        intel_dp_aux_header(txbuf, msg);
@@ -1590,7 +1613,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
 
                ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
-                                       rxbuf, rxsize, 0);
+                                       rxbuf, rxsize, flags);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
 
@@ -1613,7 +1636,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                        return -E2BIG;
 
                ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
-                                       rxbuf, rxsize, 0);
+                                       rxbuf, rxsize, flags);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
@@ -1954,19 +1977,72 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
                drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
 }
 
-static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
-                               struct intel_crtc_state *pipe_config)
+static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
+                                  const struct intel_crtc_state *crtc_state)
+{
+       return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+               (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+                intel_dp->dfp.ycbcr_444_to_420);
+}
+
+static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
+                                   const struct intel_crtc_state *crtc_state, int bpc)
+{
+       int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
+
+       if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
+               clock /= 2;
+
+       return clock;
+}
+
+static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state, int bpc)
+{
+       int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
+
+       if (intel_dp->dfp.min_tmds_clock &&
+           tmds_clock < intel_dp->dfp.min_tmds_clock)
+               return false;
+
+       if (intel_dp->dfp.max_tmds_clock &&
+           tmds_clock > intel_dp->dfp.max_tmds_clock)
+               return false;
+
+       return true;
+}
+
+static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
+                                             const struct intel_crtc_state *crtc_state,
+                                             int bpc)
+{
+       return intel_hdmi_deep_color_possible(crtc_state, bpc,
+                                             intel_dp->has_hdmi_sink,
+                                             intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
+               intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
+}
+
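These helpers feed the deep-color clamp in intel_dp_max_bpp() below: start from the pipe's bpc, then step down in twos while the implied TMDS clock overflows the DFP's limit. A simplified standalone sketch (clock numbers illustrative):

#include <stdio.h>

static int tmds_clock_for(int pixel_clock, int bpc, int is_420)
{
        int clock = pixel_clock * bpc / 8;

        return is_420 ? clock / 2 : clock;
}

static int clamp_bpc(int pixel_clock, int bpc, int max_tmds, int is_420)
{
        /* Step down from the requested bpc until the TMDS clock fits;
         * falling out of the loop leaves bpc at 8. */
        for (; bpc >= 10; bpc -= 2) {
                if (tmds_clock_for(pixel_clock, bpc, is_420) <= max_tmds)
                        break;
        }
        return bpc;
}

int main(void)
{
        /* 4K@60 (594 MHz) through a DFP capped at 600 MHz TMDS */
        printf("bpc = %d\n", clamp_bpc(594000, 12, 600000, 0));
        return 0;
}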
+static int intel_dp_max_bpp(struct intel_dp *intel_dp,
+                           const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int bpp, bpc;
 
-       bpp = pipe_config->pipe_bpp;
-       bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+       bpc = crtc_state->pipe_bpp / 3;
 
-       if (bpc > 0)
-               bpp = min(bpp, 3*bpc);
+       if (intel_dp->dfp.max_bpc)
+               bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
 
+       if (intel_dp->dfp.min_tmds_clock) {
+               for (; bpc >= 10; bpc -= 2) {
+                       if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
+                               break;
+               }
+       }
+
+       bpp = bpc * 3;
        if (intel_dp_is_edp(intel_dp)) {
                /* Get bpp from vbt only for panels that dont have bpp in edid */
                if (intel_connector->base.display_info.bpc == 0 &&
@@ -2288,7 +2364,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
 
        limits.min_bpp = intel_dp_min_bpp(pipe_config);
-       limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
+       limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
 
        if (intel_dp_is_edp(intel_dp)) {
                /*
@@ -2363,10 +2439,16 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
 
-       if (!drm_mode_is_420_only(info, adjusted_mode) ||
-           !intel_dp_get_colorimetry_status(intel_dp) ||
-           !connector->ycbcr_420_allowed)
+       if (!connector->ycbcr_420_allowed)
+               return 0;
+
+       if (!drm_mode_is_420_only(info, adjusted_mode))
+               return 0;
+
+       if (intel_dp->dfp.ycbcr_444_to_420) {
+               crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                return 0;
+       }
 
        crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
 
@@ -2575,6 +2657,34 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
                intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
 }
 
+static void
+intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
+                            struct intel_crtc_state *pipe_config,
+                            int output_bpp, bool constant_n)
+{
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       /*
+        * DRRS and PSR can't be enabled together, so PSR is given preference,
+        * as it allows more power savings by completely shutting down the
+        * display; to guarantee this, intel_dp_drrs_compute_config() must be
+        * called after intel_psr_compute_config().
+        */
+       if (pipe_config->has_psr)
+               return;
+
+       if (!intel_connector->panel.downclock_mode ||
+           dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+               return;
+
+       pipe_config->has_drrs = true;
+       intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
+                              intel_connector->panel.downclock_mode->clock,
+                              pipe_config->port_clock, &pipe_config->dp_m2_n2,
+                              constant_n, pipe_config->fec_enable);
+}
+
 int
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
@@ -2605,7 +2715,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (ret)
                return ret;
 
-       pipe_config->has_drrs = false;
        if (!intel_dp_port_has_audio(dev_priv, port))
                pipe_config->has_audio = false;
        else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
@@ -2657,21 +2766,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                               &pipe_config->dp_m_n,
                               constant_n, pipe_config->fec_enable);
 
-       if (intel_connector->panel.downclock_mode != NULL &&
-               dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
-                       pipe_config->has_drrs = true;
-                       intel_link_compute_m_n(output_bpp,
-                                              pipe_config->lane_count,
-                                              intel_connector->panel.downclock_mode->clock,
-                                              pipe_config->port_clock,
-                                              &pipe_config->dp_m2_n2,
-                                              constant_n, pipe_config->fec_enable);
-       }
-
        if (!HAS_DDI(dev_priv))
                intel_dp_set_clock(encoder, pipe_config);
 
        intel_psr_compute_config(intel_dp, pipe_config);
+       intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
+                                    constant_n);
        intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
        intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
 
@@ -3752,6 +3852,43 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       u8 tmp;
+
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
+               return;
+
+       if (!drm_dp_is_branch(intel_dp->dpcd))
+               return;
+
+       tmp = intel_dp->has_hdmi_sink ?
+               DP_HDMI_DVI_OUTPUT_CONFIG : 0;
+
+       if (drm_dp_dpcd_writeb(&intel_dp->aux,
+                              DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
+               drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
+                           enableddisabled(intel_dp->has_hdmi_sink));
+
+       tmp = intel_dp->dfp.ycbcr_444_to_420 ?
+               DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+
+       if (drm_dp_dpcd_writeb(&intel_dp->aux,
+                              DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
+               drm_dbg_kms(&i915->drm,
+                           "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
+                           enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
+
+       tmp = 0;
+
+       if (drm_dp_dpcd_writeb(&intel_dp->aux,
+                              DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+               drm_dbg_kms(&i915->drm,
+                           "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
+                           enableddisabled(false));
+}
+
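The writes above follow the usual single-byte DPCD idiom: drm_dp_dpcd_writeb() reports success as exactly one byte transferred, so anything else is logged and the converter keeps its previous state. A minimal sketch of that calling convention with a stubbed transfer (the register number is illustrative):

#include <stdio.h>

/* Stub standing in for drm_dp_dpcd_writeb(): returns bytes transferred. */
static int dpcd_writeb_stub(unsigned int reg, unsigned char val)
{
        (void)reg;
        (void)val;
        return 1;       /* pretend the single-byte write succeeded */
}

int main(void)
{
        /* Success for a one-byte DPCD write is exactly "1 byte moved". */
        if (dpcd_writeb_stub(0x3050, 0x00) != 1)
                fprintf(stderr, "DPCD write failed\n");
        else
                printf("DPCD write ok\n");
        return 0;
}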
 static void intel_enable_dp(struct intel_atomic_state *state,
                            struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
@@ -3789,6 +3926,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
        }
 
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_configure_protocol_converter(intel_dp);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 
@@ -6028,16 +6166,103 @@ intel_dp_get_edid(struct intel_dp *intel_dp)
 }
 
 static void
+intel_dp_update_dfp(struct intel_dp *intel_dp,
+                   const struct edid *edid)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_connector *connector = intel_dp->attached_connector;
+
+       intel_dp->dfp.max_bpc =
+               drm_dp_downstream_max_bpc(intel_dp->dpcd,
+                                         intel_dp->downstream_ports, edid);
+
+       intel_dp->dfp.max_dotclock =
+               drm_dp_downstream_max_dotclock(intel_dp->dpcd,
+                                              intel_dp->downstream_ports);
+
+       intel_dp->dfp.min_tmds_clock =
+               drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
+                                                intel_dp->downstream_ports,
+                                                edid);
+       intel_dp->dfp.max_tmds_clock =
+               drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
+                                                intel_dp->downstream_ports,
+                                                edid);
+
+       drm_dbg_kms(&i915->drm,
+                   "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+                   connector->base.base.id, connector->base.name,
+                   intel_dp->dfp.max_bpc,
+                   intel_dp->dfp.max_dotclock,
+                   intel_dp->dfp.min_tmds_clock,
+                   intel_dp->dfp.max_tmds_clock);
+}
+
+static void
+intel_dp_update_420(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_connector *connector = intel_dp->attached_connector;
+       bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+
+       /* No YCbCr output support on gmch platforms */
+       if (HAS_GMCH(i915))
+               return;
+
+       /*
+        * ILK doesn't seem capable of DP YCbCr output. The
+        * displayed image is severely corrupted. SNB+ is fine.
+        */
+       if (IS_GEN(i915, 5))
+               return;
+
+       is_branch = drm_dp_is_branch(intel_dp->dpcd);
+       ycbcr_420_passthrough =
+               drm_dp_downstream_420_passthrough(intel_dp->dpcd,
+                                                 intel_dp->downstream_ports);
+       ycbcr_444_to_420 =
+               drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
+                                                       intel_dp->downstream_ports);
+
+       if (INTEL_GEN(i915) >= 11) {
+               /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
+               intel_dp->dfp.ycbcr_444_to_420 =
+                       ycbcr_444_to_420 && !ycbcr_420_passthrough;
+
+               connector->base.ycbcr_420_allowed =
+                       !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+       } else {
+               /* 4:4:4->4:2:0 conversion is the only way */
+               intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
+
+               connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
+       }
+
+       drm_dbg_kms(&i915->drm,
+                   "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+                   connector->base.base.id, connector->base.name,
+                   yesno(connector->base.ycbcr_420_allowed),
+                   yesno(intel_dp->dfp.ycbcr_444_to_420));
+}
+
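intel_dp_update_420() encodes a small policy table: gen11+ prefers native 4:2:0 passthrough over 4:4:4 -> 4:2:0 conversion, while older generations can only convert. A condensed restatement of that decision with plain booleans:

#include <stdbool.h>
#include <stdio.h>

struct ycbcr_policy { bool use_444_to_420; bool allow_420; };

static struct ycbcr_policy pick_420_policy(bool gen11_plus, bool is_branch,
                                           bool passthrough, bool conv_444_to_420)
{
        struct ycbcr_policy p;

        if (gen11_plus) {
                /* Prefer passthrough; fall back to sink-side conversion. */
                p.use_444_to_420 = conv_444_to_420 && !passthrough;
                p.allow_420 = !is_branch || conv_444_to_420 || passthrough;
        } else {
                /* 4:4:4 -> 4:2:0 conversion is the only option. */
                p.use_444_to_420 = conv_444_to_420;
                p.allow_420 = conv_444_to_420;
        }
        return p;
}

int main(void)
{
        struct ycbcr_policy p = pick_420_policy(true, true, true, true);

        printf("444->420: %d, 420 allowed: %d\n",
               p.use_444_to_420, p.allow_420);
        return 0;
}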
+static void
 intel_dp_set_edid(struct intel_dp *intel_dp)
 {
-       struct intel_connector *intel_connector = intel_dp->attached_connector;
+       struct intel_connector *connector = intel_dp->attached_connector;
        struct edid *edid;
 
        intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
-       intel_connector->detect_edid = edid;
+       connector->detect_edid = edid;
+
+       intel_dp_update_dfp(intel_dp, edid);
+       intel_dp_update_420(intel_dp);
+
+       if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+               intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+               intel_dp->has_audio = drm_detect_monitor_audio(edid);
+       }
 
-       intel_dp->has_audio = drm_detect_monitor_audio(edid);
        drm_dp_cec_set_edid(&intel_dp->aux, edid);
        intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
 }
@@ -6045,14 +6270,23 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
 static void
 intel_dp_unset_edid(struct intel_dp *intel_dp)
 {
-       struct intel_connector *intel_connector = intel_dp->attached_connector;
+       struct intel_connector *connector = intel_dp->attached_connector;
 
        drm_dp_cec_unset_edid(&intel_dp->aux);
-       kfree(intel_connector->detect_edid);
-       intel_connector->detect_edid = NULL;
+       kfree(connector->detect_edid);
+       connector->detect_edid = NULL;
 
+       intel_dp->has_hdmi_sink = false;
        intel_dp->has_audio = false;
        intel_dp->edid_quirks = 0;
+
+       intel_dp->dfp.max_bpc = 0;
+       intel_dp->dfp.max_dotclock = 0;
+       intel_dp->dfp.min_tmds_clock = 0;
+       intel_dp->dfp.max_tmds_clock = 0;
+
+       intel_dp->dfp.ycbcr_444_to_420 = false;
+       connector->base.ycbcr_420_allowed = false;
 }
 
 static int
@@ -6071,6 +6305,9 @@ intel_dp_detect(struct drm_connector *connector,
        drm_WARN_ON(&dev_priv->drm,
                    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
+       if (!INTEL_DISPLAY_ENABLED(dev_priv))
+               return connector_status_disconnected;
+
        /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
@@ -6211,7 +6448,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        }
 
        /* if eDP has no EDID, fall back to fixed mode */
-       if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
+       if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
            intel_connector->panel.fixed_mode) {
                struct drm_display_mode *mode;
 
@@ -6223,6 +6460,19 @@ static int intel_dp_get_modes(struct drm_connector *connector)
                }
        }
 
+       if (!edid) {
+               struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+               struct drm_display_mode *mode;
+
+               mode = drm_dp_downstream_mode(connector->dev,
+                                             intel_dp->dpcd,
+                                             intel_dp->downstream_ports);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       return 1;
+               }
+       }
+
        return 0;
 }
 
@@ -6308,628 +6558,6 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
                edp_panel_vdd_off_sync(intel_dp);
 }
 
-static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
-{
-       long ret;
-
-#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
-       ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
-                                              msecs_to_jiffies(timeout));
-
-       if (!ret)
-               DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
-}
-
-static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
-                               u8 *an)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
-       static const struct drm_dp_aux_msg msg = {
-               .request = DP_AUX_NATIVE_WRITE,
-               .address = DP_AUX_HDCP_AKSV,
-               .size = DRM_HDCP_KSV_LEN,
-       };
-       u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
-       ssize_t dpcd_ret;
-       int ret;
-
-       /* Output An first, that's easy */
-       dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
-                                    an, DRM_HDCP_AN_LEN);
-       if (dpcd_ret != DRM_HDCP_AN_LEN) {
-               drm_dbg_kms(&i915->drm,
-                           "Failed to write An over DP/AUX (%zd)\n",
-                           dpcd_ret);
-               return dpcd_ret >= 0 ? -EIO : dpcd_ret;
-       }
-
-       /*
-        * Since Aksv is Oh-So-Secret, we can't access it in software. So in
-        * order to get it on the wire, we need to create the AUX header as if
-        * we were writing the data, and then tickle the hardware to output the
-        * data once the header is sent out.
-        */
-       intel_dp_aux_header(txbuf, &msg);
-
-       ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
-                               rxbuf, sizeof(rxbuf),
-                               DP_AUX_CH_CTL_AUX_AKSV_SELECT);
-       if (ret < 0) {
-               drm_dbg_kms(&i915->drm,
-                           "Write Aksv over DP/AUX failed (%d)\n", ret);
-               return ret;
-       } else if (ret == 0) {
-               drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
-               return -EIO;
-       }
-
-       reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
-       if (reply != DP_AUX_NATIVE_REPLY_ACK) {
-               drm_dbg_kms(&i915->drm,
-                           "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
-                           reply);
-               return -EIO;
-       }
-       return 0;
-}
-
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
-                                  u8 *bksv)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
-                              DRM_HDCP_KSV_LEN);
-       if (ret != DRM_HDCP_KSV_LEN) {
-               drm_dbg_kms(&i915->drm,
-                           "Read Bksv from DP/AUX failed (%zd)\n", ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-       return 0;
-}
-
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
-                                     u8 *bstatus)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       /*
-        * For some reason the HDMI and DP HDCP specs call this register
-        * definition by different names. In the HDMI spec, it's called BSTATUS,
-        * but in DP it's called BINFO.
-        */
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
-                              bstatus, DRM_HDCP_BSTATUS_LEN);
-       if (ret != DRM_HDCP_BSTATUS_LEN) {
-               drm_dbg_kms(&i915->drm,
-                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-       return 0;
-}
-
-static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
-                            u8 *bcaps)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
-                              bcaps, 1);
-       if (ret != 1) {
-               drm_dbg_kms(&i915->drm,
-                           "Read bcaps from DP/AUX failed (%zd)\n", ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-
-       return 0;
-}
-
-static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
-                                  bool *repeater_present)
-{
-       ssize_t ret;
-       u8 bcaps;
-
-       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
-       if (ret)
-               return ret;
-
-       *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
-       return 0;
-}
-
-static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
-                               u8 *ri_prime)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
-                              ri_prime, DRM_HDCP_RI_LEN);
-       if (ret != DRM_HDCP_RI_LEN) {
-               drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
-                           ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-       return 0;
-}
-
-static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
-                                bool *ksv_ready)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-       u8 bstatus;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
-                              &bstatus, 1);
-       if (ret != 1) {
-               drm_dbg_kms(&i915->drm,
-                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-       *ksv_ready = bstatus & DP_BSTATUS_READY;
-       return 0;
-}
-
-static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
-                               int num_downstream, u8 *ksv_fifo)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-       int i;
-
-       /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
-       for (i = 0; i < num_downstream; i += 3) {
-               size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
-               ret = drm_dp_dpcd_read(&dig_port->dp.aux,
-                                      DP_AUX_HDCP_KSV_FIFO,
-                                      ksv_fifo + i * DRM_HDCP_KSV_LEN,
-                                      len);
-               if (ret != len) {
-                       drm_dbg_kms(&i915->drm,
-                                   "Read ksv[%d] from DP/AUX failed (%zd)\n",
-                                   i, ret);
-                       return ret >= 0 ? -EIO : ret;
-               }
-       }
-       return 0;
-}
-
-static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
-                                   int i, u32 *part)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
-               return -EINVAL;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
-                              DP_AUX_HDCP_V_PRIME(i), part,
-                              DRM_HDCP_V_PRIME_PART_LEN);
-       if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
-               drm_dbg_kms(&i915->drm,
-                           "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-       return 0;
-}
-
-static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
-                                   bool enable)
-{
-       /* Not used for single stream DisplayPort setups */
-       return 0;
-}
-
-static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-       u8 bstatus;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
-                              &bstatus, 1);
-       if (ret != 1) {
-               drm_dbg_kms(&i915->drm,
-                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
-               return false;
-       }
-
-       return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
-}
-
-static
-int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
-                         bool *hdcp_capable)
-{
-       ssize_t ret;
-       u8 bcaps;
-
-       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
-       if (ret)
-               return ret;
-
-       *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
-       return 0;
-}
-
-struct hdcp2_dp_errata_stream_type {
-       u8      msg_id;
-       u8      stream_type;
-} __packed;
-
-struct hdcp2_dp_msg_data {
-       u8 msg_id;
-       u32 offset;
-       bool msg_detectable;
-       u32 timeout;
-       u32 timeout2; /* Added for non_paired situation */
-};
-
-static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
-       { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
-       { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
-         false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
-       { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
-         false, 0, 0 },
-       { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
-         false, 0, 0 },
-       { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
-         true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
-         HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
-       { HDCP_2_2_AKE_SEND_PAIRING_INFO,
-         DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
-         HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
-       { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
-       { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
-         false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
-       { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
-         0, 0 },
-       { HDCP_2_2_REP_SEND_RECVID_LIST,
-         DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
-         HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
-       { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
-         0, 0 },
-       { HDCP_2_2_REP_STREAM_MANAGE,
-         DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
-         0, 0 },
-       { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
-         false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
-/* local define to shovel this through the write_2_2 interface */
-#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
-       { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
-         DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
-         0, 0 },
-};
-
-static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
-                             u8 *rx_status)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       ssize_t ret;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
-                              DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
-                              HDCP_2_2_DP_RXSTATUS_LEN);
-       if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
-               drm_dbg_kms(&i915->drm,
-                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
-               return ret >= 0 ? -EIO : ret;
-       }
-
-       return 0;
-}
-
-static
-int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
-                                 u8 msg_id, bool *msg_ready)
-{
-       u8 rx_status;
-       int ret;
-
-       *msg_ready = false;
-       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
-       if (ret < 0)
-               return ret;
-
-       switch (msg_id) {
-       case HDCP_2_2_AKE_SEND_HPRIME:
-               if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
-                       *msg_ready = true;
-               break;
-       case HDCP_2_2_AKE_SEND_PAIRING_INFO:
-               if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
-                       *msg_ready = true;
-               break;
-       case HDCP_2_2_REP_SEND_RECVID_LIST:
-               if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
-                       *msg_ready = true;
-               break;
-       default:
-               DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
-                           const struct hdcp2_dp_msg_data *hdcp2_msg_data)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct intel_dp *dp = &dig_port->dp;
-       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
-       u8 msg_id = hdcp2_msg_data->msg_id;
-       int ret, timeout;
-       bool msg_ready = false;
-
-       if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
-               timeout = hdcp2_msg_data->timeout2;
-       else
-               timeout = hdcp2_msg_data->timeout;
-
-       /*
-        * There is no way to detect the CERT, LPRIME and STREAM_READY
-        * availability. So Wait for timeout and read the msg.
-        */
-       if (!hdcp2_msg_data->msg_detectable) {
-               mdelay(timeout);
-               ret = 0;
-       } else {
-               /*
-                * As we want to check the msg availability at timeout, Ignoring
-                * the timeout at wait for CP_IRQ.
-                */
-               intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
-               ret = hdcp2_detect_msg_availability(dig_port,
-                                                   msg_id, &msg_ready);
-               if (!msg_ready)
-                       ret = -ETIMEDOUT;
-       }
-
-       if (ret)
-               drm_dbg_kms(&i915->drm,
-                           "msg_id %d, ret %d, timeout(mSec): %d\n",
-                           hdcp2_msg_data->msg_id, ret, timeout);
-
-       return ret;
-}
-
-static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
-               if (hdcp2_dp_msg_data[i].msg_id == msg_id)
-                       return &hdcp2_dp_msg_data[i];
-
-       return NULL;
-}
-
-static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
-                            void *buf, size_t size)
-{
-       struct intel_dp *dp = &dig_port->dp;
-       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
-       unsigned int offset;
-       u8 *byte = buf;
-       ssize_t ret, bytes_to_write, len;
-       const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-
-       hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
-       if (!hdcp2_msg_data)
-               return -EINVAL;
-
-       offset = hdcp2_msg_data->offset;
-
-       /* No msg_id in DP HDCP2.2 msgs */
-       bytes_to_write = size - 1;
-       byte++;
-
-       hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
-
-       while (bytes_to_write) {
-               len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
-                               DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
-
-               ret = drm_dp_dpcd_write(&dig_port->dp.aux,
-                                       offset, (void *)byte, len);
-               if (ret < 0)
-                       return ret;
-
-               bytes_to_write -= ret;
-               byte += ret;
-               offset += ret;
-       }
-
-       return size;
-}
-
-static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
-{
-       u8 rx_info[HDCP_2_2_RXINFO_LEN];
-       u32 dev_cnt;
-       ssize_t ret;
-
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
-                              DP_HDCP_2_2_REG_RXINFO_OFFSET,
-                              (void *)rx_info, HDCP_2_2_RXINFO_LEN);
-       if (ret != HDCP_2_2_RXINFO_LEN)
-               return ret >= 0 ? -EIO : ret;
-
-       dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
-                  HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
-
-       if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
-               dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
-
-       ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
-               HDCP_2_2_RECEIVER_IDS_MAX_LEN +
-               (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
-
-       return ret;
-}
-
-static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
-                           u8 msg_id, void *buf, size_t size)
-{
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       unsigned int offset;
-       u8 *byte = buf;
-       ssize_t ret, bytes_to_recv, len;
-       const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-
-       hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
-       if (!hdcp2_msg_data)
-               return -EINVAL;
-       offset = hdcp2_msg_data->offset;
-
-       ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
-       if (ret < 0)
-               return ret;
-
-       if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
-               ret = get_receiver_id_list_size(dig_port);
-               if (ret < 0)
-                       return ret;
-
-               size = ret;
-       }
-       bytes_to_recv = size - 1;
-
-       /* DP adaptation msgs has no msg_id */
-       byte++;
-
-       while (bytes_to_recv) {
-               len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
-                     DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
-
-               ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
-                                      (void *)byte, len);
-               if (ret < 0) {
-                       drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
-                                   msg_id, ret);
-                       return ret;
-               }
-
-               bytes_to_recv -= ret;
-               byte += ret;
-               offset += ret;
-       }
-       byte = buf;
-       *byte = msg_id;
-
-       return size;
-}
-
-static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
-                                     bool is_repeater, u8 content_type)
-{
-       int ret;
-       struct hdcp2_dp_errata_stream_type stream_type_msg;
-
-       if (is_repeater)
-               return 0;
-
-       /*
-        * Errata for DP: As Stream type is used for encryption, Receiver
-        * should be communicated with stream type for the decryption of the
-        * content.
-        * Repeater will be communicated with stream type as a part of it's
-        * auth later in time.
-        */
-       stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
-       stream_type_msg.stream_type = content_type;
-
-       ret =  intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
-                                       sizeof(stream_type_msg));
-
-       return ret < 0 ? ret : 0;
-
-}
-
-static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
-{
-       u8 rx_status;
-       int ret;
-
-       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
-       if (ret)
-               return ret;
-
-       if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
-               ret = HDCP_REAUTH_REQUEST;
-       else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
-               ret = HDCP_LINK_INTEGRITY_FAILURE;
-       else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
-               ret = HDCP_TOPOLOGY_CHANGE;
-
-       return ret;
-}
-
-static
-int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
-                          bool *capable)
-{
-       u8 rx_caps[3];
-       int ret;
-
-       *capable = false;
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
-                              DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
-                              rx_caps, HDCP_2_2_RXCAPS_LEN);
-       if (ret != HDCP_2_2_RXCAPS_LEN)
-               return ret >= 0 ? -EIO : ret;
-
-       if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
-           HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
-               *capable = true;
-
-       return 0;
-}
-
-static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
-       .write_an_aksv = intel_dp_hdcp_write_an_aksv,
-       .read_bksv = intel_dp_hdcp_read_bksv,
-       .read_bstatus = intel_dp_hdcp_read_bstatus,
-       .repeater_present = intel_dp_hdcp_repeater_present,
-       .read_ri_prime = intel_dp_hdcp_read_ri_prime,
-       .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
-       .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
-       .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
-       .toggle_signalling = intel_dp_hdcp_toggle_signalling,
-       .check_link = intel_dp_hdcp_check_link,
-       .hdcp_capable = intel_dp_hdcp_capable,
-       .write_2_2_msg = intel_dp_hdcp2_write_msg,
-       .read_2_2_msg = intel_dp_hdcp2_read_msg,
-       .config_stream_type = intel_dp_hdcp2_config_stream_type,
-       .check_2_2_link = intel_dp_hdcp2_check_link,
-       .hdcp_2_2_capable = intel_dp_hdcp2_capable,
-       .protocol = HDCP_PROTOCOL_DP,
-};
-
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -7640,6 +7268,15 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
                    refresh_rate);
 }
 
+static void
+intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       dev_priv->drrs.busy_frontbuffer_bits = 0;
+       dev_priv->drrs.dp = intel_dp;
+}
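
The patch factors the body of intel_edp_drrs_enable() out into this _locked helper so that intel_edp_drrs_update(), added further down, can reuse it while already holding drrs.mutex. A minimal userspace sketch of that locked/unlocked convention, with hypothetical names and a pthread mutex standing in for the kernel one:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical DRRS-like state guarded by a single mutex. */
struct drrs_state {
	pthread_mutex_t mutex;
	bool enabled;
};

/* _locked variant: the caller must already hold st->mutex. */
static void drrs_enable_locked(struct drrs_state *st)
{
	st->enabled = true;
}

/* Public entry point: takes the lock, then reuses the _locked helper. */
static void drrs_enable(struct drrs_state *st)
{
	pthread_mutex_lock(&st->mutex);
	if (!st->enabled)
		drrs_enable_locked(st);
	pthread_mutex_unlock(&st->mutex);
}

int main(void)
{
	struct drrs_state st = { PTHREAD_MUTEX_INITIALIZER, false };

	drrs_enable(&st);
	printf("enabled: %d\n", st.enabled);
	return 0;
}

The _locked suffix is the usual kernel cue that the function asserts, rather than takes, the lock.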
+
 /**
  * intel_edp_drrs_enable - init drrs struct if supported
  * @intel_dp: DP struct
@@ -7652,31 +7289,40 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-       if (!crtc_state->has_drrs) {
-               drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
+       if (!crtc_state->has_drrs)
                return;
-       }
 
-       if (dev_priv->psr.enabled) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR enabled. Not enabling DRRS.\n");
-               return;
-       }
+       drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
 
        mutex_lock(&dev_priv->drrs.mutex);
+
        if (dev_priv->drrs.dp) {
-               drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
+               drm_warn(&dev_priv->drm, "DRRS already enabled\n");
                goto unlock;
        }
 
-       dev_priv->drrs.busy_frontbuffer_bits = 0;
-
-       dev_priv->drrs.dp = intel_dp;
+       intel_edp_drrs_enable_locked(intel_dp);
 
 unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
 }
 
+static void
+intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
+                             const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+               int refresh;
+
+               refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
+               intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
+       }
+
+       dev_priv->drrs.dp = NULL;
+}
+
 /**
  * intel_edp_drrs_disable - Disable DRRS
  * @intel_dp: DP struct
@@ -7697,16 +7343,45 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp,
                return;
        }
 
-       if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv, old_crtc_state,
-                       drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
-       dev_priv->drrs.dp = NULL;
+       intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
        mutex_unlock(&dev_priv->drrs.mutex);
 
        cancel_delayed_work_sync(&dev_priv->drrs.work);
 }
 
+/**
+ * intel_edp_drrs_update - Update DRRS state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function updates the DRRS state, enabling or disabling DRRS when
+ * executing fastsets. For full modesets, intel_edp_drrs_disable() and
+ * intel_edp_drrs_enable() should be called instead.
+ */
+void
+intel_edp_drrs_update(struct intel_dp *intel_dp,
+                     const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+               return;
+
+       mutex_lock(&dev_priv->drrs.mutex);
+
+       /* New state matches current one? */
+       if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
+               goto unlock;
+
+       if (crtc_state->has_drrs)
+               intel_edp_drrs_enable_locked(intel_dp);
+       else
+               intel_edp_drrs_disable_locked(intel_dp, crtc_state);
+
+unlock:
+       mutex_unlock(&dev_priv->drrs.mutex);
+}
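
Per the kernel-doc above, fastsets go through intel_edp_drrs_update() while full modesets use the disable/enable pair. A toy dispatch sketch of how a caller might pick between the two paths; all names here are stubs, not the i915 entry points:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the DRRS entry points (hypothetical). */
static void drrs_enable(void)  { puts("enable DRRS"); }
static void drrs_disable(void) { puts("disable DRRS"); }
static void drrs_update(bool has_drrs)
{
	printf("update DRRS -> %s\n", has_drrs ? "on" : "off");
}

/* Fastsets use the update hook; full modesets tear down and rebuild. */
static void commit(bool fastset, bool has_drrs)
{
	if (fastset) {
		drrs_update(has_drrs);
	} else {
		drrs_disable();
		/* ... pipe is fully reprogrammed here ... */
		if (has_drrs)
			drrs_enable();
	}
}

int main(void)
{
	commit(true, true);   /* fastset: flip state in place */
	commit(false, false); /* modeset: full disable path */
	return 0;
}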
+
 static void intel_edp_drrs_downclock_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
@@ -8138,10 +7813,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
-       if (INTEL_GEN(dev_priv) >= 11)
-               connector->ycbcr_420_allowed = true;
-
-       intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
        intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 
        intel_dp_aux_init(intel_dp);
@@ -8166,7 +7837,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
        intel_dp_add_properties(intel_dp, connector);
 
        if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
-               int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
+               int ret = intel_dp_init_hdcp(dig_port, intel_connector);
                if (ret)
                        drm_dbg_kms(&dev_priv->drm,
                                    "HDCP init failed, skipping.\n");
@@ -8210,6 +7881,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        intel_encoder = &dig_port->base;
        encoder = &intel_encoder->base;
 
+       mutex_init(&dig_port->hdcp_mutex);
+
        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
@@ -8284,6 +7957,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;
+       intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
 
        dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
index 0a3af34..08a1c0a 100644
@@ -17,6 +17,7 @@ struct drm_encoder;
 struct drm_i915_private;
 struct drm_modeset_acquire_ctx;
 struct drm_dp_vsc_sdp;
+struct intel_atomic_state;
 struct intel_connector;
 struct intel_crtc_state;
 struct intel_digital_port;
@@ -50,6 +51,7 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state,
                                           bool enable);
@@ -81,6 +83,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *crtc_state);
 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_update(struct intel_dp *intel_dp,
+                          const struct intel_crtc_state *crtc_state);
 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
@@ -127,4 +131,12 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 
 u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
 
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+                          struct intel_encoder *encoder,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct drm_connector_state *conn_state);
+
+int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
+                      struct intel_connector *intel_connector);
+
 #endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
new file mode 100644
index 0000000..03424d2
--- /dev/null
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_hdcp.h>
+#include <drm/drm_print.h>
+
+#include "intel_display_types.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_hdcp.h"
+
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+       long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+       ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+                                              msecs_to_jiffies(timeout));
+
+       if (!ret)
+               DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
+}
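
The condition macro C above wakes the waiter once the live CP_IRQ counter diverges from the value cached before the wait. A self-contained sketch of that counter-comparison idea, with the wait-queue and interrupt plumbing stripped out:

#include <stdio.h>

/* Mirrors the two counters compared by the macro C above. */
struct hdcp {
	int cp_irq_count;
	int cp_irq_count_cached;
};

/* "Has a CP_IRQ arrived since we snapshotted the counter?" */
static int cp_irq_arrived(const struct hdcp *h)
{
	return h->cp_irq_count_cached != h->cp_irq_count;
}

int main(void)
{
	struct hdcp h = { .cp_irq_count = 0 };

	h.cp_irq_count_cached = h.cp_irq_count; /* snapshot before waiting */
	printf("%d\n", cp_irq_arrived(&h));     /* 0: nothing yet */
	h.cp_irq_count++;                       /* CP_IRQ fires */
	printf("%d\n", cp_irq_arrived(&h));     /* 1: wake up, read the msg */
	return 0;
}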
+
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
+                               u8 *an)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       u8 aksv[DRM_HDCP_KSV_LEN] = {};
+       ssize_t dpcd_ret;
+
+       /* Output An first, that's easy */
+       dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
+                                    an, DRM_HDCP_AN_LEN);
+       if (dpcd_ret != DRM_HDCP_AN_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Failed to write An over DP/AUX (%zd)\n",
+                           dpcd_ret);
+               return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+       }
+
+       /*
+        * Since Aksv is Oh-So-Secret, we can't access it in software. So we
+        * send an empty buffer of the correct length through the DP helpers. On
+        * the other side, in the transfer hook, we'll generate a flag based on
+        * the destination address which will tickle the hardware to output the
+        * Aksv on our behalf after the header is sent.
+        */
+       dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV,
+                                    aksv, DRM_HDCP_KSV_LEN);
+       if (dpcd_ret != DRM_HDCP_KSV_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Failed to write Aksv over DP/AUX (%zd)\n",
+                           dpcd_ret);
+               return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+       }
+       return 0;
+}
+
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
+                                  u8 *bksv)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+                              DRM_HDCP_KSV_LEN);
+       if (ret != DRM_HDCP_KSV_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Read Bksv from DP/AUX failed (%zd)\n", ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+       return 0;
+}
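
drm_dp_dpcd_read() and drm_dp_dpcd_write() return the number of bytes transferred or a negative errno, so every helper in this file maps a short transfer to -EIO and passes real errors through unchanged. A standalone sketch of that mapping; dpcd_result_to_errno is a hypothetical name:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Map a DPCD transfer result to 0 / -errno: a negative value is a real
 * error and is passed through, while a short transfer becomes -EIO.
 */
static int dpcd_result_to_errno(ssize_t ret, size_t expected)
{
	if (ret < 0)
		return (int)ret;
	return (size_t)ret == expected ? 0 : -EIO;
}

int main(void)
{
	printf("%d\n", dpcd_result_to_errno(5, 5));    /* 0: full transfer */
	printf("%d\n", dpcd_result_to_errno(3, 5));    /* -EIO: short read */
	printf("%d\n", dpcd_result_to_errno(-EIO, 5)); /* -EIO passed through */
	return 0;
}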
+
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
+                                     u8 *bstatus)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       /*
+        * For some reason the HDMI and DP HDCP specs call this register
+        * definition by different names. In the HDMI spec, it's called BSTATUS,
+        * but in DP it's called BINFO.
+        */
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+                              bstatus, DRM_HDCP_BSTATUS_LEN);
+       if (ret != DRM_HDCP_BSTATUS_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+       return 0;
+}
+
+static
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
+                            u8 *bcaps)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+                              bcaps, 1);
+       if (ret != 1) {
+               drm_dbg_kms(&i915->drm,
+                           "Read bcaps from DP/AUX failed (%zd)\n", ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+
+       return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
+                                  bool *repeater_present)
+{
+       ssize_t ret;
+       u8 bcaps;
+
+       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+       if (ret)
+               return ret;
+
+       *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+       return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
+                               u8 *ri_prime)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+                              ri_prime, DRM_HDCP_RI_LEN);
+       if (ret != DRM_HDCP_RI_LEN) {
+               drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+                           ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+       return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
+                                bool *ksv_ready)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+       u8 bstatus;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+                              &bstatus, 1);
+       if (ret != 1) {
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+       *ksv_ready = bstatus & DP_BSTATUS_READY;
+       return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
+                               int num_downstream, u8 *ksv_fifo)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+       int i;
+
+       /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
+       for (i = 0; i < num_downstream; i += 3) {
+               size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+               ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+                                      DP_AUX_HDCP_KSV_FIFO,
+                                      ksv_fifo + i * DRM_HDCP_KSV_LEN,
+                                      len);
+               if (ret != len) {
+                       drm_dbg_kms(&i915->drm,
+                                   "Read ksv[%d] from DP/AUX failed (%zd)\n",
+                                   i, ret);
+                       return ret >= 0 ? -EIO : ret;
+               }
+       }
+       return 0;
+}
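
The KSV FIFO register exposes a 15-byte window, so the loop above pulls at most three 5-byte KSVs per AUX transaction. A quick sketch of just the length arithmetic, assuming DRM_HDCP_KSV_LEN is 5 as in drm_hdcp.h:

#include <stdio.h>

#define KSV_LEN 5 /* DRM_HDCP_KSV_LEN in drm_hdcp.h */

int main(void)
{
	int num_downstream = 7; /* example downstream device count */
	int i;

	/* Mirror the loop above: a 15-byte window holds up to 3 KSVs. */
	for (i = 0; i < num_downstream; i += 3) {
		int entries = num_downstream - i < 3 ? num_downstream - i : 3;

		printf("read %d bytes from the FIFO, store at buf + %d\n",
		       entries * KSV_LEN, i * KSV_LEN);
	}
	return 0;
}

For seven devices this issues reads of 15, 15 and 5 bytes, matching the "3 entries @ 5 bytes each" comment above.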
+
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
+                                   int i, u32 *part)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+               return -EINVAL;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+                              DP_AUX_HDCP_V_PRIME(i), part,
+                              DRM_HDCP_V_PRIME_PART_LEN);
+       if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+       return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+                                   enum transcoder cpu_transcoder,
+                                   bool enable)
+{
+       /* Not used for single stream DisplayPort setups */
+       return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
+                             struct intel_connector *connector)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+       u8 bstatus;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+                              &bstatus, 1);
+       if (ret != 1) {
+               drm_dbg_kms(&i915->drm,
+                           "Read bstatus from DP/AUX failed (%zd)\n", ret);
+               return false;
+       }
+
+       return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
+                         bool *hdcp_capable)
+{
+       ssize_t ret;
+       u8 bcaps;
+
+       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+       if (ret)
+               return ret;
+
+       *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+       return 0;
+}
+
+struct hdcp2_dp_errata_stream_type {
+       u8      msg_id;
+       u8      stream_type;
+} __packed;
+
+struct hdcp2_dp_msg_data {
+       u8 msg_id;
+       u32 offset;
+       bool msg_detectable;
+       u32 timeout;
+       u32 timeout2; /* longer timeout used when the receiver is not yet paired */
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+       { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+       { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+         false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+       { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+         false, 0, 0 },
+       { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+         false, 0, 0 },
+       { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+         true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+         HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+       { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+         DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+         HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+       { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+       { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+         false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+       { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+         0, 0 },
+       { HDCP_2_2_REP_SEND_RECVID_LIST,
+         DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+         HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+       { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+         0, 0 },
+       { HDCP_2_2_REP_STREAM_MANAGE,
+         DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+         0, 0 },
+       { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+         false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+       { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+         DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+         0, 0 },
+};
+
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
+                             u8 *rx_status)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       ssize_t ret;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+                              DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+                              HDCP_2_2_DP_RXSTATUS_LEN);
+       if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+               drm_dbg_kms(&i915->drm,
+                           "Read rx_status from DP/AUX failed (%zd)\n", ret);
+               return ret >= 0 ? -EIO : ret;
+       }
+
+       return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
+                                 u8 msg_id, bool *msg_ready)
+{
+       u8 rx_status;
+       int ret;
+
+       *msg_ready = false;
+       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+       if (ret < 0)
+               return ret;
+
+       switch (msg_id) {
+       case HDCP_2_2_AKE_SEND_HPRIME:
+               if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+                       *msg_ready = true;
+               break;
+       case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+               if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+                       *msg_ready = true;
+               break;
+       case HDCP_2_2_REP_SEND_RECVID_LIST:
+               if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+                       *msg_ready = true;
+               break;
+       default:
+               DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
+                           const struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+       u8 msg_id = hdcp2_msg_data->msg_id;
+       int ret, timeout;
+       bool msg_ready = false;
+
+       if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+               timeout = hdcp2_msg_data->timeout2;
+       else
+               timeout = hdcp2_msg_data->timeout;
+
+       /*
+        * There is no way to detect CERT, LPRIME and STREAM_READY
+        * availability, so wait for the timeout and then read the msg.
+        */
+       if (!hdcp2_msg_data->msg_detectable) {
+               mdelay(timeout);
+               ret = 0;
+       } else {
+               /*
+                * Since we want to check msg availability at the timeout,
+                * ignore the timeout error from the CP_IRQ wait.
+                */
+               intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+               ret = hdcp2_detect_msg_availability(dig_port,
+                                                   msg_id, &msg_ready);
+               if (!msg_ready)
+                       ret = -ETIMEDOUT;
+       }
+
+       if (ret)
+               drm_dbg_kms(&i915->drm,
+                           "msg_id %d, ret %d, timeout(mSec): %d\n",
+                           hdcp2_msg_data->msg_id, ret, timeout);
+
+       return ret;
+}
+
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+               if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+                       return &hdcp2_dp_msg_data[i];
+
+       return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
+                            void *buf, size_t size)
+{
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+       unsigned int offset;
+       u8 *byte = buf;
+       ssize_t ret, bytes_to_write, len;
+       const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+       hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+       if (!hdcp2_msg_data)
+               return -EINVAL;
+
+       offset = hdcp2_msg_data->offset;
+
+       /* No msg_id in DP HDCP2.2 msgs */
+       bytes_to_write = size - 1;
+       byte++;
+
+       hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+       while (bytes_to_write) {
+               len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+                               DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+               ret = drm_dp_dpcd_write(&dig_port->dp.aux,
+                                       offset, (void *)byte, len);
+               if (ret < 0)
+                       return ret;
+
+               bytes_to_write -= ret;
+               byte += ret;
+               offset += ret;
+       }
+
+       return size;
+}
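
Because the DP adaptation drops the leading msg_id byte and a native AUX transfer carries at most 16 bytes, the write path above streams size - 1 bytes in 16-byte chunks, advancing the buffer pointer and DPCD offset by however much each transfer actually moved. A userspace sketch of that loop; aux_write and the 0x69000 offset are illustrative stand-ins, not the real transport:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define AUX_MAX 16 /* per-transfer limit, as DP_AUX_MAX_PAYLOAD_BYTES */

/* Stub transport standing in for drm_dp_dpcd_write() (hypothetical). */
static ssize_t aux_write(unsigned int offset, const unsigned char *buf,
			 size_t len)
{
	(void)buf;
	printf("write %zu bytes at DPCD 0x%x\n", len, offset);
	return (ssize_t)len;
}

int main(void)
{
	unsigned char msg[40];
	const unsigned char *p;
	size_t to_write;
	unsigned int offset = 0x69000; /* illustrative DPCD offset */

	memset(msg, 0xab, sizeof(msg));
	msg[0] = 0x02; /* msg_id byte: consumed locally, never sent */

	/* Mirror the loop above: skip msg_id, then chunk by AUX_MAX. */
	to_write = sizeof(msg) - 1;
	p = msg + 1;
	while (to_write) {
		size_t len = to_write > AUX_MAX ? AUX_MAX : to_write;
		ssize_t ret = aux_write(offset, p, len);

		if (ret < 0)
			return 1;
		to_write -= (size_t)ret;
		p += ret;
		offset += (unsigned int)ret;
	}
	return 0;
}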
+
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
+{
+       u8 rx_info[HDCP_2_2_RXINFO_LEN];
+       u32 dev_cnt;
+       ssize_t ret;
+
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+                              DP_HDCP_2_2_REG_RXINFO_OFFSET,
+                              (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+       if (ret != HDCP_2_2_RXINFO_LEN)
+               return ret >= 0 ? -EIO : ret;
+
+       dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+                  HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+       if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+               dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+       ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+               HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+               (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+       return ret;
+}
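
RxInfo packs the 5-bit DEVICE_COUNT across its two bytes, which is why the code above stitches dev_cnt together from a HI and a LO field before sizing the receiver-ID list. A sketch of the decode, assuming the drm_hdcp.h layout (MSB in bit 0 of byte 0, low four bits in the top nibble of byte 1):

#include <stdio.h>

/* Assumed field layout, mirroring HDCP_2_2_DEV_COUNT_HI/LO. */
#define DEV_COUNT_HI(x) ((x) & 0x1)
#define DEV_COUNT_LO(x) (((x) >> 4) & 0xf)

int main(void)
{
	unsigned char rx_info[2] = { 0x01, 0x30 }; /* example RxInfo bytes */
	unsigned int dev_cnt = DEV_COUNT_HI(rx_info[0]) << 4 |
			       DEV_COUNT_LO(rx_info[1]);

	printf("device count: %u\n", dev_cnt);          /* 0x13 = 19 */
	printf("receiver id bytes: %u\n", dev_cnt * 5); /* 5 bytes per id */
	return 0;
}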
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
+                           u8 msg_id, void *buf, size_t size)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       unsigned int offset;
+       u8 *byte = buf;
+       ssize_t ret, bytes_to_recv, len;
+       const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+       hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+       if (!hdcp2_msg_data)
+               return -EINVAL;
+       offset = hdcp2_msg_data->offset;
+
+       ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
+       if (ret < 0)
+               return ret;
+
+       if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+               ret = get_receiver_id_list_size(dig_port);
+               if (ret < 0)
+                       return ret;
+
+               size = ret;
+       }
+       bytes_to_recv = size - 1;
+
+       /* DP adaptation msgs have no msg_id */
+       byte++;
+
+       while (bytes_to_recv) {
+               len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+                     DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+               ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
+                                      (void *)byte, len);
+               if (ret < 0) {
+                       drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+                                   msg_id, ret);
+                       return ret;
+               }
+
+               bytes_to_recv -= ret;
+               byte += ret;
+               offset += ret;
+       }
+       byte = buf;
+       *byte = msg_id;
+
+       return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
+                                     bool is_repeater, u8 content_type)
+{
+       int ret;
+       struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+       if (is_repeater)
+               return 0;
+
+       /*
+        * Errata for DP: since the stream type is used for encryption,
+        * the receiver must be told the stream type so that it can decrypt
+        * the content. A repeater is told the stream type later, as part
+        * of its authentication.
+        */
+       stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+       stream_type_msg.stream_type = content_type;
+
+       ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
+                                       sizeof(stream_type_msg));
+
+       return ret < 0 ? ret : 0;
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+{
+       u8 rx_status;
+       int ret;
+
+       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+       if (ret)
+               return ret;
+
+       if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+               ret = HDCP_REAUTH_REQUEST;
+       else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+               ret = HDCP_LINK_INTEGRITY_FAILURE;
+       else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+               ret = HDCP_TOPOLOGY_CHANGE;
+
+       return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
+                          bool *capable)
+{
+       u8 rx_caps[3];
+       int ret;
+
+       *capable = false;
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+                              DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+                              rx_caps, HDCP_2_2_RXCAPS_LEN);
+       if (ret != HDCP_2_2_RXCAPS_LEN)
+               return ret >= 0 ? -EIO : ret;
+
+       if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+           HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+               *capable = true;
+
+       return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+       .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+       .read_bksv = intel_dp_hdcp_read_bksv,
+       .read_bstatus = intel_dp_hdcp_read_bstatus,
+       .repeater_present = intel_dp_hdcp_repeater_present,
+       .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+       .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+       .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+       .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+       .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+       .check_link = intel_dp_hdcp_check_link,
+       .hdcp_capable = intel_dp_hdcp_capable,
+       .write_2_2_msg = intel_dp_hdcp2_write_msg,
+       .read_2_2_msg = intel_dp_hdcp2_read_msg,
+       .config_stream_type = intel_dp_hdcp2_config_stream_type,
+       .check_2_2_link = intel_dp_hdcp2_check_link,
+       .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+       .protocol = HDCP_PROTOCOL_DP,
+};
+
+static int
+intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+                                   enum transcoder cpu_transcoder,
+                                   bool enable)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       int ret;
+
+       if (!enable)
+               usleep_range(6, 60); /* Bspec says >= 6us */
+
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
+                                              cpu_transcoder, enable);
+       if (ret)
+               drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
+                             enable ? "Enable" : "Disable", ret);
+       return ret;
+}
+
+static
+bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
+                                 struct intel_connector *connector)
+{
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *intel_dp = &dig_port->dp;
+       struct drm_dp_query_stream_enc_status_ack_reply reply;
+       int ret;
+
+       if (!intel_dp_hdcp_check_link(dig_port, connector))
+               return false;
+
+       ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
+                                                 connector->port, &reply);
+       if (ret) {
+               drm_dbg_kms(&i915->drm,
+                           "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
+                           connector->base.base.id, connector->base.name, ret);
+               return false;
+       }
+
+       return reply.auth_completed && reply.encryption_enabled;
+}
+
+static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
+       .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+       .read_bksv = intel_dp_hdcp_read_bksv,
+       .read_bstatus = intel_dp_hdcp_read_bstatus,
+       .repeater_present = intel_dp_hdcp_repeater_present,
+       .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+       .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+       .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+       .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+       .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
+       .check_link = intel_dp_mst_hdcp_check_link,
+       .hdcp_capable = intel_dp_hdcp_capable,
+
+       .protocol = HDCP_PROTOCOL_DP,
+};
+
+int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
+                      struct intel_connector *intel_connector)
+{
+       struct drm_device *dev = intel_connector->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_encoder *intel_encoder = &dig_port->base;
+       enum port port = intel_encoder->port;
+       struct intel_dp *intel_dp = &dig_port->dp;
+
+       if (!is_hdcp_supported(dev_priv, port))
+               return 0;
+
+       if (intel_connector->mst_port)
+               return intel_hdcp_init(intel_connector, port,
+                                      &intel_dp_mst_hdcp_shim);
+       else if (!intel_dp_is_edp(intel_dp))
+               return intel_hdcp_init(intel_connector, port,
+                                      &intel_dp_hdcp_shim);
+
+       return 0;
+}
index a2d91a4..64d8855 100644
@@ -37,6 +37,7 @@
 #include "intel_dp.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
+#include "intel_hdcp.h"
 
 static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                                            struct intel_crtc_state *crtc_state,
@@ -352,6 +353,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
        drm_dbg_kms(&i915->drm, "active links %d\n",
                    intel_dp->active_mst_links);
 
+       intel_hdcp_disable(intel_mst->connector);
+
        drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -556,6 +559,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 
        if (pipe_config->has_audio)
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
+
+       /* Enable hdcp if it's desired */
+       if (conn_state->content_protection ==
+           DRM_MODE_CONTENT_PROTECTION_DESIRED)
+               intel_hdcp_enable(to_intel_connector(conn_state->connector),
+                                 pipe_config->cpu_transcoder,
+                                 (u8)conn_state->hdcp_content_type);
 }
 
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -709,9 +719,13 @@ static int
 intel_dp_mst_detect(struct drm_connector *connector,
                    struct drm_modeset_acquire_ctx *ctx, bool force)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
 
+       if (!INTEL_DISPLAY_ENABLED(i915))
+               return connector_status_disconnected;
+
        if (drm_connector_is_unregistered(connector))
                return connector_status_disconnected;
 
@@ -799,6 +813,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
 
+       /* TODO: Figure out how to make HDCP work on GEN12+ */
+       if (INTEL_GEN(dev_priv) < 12) {
+               ret = intel_dp_init_hdcp(dig_port, intel_connector);
+               if (ret)
+                       DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+       }
+
        /*
         * Reuse the prop from the SST connector because we're
         * not allowed to create new props after device registration.
@@ -865,6 +887,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
        intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
        intel_encoder->disable = intel_mst_disable_dp;
        intel_encoder->post_disable = intel_mst_post_disable_dp;
+       intel_encoder->update_pipe = intel_ddi_update_pipe;
        intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
        intel_encoder->pre_enable = intel_mst_pre_enable_dp;
        intel_encoder->enable = intel_mst_enable_dp;
index c9013f8..e08684e 100644
@@ -147,6 +147,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
                        pll->info->name, onoff(state), onoff(cur_state));
 }
 
+static i915_reg_t
+intel_combo_pll_enable_reg(struct drm_i915_private *i915,
+                          struct intel_shared_dpll *pll)
+{
+       if (IS_ELKHARTLAKE(i915) && pll->info->id == DPLL_ID_EHL_DPLL4)
+               return MG_PLL_ENABLE(0);
+
+       return CNL_DPLL_ENABLE(pll->info->id);
+}
+
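
This hunk replaces three copies of the same EHL DPLL4 special case with one register-selection helper; the get_hw_state, enable and disable hunks below then collapse to a single call. A toy illustration of that refactor pattern; the enum values are placeholders, not real register offsets:

#include <stdio.h>

/* Hypothetical register tokens standing in for MG_PLL_ENABLE(0) and
 * CNL_DPLL_ENABLE(id). */
enum reg { CNL_DPLL0, CNL_DPLL1, MG_PLL0 };

struct pll {
	int id;
	int is_ehl_dpll4;
};

/* One selector replaces the copy-pasted conditional in all three paths. */
static enum reg combo_pll_enable_reg(const struct pll *pll)
{
	if (pll->is_ehl_dpll4)
		return MG_PLL0;
	return pll->id ? CNL_DPLL1 : CNL_DPLL0;
}

int main(void)
{
	struct pll a = { 1, 0 }, b = { 4, 1 };

	printf("%d %d\n", combo_pll_enable_reg(&a), combo_pll_enable_reg(&b));
	return 0;
}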
 /**
  * intel_prepare_shared_dpll - call a dpll's prepare hook
  * @crtc_state: CRTC, and its state, which has a shared dpll
@@ -3842,12 +3854,7 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                   struct intel_shared_dpll *pll,
                                   struct intel_dpll_hw_state *hw_state)
 {
-       i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
-
-       if (IS_ELKHARTLAKE(dev_priv) &&
-           pll->info->id == DPLL_ID_EHL_DPLL4) {
-               enable_reg = MG_PLL_ENABLE(0);
-       }
+       i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
 
        return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
 }
@@ -4045,11 +4052,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 static void combo_pll_enable(struct drm_i915_private *dev_priv,
                             struct intel_shared_dpll *pll)
 {
-       i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+       i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
 
        if (IS_ELKHARTLAKE(dev_priv) &&
            pll->info->id == DPLL_ID_EHL_DPLL4) {
-               enable_reg = MG_PLL_ENABLE(0);
 
                /*
                 * We need to disable DC states when this DPLL is enabled.
@@ -4157,19 +4163,14 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 static void combo_pll_disable(struct drm_i915_private *dev_priv,
                              struct intel_shared_dpll *pll)
 {
-       i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+       i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
 
-       if (IS_ELKHARTLAKE(dev_priv) &&
-           pll->info->id == DPLL_ID_EHL_DPLL4) {
-               enable_reg = MG_PLL_ENABLE(0);
-               icl_pll_disable(dev_priv, pll, enable_reg);
+       icl_pll_disable(dev_priv, pll, enable_reg);
 
+       if (IS_ELKHARTLAKE(dev_priv) &&
+           pll->info->id == DPLL_ID_EHL_DPLL4)
                intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
                                        pll->wakeref);
-               return;
-       }
-
-       icl_pll_disable(dev_priv, pll, enable_reg);
 }
 
 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
index 307ed8a..237dbb1 100644
@@ -313,9 +313,15 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
 static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
+
+       if (!INTEL_DISPLAY_ENABLED(i915))
+               return connector_status_disconnected;
+
        return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
index bd39eb6..842c04e 100644
@@ -451,8 +451,7 @@ int intel_fbdev_init(struct drm_device *dev)
        struct intel_fbdev *ifbdev;
        int ret;
 
-       if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
-                       !INTEL_DISPLAY_ENABLED(dev_priv)))
+       if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
                return -ENODEV;
 
        ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
index a8d119b..e6b8d6d 100644
@@ -834,7 +834,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
        unsigned int pin;
        int ret;
 
-       if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+       if (!HAS_DISPLAY(dev_priv))
                return 0;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
index 1a0d49a..5492076 100644
@@ -148,9 +148,8 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
 
 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
 {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
        enum i915_power_well_id id;
+       intel_wakeref_t wakeref;
        bool enabled = false;
 
        /*
@@ -162,17 +161,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
        else
                id = SKL_DISP_PW_1;
 
-       mutex_lock(&power_domains->lock);
-
        /* PG1 (power well #1) needs to be enabled */
-       for_each_power_well(dev_priv, power_well) {
-               if (power_well->desc->id == id) {
-                       enabled = power_well->desc->ops->is_enabled(dev_priv,
-                                                                   power_well);
-                       break;
-               }
-       }
-       mutex_unlock(&power_domains->lock);
+       with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+               enabled = intel_display_power_well_is_enabled(dev_priv, id);
 
        /*
         * Another req for hdcp key loadability is enabled state of pll for
@@ -713,7 +704,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
                intel_de_write(dev_priv, HDCP_REP_CTL,
                               intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
 
-       ret = shim->toggle_signalling(dig_port, true);
+       ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
        if (ret)
                return ret;
 
@@ -801,6 +792,19 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
        drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
                    connector->base.name, connector->base.base.id);
 
+       /*
+        * If there are other connectors on this port using HDCP, don't disable
+        * it. Instead, toggle the HDCP signalling off on that particular
+        * connector/pipe and exit.
+        */
+       if (dig_port->num_hdcp_streams > 0) {
+               ret = hdcp->shim->toggle_signalling(dig_port,
+                                                   cpu_transcoder, false);
+               if (ret)
+                       DRM_ERROR("Failed to disable HDCP signalling\n");
+               return ret;
+       }
+
        hdcp->hdcp_encrypted = false;
        intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
        if (intel_de_wait_for_clear(dev_priv,
@@ -816,7 +820,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
        intel_de_write(dev_priv, HDCP_REP_CTL,
                       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
 
-       ret = hdcp->shim->toggle_signalling(dig_port, false);
+       ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
        if (ret) {
                drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
                return ret;
@@ -876,6 +880,34 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
        return container_of(hdcp, struct intel_connector, hdcp);
 }
 
+static void intel_hdcp_update_value(struct intel_connector *connector,
+                                   u64 value, bool update_property)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+
+       drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
+
+       if (hdcp->value == value)
+               return;
+
+       drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
+
+       if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+               if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
+                       dig_port->num_hdcp_streams--;
+       } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+               dig_port->num_hdcp_streams++;
+       }
+
+       hdcp->value = value;
+       if (update_property) {
+               drm_connector_get(&connector->base);
+               schedule_work(&hdcp->prop_work);
+       }
+}
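
intel_hdcp_update_value() now pins the connector with drm_connector_get() before scheduling prop_work; the matching drm_connector_put() lands in the work handler a few hunks below, so the connector cannot be freed while the work is queued. A toy refcount sketch of that take-on-schedule, drop-in-handler pattern:

#include <stdio.h>

/* Toy refcounted object standing in for the DRM connector. */
struct obj { int refs; };

static void obj_get(struct obj *o)
{
	o->refs++;
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		puts("object freed");
}

/* Work handler: consumes the reference taken at schedule time. */
static void prop_work(struct obj *o)
{
	puts("update content-protection property");
	obj_put(o);
}

int main(void)
{
	struct obj conn = { .refs = 1 };

	obj_get(&conn);   /* taken just before scheduling the work */
	prop_work(&conn); /* runs later and drops that reference */
	obj_put(&conn);   /* drop the original reference */
	return 0;
}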
+
 /* Implements Part 3 of the HDCP authorization procedure */
 static int intel_hdcp_check_link(struct intel_connector *connector)
 {
@@ -887,6 +919,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
        int ret = 0;
 
        mutex_lock(&hdcp->mutex);
+       mutex_lock(&dig_port->hdcp_mutex);
+
        cpu_transcoder = hdcp->cpu_transcoder;
 
        /* Check_link valid only when HDCP1.4 is enabled */
@@ -903,15 +937,16 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
                        connector->base.name, connector->base.base.id,
                        intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
                ret = -ENXIO;
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                                       true);
                goto out;
        }
 
-       if (hdcp->shim->check_link(dig_port)) {
+       if (hdcp->shim->check_link(dig_port, connector)) {
                if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
+                       intel_hdcp_update_value(connector,
+                               DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
                }
                goto out;
        }
@@ -923,20 +958,23 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
        ret = _intel_hdcp_disable(connector);
        if (ret) {
                drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                                       true);
                goto out;
        }
 
        ret = _intel_hdcp_enable(connector);
        if (ret) {
                drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                                       true);
                goto out;
        }
 
 out:
+       mutex_unlock(&dig_port->hdcp_mutex);
        mutex_unlock(&hdcp->mutex);
        return ret;
 }
@@ -962,6 +1000,8 @@ static void intel_hdcp_prop_work(struct work_struct *work)
 
        mutex_unlock(&hdcp->mutex);
        drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+
+       drm_connector_put(&connector->base);
 }
 
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
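
The drm_connector_put() added here closes the loop opened in intel_hdcp_update_value() above: every schedule_work(&hdcp->prop_work) is now preceded by a drm_connector_get(), so the connector cannot be destroyed while the property worker is still in flight. A minimal sketch of the pairing, with hdcp_schedule_prop_work() as a hypothetical wrapper (the matching put runs at the end of prop_work, as in the hunk above):

    /* Hypothetical wrapper showing the get/schedule half of the pairing. */
    static void hdcp_schedule_prop_work(struct intel_connector *connector)
    {
            struct intel_hdcp *hdcp = &connector->hdcp;

            drm_connector_get(&connector->base);    /* pin for the worker */
            schedule_work(&hdcp->prop_work);        /* put is in prop_work */
    }
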
@@ -1600,7 +1640,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
                    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
                    LINK_ENCRYPTION_STATUS);
        if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(dig_port, true);
+               ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+                                                   true);
                if (ret) {
                        drm_err(&dev_priv->drm,
                                "Failed to enable HDCP signalling. %d\n",
@@ -1650,7 +1691,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
                drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
 
        if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(dig_port, false);
+               ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+                                                   false);
                if (ret) {
                        drm_err(&dev_priv->drm,
                                "Failed to disable HDCP signalling. %d\n",
@@ -1766,16 +1808,18 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                        "HDCP2.2 link stopped the encryption, %x\n",
                        intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
                ret = -ENXIO;
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                                       true);
                goto out;
        }
 
        ret = hdcp->shim->check_2_2_link(dig_port);
        if (ret == HDCP_LINK_PROTECTED) {
                if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
+                       intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_ENABLED,
+                                       true);
                }
                goto out;
        }
@@ -1788,8 +1832,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                            "HDCP2.2 Downstream topology change\n");
                ret = hdcp2_authenticate_repeater_topology(connector);
                if (!ret) {
-                       hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-                       schedule_work(&hdcp->prop_work);
+                       intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_ENABLED,
+                                       true);
                        goto out;
                }
                drm_dbg_kms(&dev_priv->drm,
@@ -1807,8 +1852,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                drm_err(&dev_priv->drm,
                        "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
                        connector->base.name, connector->base.base.id, ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                               DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
                goto out;
        }
 
@@ -1818,8 +1863,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                            "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
                            connector->base.name, connector->base.base.id,
                            ret);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                                       true);
                goto out;
        }
 
@@ -1835,6 +1881,9 @@ static void intel_hdcp_check_work(struct work_struct *work)
                                               check_work);
        struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
 
+       if (drm_connector_is_unregistered(&connector->base))
+               return;
+
        if (!intel_hdcp2_check_link(connector))
                schedule_delayed_work(&hdcp->check_work,
                                      DRM_HDCP2_CHECK_PERIOD_MS);
@@ -1896,6 +1945,7 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
 }
 
 static int initialize_hdcp_port_data(struct intel_connector *connector,
+                                    enum port port,
                                     const struct intel_hdcp_shim *shim)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1903,8 +1953,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
        struct hdcp_port_data *data = &hdcp->port_data;
 
        if (INTEL_GEN(dev_priv) < 12)
-               data->fw_ddi =
-                       intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
+               data->fw_ddi = intel_get_mei_fw_ddi_index(port);
        else
                /*
                 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1974,14 +2023,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
        }
 }
 
-static void intel_hdcp2_init(struct intel_connector *connector,
+static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
                             const struct intel_hdcp_shim *shim)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        int ret;
 
-       ret = initialize_hdcp_port_data(connector, shim);
+       ret = initialize_hdcp_port_data(connector, port, shim);
        if (ret) {
                drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
                return;
@@ -1991,6 +2040,7 @@ static void intel_hdcp2_init(struct intel_connector *connector,
 }
 
 int intel_hdcp_init(struct intel_connector *connector,
+                   enum port port,
                    const struct intel_hdcp_shim *shim)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2000,8 +2050,8 @@ int intel_hdcp_init(struct intel_connector *connector,
        if (!shim)
                return -EINVAL;
 
-       if (is_hdcp2_supported(dev_priv))
-               intel_hdcp2_init(connector, shim);
+       if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
+               intel_hdcp2_init(connector, port, shim);
 
        ret =
        drm_connector_attach_content_protection_property(&connector->base,
@@ -2025,6 +2075,7 @@ int intel_hdcp_enable(struct intel_connector *connector,
                      enum transcoder cpu_transcoder, u8 content_type)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
        int ret = -EINVAL;
@@ -2033,14 +2084,14 @@ int intel_hdcp_enable(struct intel_connector *connector,
                return -ENOENT;
 
        mutex_lock(&hdcp->mutex);
+       mutex_lock(&dig_port->hdcp_mutex);
        drm_WARN_ON(&dev_priv->drm,
                    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
        hdcp->content_type = content_type;
+       hdcp->cpu_transcoder = cpu_transcoder;
 
-       if (INTEL_GEN(dev_priv) >= 12) {
-               hdcp->cpu_transcoder = cpu_transcoder;
+       if (INTEL_GEN(dev_priv) >= 12)
                hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
-       }
 
        /*
         * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2063,16 +2114,19 @@ int intel_hdcp_enable(struct intel_connector *connector,
 
        if (!ret) {
                schedule_delayed_work(&hdcp->check_work, check_link_interval);
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-               schedule_work(&hdcp->prop_work);
+               intel_hdcp_update_value(connector,
+                                       DRM_MODE_CONTENT_PROTECTION_ENABLED,
+                                       true);
        }
 
+       mutex_unlock(&dig_port->hdcp_mutex);
        mutex_unlock(&hdcp->mutex);
        return ret;
 }
 
 int intel_hdcp_disable(struct intel_connector *connector)
 {
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        int ret = 0;
 
@@ -2080,15 +2134,20 @@ int intel_hdcp_disable(struct intel_connector *connector)
                return -ENOENT;
 
        mutex_lock(&hdcp->mutex);
+       mutex_lock(&dig_port->hdcp_mutex);
 
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
-               if (hdcp->hdcp2_encrypted)
-                       ret = _intel_hdcp2_disable(connector);
-               else if (hdcp->hdcp_encrypted)
-                       ret = _intel_hdcp_disable(connector);
-       }
+       if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+               goto out;
+
+       intel_hdcp_update_value(connector,
+                               DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
+       if (hdcp->hdcp2_encrypted)
+               ret = _intel_hdcp2_disable(connector);
+       else if (hdcp->hdcp_encrypted)
+               ret = _intel_hdcp_disable(connector);
 
+out:
+       mutex_unlock(&dig_port->hdcp_mutex);
        mutex_unlock(&hdcp->mutex);
        cancel_delayed_work_sync(&hdcp->check_work);
        return ret;
@@ -2102,11 +2161,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
        struct intel_connector *connector =
                                to_intel_connector(conn_state->connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       bool content_protection_type_changed =
+       bool content_protection_type_changed, desired_and_not_enabled = false;
+
+       if (!connector->hdcp.shim)
+               return;
+
+       content_protection_type_changed =
                (conn_state->hdcp_content_type != hdcp->content_type &&
                 conn_state->content_protection !=
                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
-       bool desired_and_not_enabled = false;
 
        /*
         * During the HDCP encryption session if Type change is requested,
@@ -2159,12 +2222,39 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
 
 void intel_hdcp_cleanup(struct intel_connector *connector)
 {
-       if (!connector->hdcp.shim)
+       struct intel_hdcp *hdcp = &connector->hdcp;
+
+       if (!hdcp->shim)
                return;
 
-       mutex_lock(&connector->hdcp.mutex);
-       kfree(connector->hdcp.port_data.streams);
-       mutex_unlock(&connector->hdcp.mutex);
+       /*
+        * If the connector is registered, it's possible userspace could kick
+        * off another HDCP enable, which would re-spawn the workers.
+        */
+       drm_WARN_ON(connector->base.dev,
+               connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
+
+       /*
+        * Now that the connector is not registered, check_work won't be run,
+        * but cancel any outstanding instances of it
+        */
+       cancel_delayed_work_sync(&hdcp->check_work);
+
+       /*
+        * We don't cancel prop_work in the same way as check_work since it
+        * requires connection_mutex which could be held while calling this
+        * function. Instead, we rely on the connector references grabbed before
+        * scheduling prop_work to ensure the connector is alive when prop_work
+        * is run. So if we're in the destroy path (which is where this
+        * function should be called), we're "guaranteed" that prop_work is not
+        * active (tl;dr This Should Never Happen).
+        */
+       drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
+
+       mutex_lock(&hdcp->mutex);
+       kfree(hdcp->port_data.streams);
+       hdcp->shim = NULL;
+       mutex_unlock(&hdcp->mutex);
 }
 
 void intel_hdcp_atomic_check(struct drm_connector *connector,
index 86bbaec..1bbf5b6 100644 (file)
@@ -22,7 +22,7 @@ enum transcoder;
 void intel_hdcp_atomic_check(struct drm_connector *connector,
                             struct drm_connector_state *old_state,
                             struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
+int intel_hdcp_init(struct intel_connector *connector, enum port port,
                    const struct intel_hdcp_shim *hdcp_shim);
 int intel_hdcp_enable(struct intel_connector *connector,
                      enum transcoder cpu_transcoder, u8 content_type);
index de2ce56..3f2008d 100644 (file)
@@ -1477,7 +1477,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
        return ret;
 }
 
-static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
+                                          enum transcoder cpu_transcoder)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1494,13 +1495,15 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
                usleep_range(25, 50);
        }
 
-       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+                                              false);
        if (ret) {
                drm_err(&dev_priv->drm,
                        "Disable HDCP signalling failed (%d)\n", ret);
                return ret;
        }
-       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+                                              true);
        if (ret) {
                drm_err(&dev_priv->drm,
                        "Enable HDCP signalling failed (%d)\n", ret);
@@ -1512,6 +1515,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 
 static
 int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+                                     enum transcoder cpu_transcoder,
                                      bool enable)
 {
        struct intel_hdmi *hdmi = &dig_port->hdmi;
@@ -1522,7 +1526,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
        if (!enable)
                usleep_range(6, 60); /* Bspec says >= 6us */
 
-       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+                                              enable);
        if (ret) {
                drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
                        enable ? "Enable" : "Disable", ret);
@@ -1534,17 +1539,17 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
         * opportunity and enc_en signalling in KABYLAKE.
         */
        if (IS_KABYLAKE(dev_priv) && enable)
-               return kbl_repositioning_enc_en_signal(connector);
+               return kbl_repositioning_enc_en_signal(connector,
+                                                      cpu_transcoder);
 
        return 0;
 }
 
 static
-bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
+                                    struct intel_connector *connector)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct intel_connector *connector =
-               dig_port->hdmi.attached_connector;
        enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
        int ret;
@@ -1572,13 +1577,14 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
 }
 
 static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
+                               struct intel_connector *connector)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int retry;
 
        for (retry = 0; retry < 3; retry++)
-               if (intel_hdmi_hdcp_check_link_once(dig_port))
+               if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
                        return true;
 
        drm_err(&i915->drm, "Link check failed\n");
@@ -2271,35 +2277,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        return intel_mode_valid_max_plane_size(dev_priv, mode);
 }
 
-static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
-                                    int bpc)
+bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+                                   int bpc, bool has_hdmi_sink, bool ycbcr420_output)
 {
-       struct drm_i915_private *dev_priv =
-               to_i915(crtc_state->uapi.crtc->dev);
        struct drm_atomic_state *state = crtc_state->uapi.state;
        struct drm_connector_state *connector_state;
        struct drm_connector *connector;
-       const struct drm_display_mode *adjusted_mode =
-               &crtc_state->hw.adjusted_mode;
        int i;
 
-       if (HAS_GMCH(dev_priv))
-               return false;
-
-       if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
-               return false;
-
        if (crtc_state->pipe_bpp < bpc * 3)
                return false;
 
-       if (!crtc_state->has_hdmi_sink)
-               return false;
-
-       /*
-        * HDMI deep color affects the clocks, so it's only possible
-        * when not cloning with other encoder types.
-        */
-       if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+       if (!has_hdmi_sink)
                return false;
 
        for_each_new_connector_in_state(state, connector, connector_state, i) {
@@ -2308,7 +2297,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
                if (connector_state->crtc != crtc_state->uapi.crtc)
                        continue;
 
-               if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+               if (ycbcr420_output) {
                        const struct drm_hdmi_info *hdmi = &info->hdmi;
 
                        if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -2327,6 +2316,30 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
                }
        }
 
+       return true;
+}
+
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+                                    int bpc)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->uapi.crtc->dev);
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+
+       if (HAS_GMCH(dev_priv))
+               return false;
+
+       if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+               return false;
+
+       /*
+        * HDMI deep color affects the clocks, so it's only possible
+        * when not cloning with other encoder types.
+        */
+       if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI))
+               return false;
+
        /* Display Wa_1405510057:icl,ehl */
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
            bpc == 10 && IS_GEN(dev_priv, 11) &&
@@ -2334,7 +2347,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
             adjusted_mode->crtc_hblank_start) % 8 == 2)
                return false;
 
-       return true;
+       return intel_hdmi_deep_color_possible(crtc_state, bpc,
+                                             crtc_state->has_hdmi_sink,
+                                             crtc_state->output_format ==
+                                             INTEL_OUTPUT_FORMAT_YCBCR420);
 }
 
 static int
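
The net effect of the hunks above is a split of hdmi_deep_color_possible(): the platform gating (GMCH, gen checks, cloning, the icl/ehl workaround) stays in the static helper, while the sink-side checks (pipe bpp, HDMI sink, per-connector 4:2:0 deep-color support) move into the exported intel_hdmi_deep_color_possible(), presumably so non-HDMI encoder code can reuse them. A hypothetical external caller would look roughly like:

    /* Hypothetical caller reusing the exported sink-side check for 12 bpc. */
    static bool sink_supports_12bpc(const struct intel_crtc_state *crtc_state)
    {
            bool ycbcr420 = crtc_state->output_format ==
                            INTEL_OUTPUT_FORMAT_YCBCR420;

            return intel_hdmi_deep_color_possible(crtc_state, 12,
                                                  crtc_state->has_hdmi_sink,
                                                  ycbcr420);
    }
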
@@ -2459,6 +2475,23 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
        }
 }
 
+static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *crtc_state,
+                                const struct drm_connector_state *conn_state)
+{
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       const struct intel_digital_connector_state *intel_conn_state =
+               to_intel_digital_connector_state(conn_state);
+
+       if (!crtc_state->has_hdmi_sink)
+               return false;
+
+       if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+               return intel_hdmi->has_audio;
+       else
+               return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config,
                              struct drm_connector_state *conn_state)
@@ -2468,8 +2501,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        struct drm_connector *connector = conn_state->connector;
        struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
-       struct intel_digital_connector_state *intel_conn_state =
-               to_intel_digital_connector_state(conn_state);
        int ret;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2495,13 +2526,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
                pipe_config->has_pch_encoder = true;
 
-       if (pipe_config->has_hdmi_sink) {
-               if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-                       pipe_config->has_audio = intel_hdmi->has_audio;
-               else
-                       pipe_config->has_audio =
-                               intel_conn_state->force_audio == HDMI_AUDIO_ON;
-       }
+       pipe_config->has_audio =
+               intel_hdmi_has_audio(encoder, pipe_config, conn_state);
 
        ret = intel_hdmi_compute_clock(encoder, pipe_config);
        if (ret)
@@ -2667,6 +2693,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.id, connector->name);
 
+       if (!INTEL_DISPLAY_ENABLED(dev_priv))
+               return connector_status_disconnected;
+
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
        if (INTEL_GEN(dev_priv) >= 11 &&
@@ -3250,7 +3279,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                connector->ycbcr_420_allowed = true;
 
-       intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
        intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 
        if (HAS_DDI(dev_priv))
@@ -3264,7 +3292,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
        intel_hdmi->attached_connector = intel_connector;
 
        if (is_hdcp_supported(dev_priv, port)) {
-               int ret = intel_hdcp_init(intel_connector,
+               int ret = intel_hdcp_init(intel_connector, port,
                                          &intel_hdmi_hdcp_shim);
                if (ret)
                        drm_dbg_kms(&dev_priv->drm,
@@ -3335,6 +3363,8 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
 
        intel_encoder = &dig_port->base;
 
+       mutex_init(&dig_port->hdcp_mutex);
+
        drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                         &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
                         "HDMI %c", port_name(port));
@@ -3382,6 +3412,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
                intel_encoder->pipe_mask = ~0;
        }
        intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+       intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
        /*
         * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
         * to work on real hardware. And since g4x can send infoframes to
index 5b348dc..15eb0cc 100644 (file)
@@ -48,5 +48,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
                          union hdmi_infoframe *frame);
 bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
                                    const struct drm_connector_state *conn_state);
+bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
+                                   bool has_hdmi_sink, bool ycbcr420_output);
 
 #endif /* __INTEL_HDMI_H__ */
index 3f1d7b8..5c58c1e 100644 (file)
  *
  * It is only valid and used by digital port encoder.
  *
- * Return pin that is associatade with @port and HDP_NONE if no pin is
- * hard associated with that @port.
+ * Return pin that is associated with @port.
  */
 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port)
 {
-       enum phy phy = intel_port_to_phy(dev_priv, port);
-
-       /*
-        * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
-        * based on the DDI rather than the PHY (i.e., the last two outputs
-        * shold be HPD_PORT_{D,E} rather than {C,D}.  Note that this differs
-        * from the behavior of both TGL+TGP and RKL+CMP.
-        */
-       if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
-               return HPD_PORT_A + port - PORT_A;
-
-       switch (phy) {
-       case PHY_F:
-               return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
-       case PHY_A ... PHY_E:
-       case PHY_G ... PHY_I:
-               return HPD_PORT_A + phy - PHY_A;
-       default:
-               MISSING_CASE(phy);
-               return HPD_NONE;
-       }
+       return HPD_PORT_A + port - PORT_A;
 }
 
 #define HPD_STORM_DETECT_PERIOD                1000
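
With the hpd_pin assignment moved to encoder init (see the intel_hdmi.c hunk above), platform quirks can be applied where the encoder is created, and the default lookup collapses to a straight 1:1 port-to-pin offset that can no longer return HPD_NONE. For example:

    /* The simplified mapping is a plain offset from the port: */
    enum hpd_pin pin = intel_hpd_pin_default(dev_priv, PORT_C);
    /* pin == HPD_PORT_A + (PORT_C - PORT_A) == HPD_PORT_C */
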
@@ -503,7 +482,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
         * only the one of them (DP) will have ->hpd_pulse().
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
                enum port port = encoder->port;
                bool long_hpd;
 
@@ -511,7 +489,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                if (!(BIT(pin) & pin_mask))
                        continue;
 
-               if (!has_hpd_pulse)
+               if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;
 
                long_hpd = long_mask & BIT(pin);
index 1888611..e65c2de 100644 (file)
@@ -456,12 +456,6 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        return 0;
 }
 
-static enum drm_connector_status
-intel_lvds_detect(struct drm_connector *connector, bool force)
-{
-       return connector_status_connected;
-}
-
 /*
  * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
  */
@@ -490,7 +484,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-       .detect = intel_lvds_detect,
+       .detect = intel_panel_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
index 4072d70..9f23bac 100644 (file)
@@ -40,8 +40,6 @@
 #include "intel_dsi_dcs_backlight.h"
 #include "intel_panel.h"
 
-#define CRC_PMIC_PWM_PERIOD_NS 21333
-
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@ -594,10 +592,10 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
 static u32 pwm_get_backlight(struct intel_connector *connector)
 {
        struct intel_panel *panel = &connector->panel;
-       int duty_ns;
+       struct pwm_state state;
 
-       duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
-       return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+       pwm_get_state(panel->backlight.pwm, &state);
+       return pwm_get_relative_duty_cycle(&state, 100);
 }
 
 static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -671,9 +669,9 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
 static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
        struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
-       int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
 
-       pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+       pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 static void
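
These pwm_get_backlight()/pwm_set_backlight() hunks are part of a conversion from the legacy pwm_config()/pwm_get_duty_cycle() calls to the atomic PWM API: the driver keeps a struct pwm_state, expresses brightness as a duty cycle relative to a 0..100 scale, and commits period, duty cycle and enable state in one pwm_apply_state() call. A minimal sketch of the two paths, assuming only a struct pwm_device and a cached pwm_state as in intel_panel:

    #include <linux/pwm.h>

    /* Read brightness back as a percentage of the current period. */
    static u32 backlight_get_percent(struct pwm_device *pwm)
    {
            struct pwm_state state;

            pwm_get_state(pwm, &state);
            return pwm_get_relative_duty_cycle(&state, 100);
    }

    /* Scale a 0..100 level into the period and commit it atomically. */
    static void backlight_set_percent(struct pwm_device *pwm,
                                      struct pwm_state *state, u32 level)
    {
            pwm_set_relative_duty_cycle(state, level, 100);
            pwm_apply_state(pwm, state);
    }
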
@@ -842,10 +840,8 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
        struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
        struct intel_panel *panel = &connector->panel;
 
-       /* Disable the backlight */
-       intel_panel_actually_set_backlight(old_conn_state, 0);
-       usleep_range(2000, 3000);
-       pwm_disable(panel->backlight.pwm);
+       panel->backlight.pwm_state.enabled = false;
+       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -1177,9 +1173,12 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_panel *panel = &connector->panel;
+       int level = panel->backlight.level;
 
-       pwm_enable(panel->backlight.pwm);
-       intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+       level = intel_panel_compute_brightness(connector, level);
+       pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+       panel->backlight.pwm_state.enabled = true;
+       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1543,18 +1542,9 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
        return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
 }
 
-static u32 get_backlight_max_vbt(struct intel_connector *connector)
+static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_panel *panel = &connector->panel;
        u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
-       u32 pwm;
-
-       if (!panel->backlight.hz_to_pwm) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "backlight frequency conversion not supported\n");
-               return 0;
-       }
 
        if (pwm_freq_hz) {
                drm_dbg_kms(&dev_priv->drm,
@@ -1567,6 +1557,22 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
                            pwm_freq_hz);
        }
 
+       return pwm_freq_hz;
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_panel *panel = &connector->panel;
+       u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+       u32 pwm;
+
+       if (!panel->backlight.hz_to_pwm) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "backlight frequency conversion not supported\n");
+               return 0;
+       }
+
        pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
        if (!pwm) {
                drm_dbg_kms(&dev_priv->drm,
@@ -1891,8 +1897,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_panel *panel = &connector->panel;
        const char *desc;
-       u32 level, ns;
-       int retval;
+       u32 level;
 
        /* Get the right PWM chip for DSI backlight according to VBT */
        if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
@@ -1910,30 +1915,28 @@ static int pwm_setup_backlight(struct intel_connector *connector,
                return -ENODEV;
        }
 
-       /*
-        * FIXME: pwm_apply_args() should be removed when switching to
-        * the atomic PWM API.
-        */
-       pwm_apply_args(panel->backlight.pwm);
-
-       panel->backlight.min = 0; /* 0% */
        panel->backlight.max = 100; /* 100% */
-       level = intel_panel_compute_brightness(connector, 100);
-       ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+       panel->backlight.min = get_backlight_min_vbt(connector);
 
-       retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
-       if (retval < 0) {
-               drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
-               pwm_put(panel->backlight.pwm);
-               panel->backlight.pwm = NULL;
-               return retval;
-       }
+       if (pwm_is_enabled(panel->backlight.pwm)) {
+               /* PWM is already enabled, use existing settings */
+               pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+
+               level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
+                                                   100);
+               level = intel_panel_compute_brightness(connector, level);
+               panel->backlight.level = clamp(level, panel->backlight.min,
+                                              panel->backlight.max);
+               panel->backlight.enabled = true;
 
-       level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
-                            CRC_PMIC_PWM_PERIOD_NS);
-       panel->backlight.level =
-               intel_panel_compute_brightness(connector, level);
-       panel->backlight.enabled = panel->backlight.level != 0;
+               drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+                           NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+                           get_vbt_pwm_freq(dev_priv), level);
+       } else {
+               /* Set period from VBT frequency, leave other settings at 0. */
+               panel->backlight.pwm_state.period =
+                       NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+       }
 
        drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
                 desc);
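
The rewritten setup also inherits whatever the firmware left running: if the PWM is already enabled, its state is adopted wholesale (avoiding a visible flicker at boot) and the level is read back from the hardware; otherwise only the period is seeded from the VBT frequency, so e.g. a 200 Hz VBT backlight frequency becomes a period of NSEC_PER_SEC / 200 = 5,000,000 ns. Condensed from the hunk above:

    if (pwm_is_enabled(panel->backlight.pwm)) {
            /* Adopt the firmware-programmed state as-is. */
            pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
    } else {
            /* Seed only the period: e.g. 200 Hz -> 1e9 / 200 = 5,000,000 ns */
            panel->backlight.pwm_state.period =
                    NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
    }
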
@@ -2092,6 +2095,17 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
        }
 }
 
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force)
+{
+       struct drm_i915_private *i915 = to_i915(connector->dev);
+
+       if (!INTEL_DISPLAY_ENABLED(i915))
+               return connector_status_disconnected;
+
+       return connector_status_connected;
+}
+
 int intel_panel_init(struct intel_panel *panel,
                     struct drm_display_mode *fixed_mode,
                     struct drm_display_mode *downclock_mode)
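
intel_panel_detect() becomes a shared .detect hook for fixed panels (wired up in the LVDS hunk above and the DSI hunk further below), applying the same guard this series adds to the HDMI, SDVO and TV detect paths: report disconnected whenever the display core is disabled. A hypothetical connector wiring it up:

    /* Hypothetical fixed-panel connector using the shared hook. */
    static const struct drm_connector_funcs panel_connector_funcs = {
            .detect = intel_panel_detect,   /* disconnected if display off */
            .fill_modes = drm_helper_probe_single_connector_modes,
            .destroy = intel_connector_destroy,
    };
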
index 968b952..5b813fe 100644 (file)
@@ -23,6 +23,8 @@ int intel_panel_init(struct intel_panel *panel,
                     struct drm_display_mode *fixed_mode,
                     struct drm_display_mode *downclock_mode);
 void intel_panel_fini(struct intel_panel *panel);
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force);
 void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                            struct drm_display_mode *adjusted_mode);
 int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
index 2b004ee..8a9d0bd 100644 (file)
@@ -555,7 +555,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 
        if (dev_priv->psr.psr2_sel_fetch_enabled) {
                /* WA 1408330847 */
-               if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+               if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
                    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
                        intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                                     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
@@ -1109,7 +1109,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 
        /* WA 1408330847 */
        if (dev_priv->psr.psr2_sel_fetch_enabled &&
-           (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+           (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
             IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
                intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                             DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
index 5e9fb34..4eaa4aa 100644 (file)
@@ -2084,14 +2084,18 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
-       u16 response;
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
+       u16 response;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
+       if (!INTEL_DISPLAY_ENABLED(i915))
+               return connector_status_disconnected;
+
        if (!intel_sdvo_get_value(intel_sdvo,
                                  SDVO_CMD_GET_ATTACHED_DISPLAYS,
                                  &response, 2))
index 6b72223..63040cb 100644 (file)
@@ -1626,8 +1626,7 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
        hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
                                      &plane_state->uapi.dst,
                                      0, INT_MAX);
-       if (hscale < 0x10000)
-               return pixel_rate;
+       hscale = max(hscale, 0x10000u);
 
        /* Decimation steps at 2x,4x,8x,16x */
        decimate = ilog2(hscale >> 16);
@@ -1640,8 +1639,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
        limit -= decimate;
 
        /* -10% for RGB */
-       if (fb->format->cpp[0] >= 4)
-               limit--; /* -10% for RGB */
+       if (!fb->format->is_yuv)
+               limit--;
 
        /*
         * We should also do -10% if sprite scaling is enabled
@@ -2845,7 +2844,7 @@ static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
 {
        /* Wa_14010477008:tgl[a0..c0],rkl[all] */
        if (IS_ROCKETLAKE(dev_priv) ||
-           IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
+           IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
                return false;
 
        return plane_id < PLANE_SPRITE4;
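
In the g4x_sprite_min_cdclk() hunks, hscale is a 16.16 fixed-point downscale factor, so clamping it at 0x10000 (1.0) folds the old early return for upscaling into the decimation math: anything up to 1:1 gives zero decimation steps, a 4x downscale gives ilog2(0x40000 >> 16) = ilog2(4) = 2 steps, and so on. A worked instance:

    /* 16.16 fixed point: 1:1 == 0x10000 */
    unsigned int hscale = 0x40000;          /* 4x horizontal downscale */
    unsigned int decimate;

    hscale = max(hscale, 0x10000u);         /* upscaling decimates nothing */
    decimate = ilog2(hscale >> 16);         /* 0x40000 >> 16 == 4 -> 2 steps */
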
index 777032d..7a7b99b 100644 (file)
@@ -1706,6 +1706,9 @@ intel_tv_detect(struct drm_connector *connector,
        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
                    connector->base.id, connector->name, force);
 
+       if (!INTEL_DISPLAY_ENABLED(i915))
+               return connector_status_disconnected;
+
        if (force) {
                struct intel_load_detect_pipe tmp;
                int ret;
index 6faabd4..54bcc6a 100644 (file)
@@ -293,8 +293,12 @@ struct bdb_general_features {
 #define DVO_PORT_HDMIE         12                              /* 193 */
 #define DVO_PORT_DPF           13                              /* N/A */
 #define DVO_PORT_HDMIF         14                              /* N/A */
-#define DVO_PORT_DPG           15
-#define DVO_PORT_HDMIG         16
+#define DVO_PORT_DPG           15                              /* 217 */
+#define DVO_PORT_HDMIG         16                              /* 217 */
+#define DVO_PORT_DPH           17                              /* 217 */
+#define DVO_PORT_HDMIH         18                              /* 217 */
+#define DVO_PORT_DPI           19                              /* 217 */
+#define DVO_PORT_HDMII         20                              /* 217 */
 #define DVO_PORT_MIPIA         21                              /* 171 */
 #define DVO_PORT_MIPIB         22                              /* 171 */
 #define DVO_PORT_MIPIC         23                              /* 171 */
@@ -330,6 +334,8 @@ enum vbt_gmbus_ddi {
 #define DP_AUX_E 0x50
 #define DP_AUX_F 0x60
 #define DP_AUX_G 0x70
+#define DP_AUX_H 0x80
+#define DP_AUX_I 0x90
 
 #define VBT_DP_MAX_LINK_RATE_HBR3      0
 #define VBT_DP_MAX_LINK_RATE_HBR2      1
index 052e0b3..5e55229 100644 (file)
@@ -1585,6 +1585,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+       .detect = intel_panel_detect,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_connector_destroy,
index d0a5143..4070b00 100644 (file)
@@ -483,7 +483,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
 
        if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
                drm_err(&dev_priv->drm,
-                       "Cant get a suitable ratio from DSI PLL ratios\n");
+                       "Can't get a suitable ratio from DSI PLL ratios\n");
                return -ECHRNG;
        } else
                drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
index cf5ecbd..4fd3810 100644 (file)
@@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx)
        return rcu_dereference_protected(ctx->engines, true);
 }
 
-static bool __reset_engine(struct intel_engine_cs *engine)
-{
-       struct intel_gt *gt = engine->gt;
-       bool success = false;
-
-       if (!intel_has_reset_engine(gt))
-               return false;
-
-       if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
-                             &gt->reset.flags)) {
-               success = intel_engine_reset(engine, NULL) == 0;
-               clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
-                                     &gt->reset.flags);
-       }
-
-       return success;
-}
-
 static void __reset_context(struct i915_gem_context *ctx,
                            struct intel_engine_cs *engine)
 {
@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
         * kill the banned context, we fallback to doing a local reset
         * instead.
         */
-       if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
-           !intel_engine_pulse(engine))
-               return true;
-
-       /* If we are unable to send a pulse, try resetting this engine. */
-       return __reset_engine(engine);
+       return intel_engine_pulse(engine) == 0;
 }
 
 static bool
@@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
                spin_lock(&locked->active.lock);
        }
 
-       if (!i915_request_completed(rq)) {
-               if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+       if (i915_request_is_active(rq)) {
+               if (!i915_request_completed(rq))
                        *active = locked;
                ret = true;
        }
@@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
        if (!ce->timeline)
                return NULL;
 
+       /*
+        * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
+        * to the request to prevent it being transferred to a new timeline
+        * (and onto a new timeline->requests list).
+        */
        rcu_read_lock();
-       list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
-               if (i915_request_is_active(rq) && i915_request_completed(rq))
-                       continue;
+       list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+               bool found;
+
+               /* timeline is already completed up to this point? */
+               if (!i915_request_get_rcu(rq))
+                       break;
 
                /* Check with the backend if the request is inflight */
-               if (__active_engine(rq, &engine))
+               found = true;
+               if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
+                       found = __active_engine(rq, &engine);
+
+               i915_request_put(rq);
+               if (found)
                        break;
        }
        rcu_read_unlock();
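
The rewritten walk implements the rule stated in the new comment: with SLAB_TYPESAFE_BY_RCU, a request reached through rq->link may be freed and recycled at any moment, so it must first be pinned with i915_request_get_rcu() (which fails once the refcount has already dropped to zero) and its timeline re-checked before anything else is trusted. The general shape of such a lookup, sketched with a hypothetical inspect() consumer:

    rcu_read_lock();
    list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
            if (!i915_request_get_rcu(rq))
                    break;  /* already freed; everything older is too */

            /* Re-validate: the slab object may have been recycled. */
            if (rcu_access_pointer(rq->timeline) == ce->timeline)
                    inspect(rq);    /* hypothetical consumer */

            i915_request_put(rq);
    }
    rcu_read_unlock();
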
@@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
        return engine;
 }
 
-static void kill_engines(struct i915_gem_engines *engines)
+static void kill_engines(struct i915_gem_engines *engines, bool ban)
 {
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
@@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines)
        for_each_gem_engine(ce, engines, it) {
                struct intel_engine_cs *engine;
 
-               if (intel_context_set_banned(ce))
+               if (ban && intel_context_set_banned(ce))
                        continue;
 
                /*
@@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines)
                engine = active_engine(ce);
 
                /* First attempt to gracefully cancel the context */
-               if (engine && !__cancel_engine(engine))
+               if (engine && !__cancel_engine(engine) && ban)
                        /*
                         * If we are unable to send a preemptive pulse to bump
                         * the context from the GPU, we have to resort to a full
@@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines)
        }
 }
 
-static void kill_stale_engines(struct i915_gem_context *ctx)
+static void kill_context(struct i915_gem_context *ctx)
 {
+       bool ban = (!i915_gem_context_is_persistent(ctx) ||
+                   !ctx->i915->params.enable_hangcheck);
        struct i915_gem_engines *pos, *next;
 
        spin_lock_irq(&ctx->stale.lock);
@@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
 
                spin_unlock_irq(&ctx->stale.lock);
 
-               kill_engines(pos);
+               kill_engines(pos, ban);
 
                spin_lock_irq(&ctx->stale.lock);
                GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
        spin_unlock_irq(&ctx->stale.lock);
 }
 
-static void kill_context(struct i915_gem_context *ctx)
-{
-       kill_stale_engines(ctx);
-}
-
 static void engines_idle_release(struct i915_gem_context *ctx,
                                 struct i915_gem_engines *engines)
 {
@@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,
 
 kill:
        if (list_empty(&engines->link)) /* raced, already closed */
-               kill_engines(engines);
+               kill_engines(engines, true);
 
        i915_sw_fence_commit(&engines->fence);
 }
@@ -654,9 +641,7 @@ static void context_close(struct i915_gem_context *ctx)
         * case we opt to forcibly kill off all remaining requests on
         * context close.
         */
-       if (!i915_gem_context_is_persistent(ctx) ||
-           !ctx->i915->params.enable_hangcheck)
-               kill_context(ctx);
+       kill_context(ctx);
 
        i915_gem_context_put(ctx);
 }
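
context_close() now always funnels through kill_context(); the ban flag, true for non-persistent contexts or when hangcheck is disabled, decides whether outstanding engines are actually banned and reset rather than merely nudged with a pulse. Userspace opts a context out of persistence through the existing context param; a hedged userspace sketch:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Make a context non-persistent: closing it bans and resets its work. */
    static int context_set_nonpersistent(int drm_fd, __u32 ctx_id)
    {
            struct drm_i915_gem_context_param p = {
                    .ctx_id = ctx_id,
                    .param = I915_CONTEXT_PARAM_PERSISTENCE,
                    .value = 0,
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
    }
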
index 27fddc2..8dd295d 100644 (file)
@@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
                src = sg_next(src);
        }
 
-       if (!dma_map_sg_attrs(attachment->dev,
-                             st->sgl, st->nents, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC)) {
-               ret = -ENOMEM;
+       ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
+       if (ret)
                goto err_free_sg;
-       }
 
        return st;
 
@@ -73,9 +70,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-       dma_unmap_sg_attrs(attachment->dev,
-                          sg->sgl, sg->nents, dir,
-                          DMA_ATTR_SKIP_CPU_SYNC);
+       dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
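
Both dma-buf hunks in this section (here and in the mock selftest below) are the same conversion: dma_map_sgtable() operates on the whole sg_table, stores the mapped entry count in sgt->nents (leaving sgt->orig_nents for freeing), and reports failure through its return value rather than a zero count, so callers stop synthesizing -ENOMEM. The resulting pattern, sketched:

    #include <linux/dma-mapping.h>

    static int map_attachment_table(struct device *dev, struct sg_table *st,
                                    enum dma_data_direction dir)
    {
            /* Returns 0 or a real errno; no more guessing from nents == 0. */
            int ret = dma_map_sgtable(dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);

            if (ret)
                    return ret;

            /* ... use the mapping; st->nents holds the mapped count ... */

            dma_unmap_sgtable(dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
            return 0;
    }
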
 
index 5509946..4b09bcd 100644 (file)
@@ -2267,8 +2267,8 @@ struct eb_parse_work {
        struct i915_vma *batch;
        struct i915_vma *shadow;
        struct i915_vma *trampoline;
-       unsigned int batch_offset;
-       unsigned int batch_length;
+       unsigned long batch_offset;
+       unsigned long batch_length;
 };
 
 static int __eb_parse(struct dma_fence_work *work)
@@ -2338,6 +2338,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
        struct eb_parse_work *pw;
        int err;
 
+       GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+       GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
+
        pw = kzalloc(sizeof(*pw), GFP_KERNEL);
        if (!pw)
                return -ENOMEM;
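
batch_offset and batch_length are widened to unsigned long, and the new GEM_BUG_ON()s document that the user-supplied eb values must still fit the worker's fields. overflows_type() is an i915 utility macro (from i915_utils.h); roughly, it tests whether a value would be truncated by assignment to the destination type, along the lines of:

    /* Rough shape of the helper; the real definition is in i915_utils.h. */
    #define overflows_type(x, T) \
            (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
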
index d93eb36..aee7ad3 100644 (file)
@@ -364,7 +364,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 
        vma[1] = i915_vma_instance(dst, vm, NULL);
        if (IS_ERR(vma[1]))
-               return PTR_ERR(vma);
+               return PTR_ERR(vma[1]);
 
        i915_gem_ww_ctx_init(&ww, true);
        intel_engine_pm_get(ce->engine);
index e8a0837..d6eeefa 100644 (file)
@@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
        if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
                return NULL;
 
+       if (GEM_WARN_ON(type == I915_MAP_WC &&
+                       !static_cpu_has(X86_FEATURE_PAT)))
+               return NULL;
+
        /* A single page can always be kmapped */
-       if (n_pte == 1 && type == I915_MAP_WB)
-               return kmap(sg_page(sgt->sgl));
+       if (n_pte == 1 && type == I915_MAP_WB) {
+               struct page *page = sg_page(sgt->sgl);
+
+               /*
+                * On 32b, highmem uses a finite set of indirect PTEs (i.e.
+                * kmap) to provide virtual mappings of the high pages.
+                * As these are finite, map_new_virtual() must wait for some
+                * other kmap() to finish when it runs out. If we map a large
+                * number of objects, there is no method for it to tell us
+                * to release the mappings, and we deadlock.
+                *
+                * However, if we make an explicit vmap of the page, that
+                * uses a larger vmalloc arena, and also has the ability
+                * to tell us to release unwanted mappings. Most importantly,
+                * it will fail and propagate an error instead of waiting
+                * forever.
+                *
+                * So if the page is beyond the 32b boundary, make an explicit
+                * vmap. On 64b, this check will be optimised away as we can
+                * directly kmap any page on the system.
+                */
+               if (!PageHighMem(page))
+                       return kmap(page);
+       }
 
        mem = stack;
        if (n_pte > ARRAY_SIZE(stack)) {
index debaf7b..be30b27 100644 (file)
@@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
                sg = sg_next(sg);
        }
 
-       if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-               err = -ENOMEM;
+       err = dma_map_sgtable(attachment->dev, st, dir, 0);
+       if (err)
                goto err_st;
-       }
 
        return st;
 
@@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
                               struct sg_table *st,
                               enum dma_data_direction dir)
 {
-       dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
+       dma_unmap_sgtable(attachment->dev, st, dir, 0);
        sg_free_table(st);
        kfree(st);
 }
index d301dda..92a3f25 100644 (file)
@@ -472,6 +472,7 @@ retry:
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
+               rq = ERR_PTR(err);
        } else {
                rq = ERR_PTR(err);
        }
index 08e2c00..7c3a101 100644 (file)
@@ -337,4 +337,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
        return intel_engine_has_preemption(engine);
 }
 
+static inline bool
+intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+       if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+               return false;
+
+       return READ_ONCE(engine->props.heartbeat_interval_ms);
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */
index 8ffdf67..5067d05 100644 (file)
@@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
        INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
 }
 
+static int __intel_engine_pulse(struct intel_engine_cs *engine)
+{
+       struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+       struct intel_context *ce = engine->kernel_context;
+       struct i915_request *rq;
+
+       lockdep_assert_held(&ce->timeline->mutex);
+       GEM_BUG_ON(!intel_engine_has_preemption(engine));
+       GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+       intel_context_enter(ce);
+       rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+       intel_context_exit(ce);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+       idle_pulse(engine, rq);
+
+       __i915_request_commit(rq);
+       __i915_request_queue(rq, &attr);
+       GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+
+       return 0;
+}
+
+static unsigned long set_heartbeat(struct intel_engine_cs *engine,
+                                  unsigned long delay)
+{
+       unsigned long old;
+
+       old = xchg(&engine->props.heartbeat_interval_ms, delay);
+       if (delay)
+               intel_engine_unpark_heartbeat(engine);
+       else
+               intel_engine_park_heartbeat(engine);
+
+       return old;
+}
+
 int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
                               unsigned long delay)
 {
-       int err;
+       struct intel_context *ce = engine->kernel_context;
+       int err = 0;
 
-       /* Send one last pulse before to cleanup persistent hogs */
-       if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
-               err = intel_engine_pulse(engine);
-               if (err)
-                       return err;
-       }
+       if (!delay && !intel_engine_has_preempt_reset(engine))
+               return -ENODEV;
+
+       intel_engine_pm_get(engine);
+
+       err = mutex_lock_interruptible(&ce->timeline->mutex);
+       if (err)
+               goto out_rpm;
 
-       WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+       if (delay != engine->props.heartbeat_interval_ms) {
+               unsigned long saved = set_heartbeat(engine, delay);
 
-       if (intel_engine_pm_get_if_awake(engine)) {
-               if (delay)
-                       intel_engine_unpark_heartbeat(engine);
-               else
-                       intel_engine_park_heartbeat(engine);
-               intel_engine_pm_put(engine);
+               /* recheck current execution */
+               if (intel_engine_has_preemption(engine)) {
+                       err = __intel_engine_pulse(engine);
+                       if (err)
+                               set_heartbeat(engine, saved);
+               }
        }
 
-       return 0;
+       mutex_unlock(&ce->timeline->mutex);
+
+out_rpm:
+       intel_engine_pm_put(engine);
+       return err;
 }
 
 int intel_engine_pulse(struct intel_engine_cs *engine)
 {
-       struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
        struct intel_context *ce = engine->kernel_context;
-       struct i915_request *rq;
        int err;
 
        if (!intel_engine_has_preemption(engine))
@@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
        if (!intel_engine_pm_get_if_awake(engine))
                return 0;
 
-       if (mutex_lock_interruptible(&ce->timeline->mutex)) {
-               err = -EINTR;
-               goto out_rpm;
-       }
-
-       intel_context_enter(ce);
-       rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
-       intel_context_exit(ce);
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto out_unlock;
+       err = -EINTR;
+       if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
+               err = __intel_engine_pulse(engine);
+               mutex_unlock(&ce->timeline->mutex);
        }
 
-       __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
-       idle_pulse(engine, rq);
-
-       __i915_request_commit(rq);
-       __i915_request_queue(rq, &attr);
-       GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
-       err = 0;
-
-out_unlock:
-       mutex_unlock(&ce->timeline->mutex);
-out_rpm:
        intel_engine_pm_put(engine);
        return err;
 }
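
Both entry points now funnel through __intel_engine_pulse() under ce->timeline->mutex, and set_heartbeat() uses xchg() so the interval swap is atomic and the old value can be restored if the confirming pulse fails. A minimal sketch of that swap-and-rollback idiom, with hypothetical names:

    static int update_interval(unsigned long *slot, unsigned long new_ms,
                               int (*confirm)(void))
    {
            unsigned long old = xchg(slot, new_ms); /* atomic, returns old */
            int err;

            err = confirm();                /* e.g. send a confirming pulse */
            if (err)
                    xchg(slot, old);        /* roll back on failure */

            return err;
    }
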
index 4b7671a..104cb30 100644 (file)
@@ -134,6 +134,7 @@ static void pool_retire(struct i915_active *ref)
        /* Return this object to the shrinker pool */
        i915_gem_object_make_purgeable(node->obj);
 
+       GEM_BUG_ON(node->age);
        spin_lock_irqsave(&pool->lock, flags);
        list_add_rcu(&node->link, list);
        WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
@@ -155,6 +156,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
        if (!node)
                return ERR_PTR(-ENOMEM);
 
+       node->age = 0;
        node->pool = pool;
        i915_active_init(&node->active, pool_active, pool_retire);
 
index a3f72b7..6c580d0 100644 (file)
@@ -70,6 +70,19 @@ const struct i915_rev_steppings kbl_revids[] = {
        [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
 };
 
+const struct i915_rev_steppings tgl_uy_revids[] = {
+       [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
+       [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
+       [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
+       [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
+};
+
+/* The same GT stepping in tgl_uy_revids and tgl_revids doesn't mean the same HW */
+const struct i915_rev_steppings tgl_revids[] = {
+       [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
+       [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
+};
+
 static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
 {
        wal->name = name;
@@ -1219,13 +1232,13 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
        gen12_gt_workarounds_init(i915, wal);
 
        /* Wa_1409420604:tgl */
-       if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+       if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
                wa_write_or(wal,
                            SUBSLICE_UNIT_LEVEL_CLKGATE2,
                            CPSSUNIT_CLKGATE_DIS);
 
        /* Wa_1607087056:tgl, also known as BUG:1409180338 */
-       if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+       if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1660,7 +1673,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+       if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
                /*
                 * Wa_1607138336:tgl
                 * Wa_1607063988:tgl
@@ -1700,7 +1713,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                 * Wa_1407928979:tgl A*
                 * Wa_18011464164:tgl B0+
                 * Wa_22010931296:tgl B0+
-                * Wa_14010919138:rkl
+                * Wa_14010919138:rkl,tgl
                 */
                wa_write_or(wal, GEN7_FF_THREAD_MODE,
                            GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
@@ -1716,15 +1729,23 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                             GEN6_RC_SLEEP_PSMI_CONTROL,
                             GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
                             GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-       }
 
-       if (IS_TIGERLAKE(i915)) {
-               /* Wa_1606700617:tgl */
+               /*
+                * Wa_1606700617:tgl
+                * Wa_22010271021:tgl,rkl
+                */
                wa_masked_en(wal,
                             GEN9_CS_DEBUG_MODE1,
                             FF_DOP_CLOCK_GATE_DISABLE);
        }
 
+       if (IS_GEN(i915, 12)) {
+               /* Wa_1406941453:gen12 */
+               wa_masked_en(wal,
+                            GEN10_SAMPLER_MODE,
+                            ENABLE_SMALLPL);
+       }
+
        if (IS_GEN(i915, 11)) {
                /* This is not a Wa. Enable for better image quality */
                wa_masked_en(wal,
index d0a599b..16b582c 100644 (file)
@@ -936,7 +936,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                return -EFAULT;
        }
 
-       if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+       if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
                gvt_vgpu_err("%s access to non-render register (%x)\n",
                                cmd, offset);
                return -EBADRQC;
@@ -976,7 +976,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
         * inhibit context will restore with correct values
         */
        if (IS_GEN(s->engine->i915, 9) &&
-           intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+           intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
            !strncmp(cmd, "lri", 3)) {
                intel_gvt_hypervisor_read_gpa(s->vgpu,
                        s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
@@ -992,8 +992,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                }
        }
 
-       /* TODO: Update the global mask if this MMIO is a masked-MMIO */
-       intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
 }
 
index ff7f251..9831361 100644 (file)
@@ -256,11 +256,11 @@ struct intel_gvt_mmio {
 /* This reg has been accessed by a VM */
 #define F_ACCESSED     (1 << 4)
 /* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED (1 << 5)
-/* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
-/* This reg is saved/restored in context */
-#define F_IN_CTX       (1 << 7)
+/* This reg is in GVT's mmio save-restore list and in the hardware
+ * logical context image
+ */
+#define F_SR_IN_CTX    (1 << 7)
 
        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;
@@ -597,39 +597,42 @@ static inline void intel_gvt_mmio_set_accessed(
 }
 
 /**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by GPU commands
  * @gvt: a GVT device
  * @offset: register offset
  *
+ * Returns:
+ * True if the MMIO can be accessed by GPU commands
  */
-static inline bool intel_gvt_mmio_is_cmd_access(
+static inline bool intel_gvt_mmio_is_cmd_accessible(
                        struct intel_gvt *gvt, unsigned int offset)
 {
        return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
 }
 
 /**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
+ * intel_gvt_mmio_set_cmd_accessible -
+ *                             mark an MMIO as accessible by GPU commands
  * @gvt: a GVT device
  * @offset: register offset
  *
  */
-static inline bool intel_gvt_mmio_is_unalign(
+static inline void intel_gvt_mmio_set_cmd_accessible(
                        struct intel_gvt *gvt, unsigned int offset)
 {
-       return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
 }
 
 /**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
+ * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
  * @gvt: a GVT device
  * @offset: register offset
  *
  */
-static inline void intel_gvt_mmio_set_cmd_accessed(
+static inline bool intel_gvt_mmio_is_unalign(
                        struct intel_gvt *gvt, unsigned int offset)
 {
-       gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
 }
 
 /**
@@ -648,30 +651,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 }
 
 /**
- * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * intel_gvt_mmio_is_sr_in_ctx -
+ *             check if an MMIO has the F_SR_IN_CTX mask
  * @gvt: a GVT device
  * @offset: register offset
  *
  * Returns:
- * True if a MMIO has a in-context mask, false if it isn't.
+ * True if the MMIO has the F_SR_IN_CTX mask, false otherwise.
  *
  */
-static inline bool intel_gvt_mmio_is_in_ctx(
+static inline bool intel_gvt_mmio_is_sr_in_ctx(
                        struct intel_gvt *gvt, unsigned int offset)
 {
-       return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
 }
 
 /**
- * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * intel_gvt_mmio_set_sr_in_ctx -
+ *             mark an MMIO as being in GVT's mmio save-restore list and
+ *             in the hardware logical context image
  * @gvt: a GVT device
  * @offset: register offset
  *
  */
-static inline void intel_gvt_mmio_set_in_ctx(
+static inline void intel_gvt_mmio_set_sr_in_ctx(
                        struct intel_gvt *gvt, unsigned int offset)
 {
-       gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
 }
 
 void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
index 05f3bc9..3be37e6 100644 (file)
@@ -1892,7 +1892,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        struct drm_i915_private *dev_priv = gvt->gt->i915;
        int ret;
 
-       MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+       MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
                intel_vgpu_reg_imr_handler);
 
        MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
@@ -1900,7 +1900,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
        MMIO_D(SDEISR, D_ALL);
 
-       MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
+
 
        MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
                gamw_echo_dev_rw_ia_write);
@@ -1927,11 +1928,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
        MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-       MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
-       MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
+       MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
+       MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
+       MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
+       MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
+       MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
 
        /* RING MODE */
 #define RING_REG(base) _MMIO((base) + 0x29c)
@@ -2686,7 +2687,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
        MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+       MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
        MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2771,7 +2772,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
                intel_vgpu_reg_master_irq_handler);
 
-       MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+       MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
                mmio_read_from_hw, NULL);
 
 #define RING_REG(base) _MMIO((base) + 0xd0)
@@ -2785,7 +2786,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) _MMIO((base) + 0x234)
-       MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+       MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
                NULL, NULL);
 #undef RING_REG
 
@@ -2820,7 +2821,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-       MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
+       MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
 
        MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -2921,7 +2922,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+       MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
        MMIO_D(DC_STATE_EN, D_SKL_PLUS);
        MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
@@ -3137,7 +3138,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                 NULL, NULL);
 
-       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+       MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
        return 0;
@@ -3357,7 +3358,10 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
        gvt->mmio.mmio_attribute = NULL;
 }
 
-/* Special MMIO blocks. */
+/* Special MMIO blocks. Registers in MMIO block ranges should not be command
+ * accessible (should have no F_CMD_ACCESS flag); otherwise,
+ * cmd_reg_handler() in cmd_parser.c needs to be updated accordingly.
+ */
 static struct gvt_mmio_block mmio_blocks[] = {
        {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
        {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
index 2919936..b6811f6 100644 (file)
@@ -251,6 +251,9 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
                /* set the bit 0:2(Core C-State ) to C0 */
                vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
+               /* the uC reset HW expects GS_MIA_IN_RESET to be set */
+               vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+
                if (IS_BROXTON(vgpu->gvt->gt->i915)) {
                        vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
                                    ~(BIT(0) | BIT(1));
index 86a60bd..afe574d 100644 (file)
@@ -595,7 +595,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
             i915_mmio_reg_valid(mmio->reg); mmio++) {
                if (mmio->in_context) {
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
-                       intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+                       intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
                }
        }
 }
index 5ac4a99..e889702 100644 (file)
@@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                       struct drm_i915_gem_object *src_obj,
-                      u32 offset, u32 length)
+                      unsigned long offset, unsigned long length)
 {
        bool needs_clflush;
        void *dst, *src;
@@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                }
        }
        if (IS_ERR(src)) {
+               unsigned long x, n;
                void *ptr;
-               int x, n;
 
                /*
                 * We can avoid clflushing partial cachelines before the write
@@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                ptr = dst;
                x = offset_in_page(offset);
                for (n = offset >> PAGE_SHIFT; length; n++) {
-                       int len = min_t(int, length, PAGE_SIZE - x);
+                       int len = min(length, PAGE_SIZE - x);
 
                        src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
                        if (needs_clflush)
@@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
  */
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
-                           u32 batch_offset,
-                           u32 batch_length,
+                           unsigned long batch_offset,
+                           unsigned long batch_length,
                            struct i915_vma *shadow,
                            bool trampoline)
 {
index 7842199..ea46916 100644 (file)
@@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m,
                }
                i915_gem_context_unlock_engines(ctx);
 
+               mutex_lock(&ctx->mutex);
                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        struct file_stats stats = {
                                .vm = rcu_access_pointer(ctx->vm),
@@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m,
 
                        print_file_stats(m, name, stats);
                }
+               mutex_unlock(&ctx->mutex);
 
                spin_lock(&i915->gem.contexts.lock);
                list_safe_reset_next(ctx, cn, link);
index 00292a8..acc3206 100644 (file)
@@ -58,7 +58,6 @@
 #include "display/intel_hotplug.h"
 #include "display/intel_overlay.h"
 #include "display/intel_pipe_crc.h"
-#include "display/intel_psr.h"
 #include "display/intel_sprite.h"
 #include "display/intel_vga.h"
 
@@ -216,125 +215,6 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
                release_resource(&dev_priv->mch_res);
 }
 
-/* part #1: call before irq install */
-static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
-{
-       int ret;
-
-       if (i915_inject_probe_failure(i915))
-               return -ENODEV;
-
-       if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
-               ret = drm_vblank_init(&i915->drm,
-                                     INTEL_NUM_PIPES(i915));
-               if (ret)
-                       return ret;
-       }
-
-       intel_bios_init(i915);
-
-       ret = intel_vga_register(i915);
-       if (ret)
-               goto cleanup_bios;
-
-       intel_power_domains_init_hw(i915, false);
-
-       intel_csr_ucode_init(i915);
-
-       ret = intel_modeset_init_noirq(i915);
-       if (ret)
-               goto cleanup_vga_client_pw_domain_csr;
-
-       return 0;
-
-cleanup_vga_client_pw_domain_csr:
-       intel_csr_ucode_fini(i915);
-       intel_power_domains_driver_remove(i915);
-       intel_vga_unregister(i915);
-cleanup_bios:
-       intel_bios_driver_remove(i915);
-       return ret;
-}
-
-/* part #2: call after irq install */
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
-{
-       int ret;
-
-       /* Important: The output setup functions called by modeset_init need
-        * working irqs for e.g. gmbus and dp aux transfers. */
-       ret = intel_modeset_init(i915);
-       if (ret)
-               goto out;
-
-       ret = i915_gem_init(i915);
-       if (ret)
-               goto cleanup_modeset;
-
-       intel_overlay_setup(i915);
-
-       if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
-               return 0;
-
-       ret = intel_fbdev_init(&i915->drm);
-       if (ret)
-               goto cleanup_gem;
-
-       /* Only enable hotplug handling once the fbdev is fully set up. */
-       intel_hpd_init(i915);
-
-       intel_init_ipc(i915);
-
-       intel_psr_set_force_mode_changed(i915->psr.dp);
-
-       return 0;
-
-cleanup_gem:
-       i915_gem_suspend(i915);
-       i915_gem_driver_remove(i915);
-       i915_gem_driver_release(i915);
-cleanup_modeset:
-       /* FIXME */
-       intel_modeset_driver_remove(i915);
-       intel_irq_uninstall(i915);
-       intel_modeset_driver_remove_noirq(i915);
-out:
-       return ret;
-}
-
-/* part #1: call before irq uninstall */
-static void i915_driver_modeset_remove(struct drm_i915_private *i915)
-{
-       intel_modeset_driver_remove(i915);
-}
-
-/* part #2: call after irq uninstall */
-static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
-{
-       intel_csr_ucode_fini(i915);
-
-       intel_power_domains_driver_remove(i915);
-
-       intel_vga_unregister(i915);
-
-       intel_bios_driver_remove(i915);
-}
-
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
-{
-       /*
-        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
-        * CHV x1 PHY (DP/HDMI D)
-        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
-        */
-       if (IS_CHERRYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
-               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
-       }
-}
-
 static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 {
        /*
@@ -463,7 +343,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
        intel_detect_pch(dev_priv);
 
        intel_pm_setup(dev_priv);
-       intel_init_dpio(dev_priv);
        ret = intel_power_domains_init(dev_priv);
        if (ret < 0)
                goto err_gem;
@@ -798,7 +677,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
                drm_err(&dev_priv->drm,
                        "Failed to register driver for userspace access!\n");
 
-       if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
+       if (HAS_DISPLAY(dev_priv)) {
                /* Must be done after probing outputs */
                intel_opregion_register(dev_priv);
                acpi_video_register();
@@ -821,7 +700,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
         * We need to coordinate the hotplugs with the asynchronous fbdev
         * configuration, for which we use the fbdev->async_cookie.
         */
-       if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
+       if (HAS_DISPLAY(dev_priv))
                drm_kms_helper_poll_init(dev);
 
        intel_power_domains_enable(dev_priv);
@@ -988,7 +867,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret < 0)
                goto out_cleanup_mmio;
 
-       ret = i915_driver_modeset_probe_noirq(i915);
+       ret = intel_modeset_init_noirq(i915);
        if (ret < 0)
                goto out_cleanup_hw;
 
@@ -996,10 +875,18 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_cleanup_modeset;
 
-       ret = i915_driver_modeset_probe(i915);
-       if (ret < 0)
+       ret = intel_modeset_init_nogem(i915);
+       if (ret)
                goto out_cleanup_irq;
 
+       ret = i915_gem_init(i915);
+       if (ret)
+               goto out_cleanup_modeset2;
+
+       ret = intel_modeset_init(i915);
+       if (ret)
+               goto out_cleanup_gem;
+
        i915_driver_register(i915);
 
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1010,10 +897,20 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+out_cleanup_gem:
+       i915_gem_suspend(i915);
+       i915_gem_driver_remove(i915);
+       i915_gem_driver_release(i915);
+out_cleanup_modeset2:
+       /* FIXME clean up the error path */
+       intel_modeset_driver_remove(i915);
+       intel_irq_uninstall(i915);
+       intel_modeset_driver_remove_noirq(i915);
+       goto out_cleanup_modeset;
 out_cleanup_irq:
        intel_irq_uninstall(i915);
 out_cleanup_modeset:
-       i915_driver_modeset_remove_noirq(i915);
+       intel_modeset_driver_remove_nogem(i915);
 out_cleanup_hw:
        i915_driver_hw_remove(i915);
        intel_memory_regions_driver_release(i915);
@@ -1045,7 +942,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
        intel_gvt_driver_remove(i915);
 
-       i915_driver_modeset_remove(i915);
+       intel_modeset_driver_remove(i915);
 
        intel_irq_uninstall(i915);
 
@@ -1054,7 +951,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
        i915_reset_error_state(i915);
        i915_gem_driver_remove(i915);
 
-       i915_driver_modeset_remove_noirq(i915);
+       intel_modeset_driver_remove_nogem(i915);
 
        i915_driver_hw_remove(i915);
 
index ab17084..eef9a82 100644 (file)
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20200824"
-#define DRIVER_TIMESTAMP       1598293597
+#define DRIVER_DATE            "20200917"
+#define DRIVER_TIMESTAMP       1600375437
 
 struct drm_i915_gem_object;
 
-/*
- * The code assumes that the hpd_pins below have consecutive values and
- * starting with HPD_PORT_A, the HPD pin associated with any port can be
- * retrieved by adding the corresponding port (or phy) enum value to
- * HPD_PORT_A in most cases. For example:
- * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
- */
 enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
@@ -131,10 +124,12 @@ enum hpd_pin {
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
-       HPD_PORT_F,
-       HPD_PORT_G,
-       HPD_PORT_H,
-       HPD_PORT_I,
+       HPD_PORT_TC1,
+       HPD_PORT_TC2,
+       HPD_PORT_TC3,
+       HPD_PORT_TC4,
+       HPD_PORT_TC5,
+       HPD_PORT_TC6,
 
        HPD_NUM_PINS
 };
@@ -537,13 +532,9 @@ struct intel_gmbus {
 
 struct i915_suspend_saved_registers {
        u32 saveDSPARB;
-       u32 saveFBC_CONTROL;
-       u32 saveCACHE_MODE_0;
-       u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
-       u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
 };
 
@@ -1020,8 +1011,6 @@ struct drm_i915_private {
         */
        u8 active_pipes;
 
-       int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
-
        struct i915_wa_list gt_wa_list;
 
        struct i915_frontbuffer_tracking fb_tracking;
@@ -1572,12 +1561,41 @@ extern const struct i915_rev_steppings kbl_revids[];
 #define IS_EHL_REVID(p, since, until) \
        (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
 
-#define TGL_REVID_A0           0x0
-#define TGL_REVID_B0           0x1
-#define TGL_REVID_C0           0x2
+enum {
+       TGL_REVID_A0,
+       TGL_REVID_B0,
+       TGL_REVID_B1,
+       TGL_REVID_C0,
+       TGL_REVID_D0,
+};
+
+extern const struct i915_rev_steppings tgl_uy_revids[];
+extern const struct i915_rev_steppings tgl_revids[];
+
+static inline const struct i915_rev_steppings *
+tgl_revids_get(struct drm_i915_private *dev_priv)
+{
+       if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
+               return tgl_uy_revids;
+       else
+               return tgl_revids;
+}
+
+#define IS_TGL_DISP_REVID(p, since, until) \
+       (IS_TIGERLAKE(p) && \
+        tgl_revids_get(p)->disp_stepping >= (since) && \
+        tgl_revids_get(p)->disp_stepping <= (until))
+
+#define IS_TGL_UY_GT_REVID(p, since, until) \
+       ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
+        tgl_uy_revids->gt_stepping >= (since) && \
+        tgl_uy_revids->gt_stepping <= (until))
 
-#define IS_TGL_REVID(p, since, until) \
-       (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+#define IS_TGL_GT_REVID(p, since, until) \
+       (IS_TIGERLAKE(p) && \
+        !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
+        tgl_revids->gt_stepping >= (since) && \
+        tgl_revids->gt_stepping <= (until))
 
 #define RKL_REVID_A0           0x0
 #define RKL_REVID_B0           0x1
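
A single TGL PCI revision now indexes per-SKU tables that give separate GT and display steppings, since U/Y parts map the same revision byte to different silicon than other TGL parts. A hedged sketch of how a workaround would pick the matching check (wa_apply_gt() and wa_apply_display() are hypothetical):

    /* illustrative: same revision byte, different stepping tables */
    if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
            wa_apply_gt(i915);              /* U/Y parts, GT stepping A0 */
    else if (IS_TGL_DISP_REVID(i915, TGL_REVID_A0, TGL_REVID_B0))
            wa_apply_display(i915);         /* any TGL, display A0..B0 */
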
@@ -1931,8 +1949,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
-                           u32 batch_offset,
-                           u32 batch_length,
+                           unsigned long batch_offset,
+                           unsigned long batch_length,
                            struct i915_vma *shadow,
                            bool trampoline);
 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
index 3e6cbb0..a635ec8 100644 (file)
@@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c,
 
                if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
                        return -EIO;
+
+               cond_resched();
        } while (zstream->avail_in);
 
        /* Fallback to uncompressed if we increase size? */
@@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c,
        if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
                memcpy(ptr, src, PAGE_SIZE);
        dst->pages[dst->page_count++] = ptr;
+       cond_resched();
 
        return 0;
 }
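
Both compress_page() variants now yield between pages so a large error-state capture cannot monopolize the CPU. The shape of the idiom, as an illustrative loop (not the driver's exact code):

    /* illustrative: yield periodically inside a long page-copy loop */
    for (i = 0; i < npages; i++) {
            copy_page(dst[i], src[i]);
            cond_resched();         /* give the scheduler a chance */
    }
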
index f113fe4..759f523 100644 (file)
@@ -132,40 +132,24 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
 };
 
 static const u32 hpd_gen11[HPD_NUM_PINS] = {
-       [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
-       [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
-       [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
-       [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
-};
-
-static const u32 hpd_gen12[HPD_NUM_PINS] = {
-       [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
-       [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
-       [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
-       [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
-       [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
-       [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
+       [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
+       [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
+       [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
+       [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
+       [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
+       [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
 };
 
 static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
-       [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
-       [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
-       [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
-       [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
-};
-
-static const u32 hpd_tgp[HPD_NUM_PINS] = {
-       [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
-       [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
-       [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
-       [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
-       [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
-       [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
-       [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
-       [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
+       [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+       [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+       [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+       [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+       [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+       [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
 };
 
 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
@@ -181,9 +165,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
                return;
        }
 
-       if (INTEL_GEN(dev_priv) >= 12)
-               hpd->hpd = hpd_gen12;
-       else if (INTEL_GEN(dev_priv) >= 11)
+       if (INTEL_GEN(dev_priv) >= 11)
                hpd->hpd = hpd_gen11;
        else if (IS_GEN9_LP(dev_priv))
                hpd->hpd = hpd_bxt;
@@ -197,9 +179,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
        if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
                return;
 
-       if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
-               hpd->pch_hpd = hpd_tgp;
-       else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+       if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
+           HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
                hpd->pch_hpd = hpd_icp;
        else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
                hpd->pch_hpd = hpd_spt;
@@ -1049,33 +1030,17 @@ out:
 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
        switch (pin) {
-       case HPD_PORT_C:
-               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
-       case HPD_PORT_D:
-               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
-       case HPD_PORT_E:
-               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
-       case HPD_PORT_F:
-               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
-       default:
-               return false;
-       }
-}
-
-static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
-       switch (pin) {
-       case HPD_PORT_D:
+       case HPD_PORT_TC1:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
-       case HPD_PORT_E:
+       case HPD_PORT_TC2:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
-       case HPD_PORT_F:
+       case HPD_PORT_TC3:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
-       case HPD_PORT_G:
+       case HPD_PORT_TC4:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
-       case HPD_PORT_H:
+       case HPD_PORT_TC5:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
-       case HPD_PORT_I:
+       case HPD_PORT_TC6:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
        default:
                return false;
@@ -1113,33 +1078,17 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
        switch (pin) {
-       case HPD_PORT_C:
-               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
-       case HPD_PORT_D:
-               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
-       case HPD_PORT_E:
-               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
-       case HPD_PORT_F:
-               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
-       default:
-               return false;
-       }
-}
-
-static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
-       switch (pin) {
-       case HPD_PORT_D:
+       case HPD_PORT_TC1:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
-       case HPD_PORT_E:
+       case HPD_PORT_TC2:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
-       case HPD_PORT_F:
+       case HPD_PORT_TC3:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
-       case HPD_PORT_G:
+       case HPD_PORT_TC4:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
-       case HPD_PORT_H:
+       case HPD_PORT_TC5:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
-       case HPD_PORT_I:
+       case HPD_PORT_TC6:
                return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
        default:
                return false;
@@ -1893,19 +1842,16 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
        u32 ddi_hotplug_trigger, tc_hotplug_trigger;
        u32 pin_mask = 0, long_mask = 0;
-       bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
 
        if (HAS_PCH_TGP(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
-               tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
        } else if (HAS_PCH_JSP(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
                tc_hotplug_trigger = 0;
        } else if (HAS_PCH_MCC(dev_priv)) {
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
                tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
-               tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
        } else {
                drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
                         "Unrecognized PCH type 0x%x\n",
@@ -1913,7 +1859,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 
                ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
                tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
-               tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
        }
 
        if (ddi_hotplug_trigger) {
@@ -1937,7 +1882,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   tc_hotplug_trigger, dig_hotplug_reg,
                                   dev_priv->hotplug.pch_hpd,
-                                  tc_port_hotplug_long_detect);
+                                  icp_tc_port_hotplug_long_detect);
        }
 
        if (pin_mask)
@@ -2185,12 +2130,6 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
        u32 pin_mask = 0, long_mask = 0;
        u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
        u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
-       long_pulse_detect_func long_pulse_detect;
-
-       if (INTEL_GEN(dev_priv) >= 12)
-               long_pulse_detect = gen12_port_hotplug_long_detect;
-       else
-               long_pulse_detect = gen11_port_hotplug_long_detect;
 
        if (trigger_tc) {
                u32 dig_hotplug_reg;
@@ -2201,7 +2140,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   trigger_tc, dig_hotplug_reg,
                                   dev_priv->hotplug.hpd,
-                                  long_pulse_detect);
+                                  gen11_port_hotplug_long_detect);
        }
 
        if (trigger_tbt) {
@@ -2213,7 +2152,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   trigger_tbt, dig_hotplug_reg,
                                   dev_priv->hotplug.hpd,
-                                  long_pulse_detect);
+                                  gen11_port_hotplug_long_detect);
        }
 
        if (pin_mask)
@@ -3048,6 +2987,18 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
        return enabled_irqs;
 }
 
+static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+                                 const u32 hpd[HPD_NUM_PINS])
+{
+       struct intel_encoder *encoder;
+       u32 hotplug_irqs = 0;
+
+       for_each_intel_encoder(&dev_priv->drm, encoder)
+               hotplug_irqs |= hpd[encoder->hpd_pin];
+
+       return hotplug_irqs;
+}
+
 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug;
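
intel_hpd_hotplug_irqs() derives the trigger mask from the encoders actually registered, replacing per-platform constants such as SDE_HOTPLUG_MASK_SPT; each encoder contributes the table entry for its hpd_pin. This is the pairing used throughout the *_hpd_irq_setup() hunks below:

    enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
    hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
    ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
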
@@ -3077,50 +3028,50 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug_irqs, enabled_irqs;
 
-       if (HAS_PCH_IBX(dev_priv))
-               hotplug_irqs = SDE_HOTPLUG_MASK;
-       else
-               hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
        ibx_hpd_detection_setup(dev_priv);
 }
 
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
-                                   u32 ddi_hotplug_enable_mask,
-                                   u32 tc_hotplug_enable_mask)
+static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
+                                       u32 enable_mask)
 {
        u32 hotplug;
 
        hotplug = I915_READ(SHOTPLUG_CTL_DDI);
-       hotplug |= ddi_hotplug_enable_mask;
+       hotplug |= enable_mask;
        I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+}
 
-       if (tc_hotplug_enable_mask) {
-               hotplug = I915_READ(SHOTPLUG_CTL_TC);
-               hotplug |= tc_hotplug_enable_mask;
-               I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
-       }
+static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
+                                      u32 enable_mask)
+{
+       u32 hotplug;
+
+       hotplug = I915_READ(SHOTPLUG_CTL_TC);
+       hotplug |= enable_mask;
+       I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
 }
 
 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
-                             u32 sde_ddi_mask, u32 sde_tc_mask,
                              u32 ddi_enable_mask, u32 tc_enable_mask)
 {
        u32 hotplug_irqs, enabled_irqs;
 
-       hotplug_irqs = sde_ddi_mask | sde_tc_mask;
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
                I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
-       icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
+       icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
+       if (tc_enable_mask)
+               icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
 }
 
 /*
@@ -3130,7 +3081,6 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        icp_hpd_irq_setup(dev_priv,
-                         SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
                          ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
 }
 
@@ -3142,7 +3092,6 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        icp_hpd_irq_setup(dev_priv,
-                         SDE_DDI_MASK_TGP, 0,
                          TGP_DDI_HPD_ENABLE_MASK, 0);
 }
 
@@ -3154,14 +3103,18 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
        hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
-                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
        I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
 
        hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
        hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
                   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
-                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
        I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
 }
 
@@ -3171,7 +3124,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
        u32 val;
 
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-       hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 
        val = I915_READ(GEN11_DE_HPD_IMR);
        val &= ~hotplug_irqs;
@@ -3182,10 +3135,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
        gen11_hpd_detection_setup(dev_priv);
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
-               icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+               icp_hpd_irq_setup(dev_priv,
                                  TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-               icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+               icp_hpd_irq_setup(dev_priv,
                                  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
 }
 
@@ -3221,8 +3174,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
-       hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3249,22 +3202,13 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
        u32 hotplug_irqs, enabled_irqs;
 
-       if (INTEL_GEN(dev_priv) >= 8) {
-               hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 
+       if (INTEL_GEN(dev_priv) >= 8)
                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
-       } else if (INTEL_GEN(dev_priv) >= 7) {
-               hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-
-               ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
-       } else {
-               hotplug_irqs = DE_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-
+       else
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
-       }
 
        ilk_hpd_detection_setup(dev_priv);
 
@@ -3313,7 +3257,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
        u32 hotplug_irqs, enabled_irqs;
 
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-       hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+       hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 
        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3534,17 +3478,18 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
        gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
        I915_WRITE(SDEIMR, ~mask);
 
-       if (HAS_PCH_TGP(dev_priv))
-               icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
-                                       TGP_TC_HPD_ENABLE_MASK);
-       else if (HAS_PCH_JSP(dev_priv))
-               icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
-       else if (HAS_PCH_MCC(dev_priv))
-               icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
-                                       ICP_TC_HPD_ENABLE(PORT_TC1));
-       else
-               icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
-                                       ICP_TC_HPD_ENABLE_MASK);
+       if (HAS_PCH_TGP(dev_priv)) {
+               icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
+               icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
+       } else if (HAS_PCH_JSP(dev_priv)) {
+               icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
+       } else if (HAS_PCH_MCC(dev_priv)) {
+               icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
+               icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
+       } else {
+               icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
+               icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
+       }
 }
 
 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
index ac69192..d805d4d 100644 (file)
@@ -1382,7 +1382,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  DPIO_CMNRST                   (1 << 0)
 
 #define DPIO_PHY(pipe)                 ((pipe) >> 1)
-#define DPIO_PHY_IOSF_PORT(phy)                (dev_priv->dpio_phy_iosf_port[phy])
 
 /*
  * Per pipe/PLL DPIO regs
@@ -7760,32 +7759,20 @@ enum {
 #define GEN11_DE_HPD_IMR               _MMIO(0x44474)
 #define GEN11_DE_HPD_IIR               _MMIO(0x44478)
 #define GEN11_DE_HPD_IER               _MMIO(0x4447c)
-#define  GEN12_TC6_HOTPLUG                     (1 << 21)
-#define  GEN12_TC5_HOTPLUG                     (1 << 20)
-#define  GEN11_TC4_HOTPLUG                     (1 << 19)
-#define  GEN11_TC3_HOTPLUG                     (1 << 18)
-#define  GEN11_TC2_HOTPLUG                     (1 << 17)
-#define  GEN11_TC1_HOTPLUG                     (1 << 16)
 #define  GEN11_TC_HOTPLUG(tc_port)             (1 << ((tc_port) + 16))
-#define  GEN11_DE_TC_HOTPLUG_MASK              (GEN12_TC6_HOTPLUG | \
-                                                GEN12_TC5_HOTPLUG | \
-                                                GEN11_TC4_HOTPLUG | \
-                                                GEN11_TC3_HOTPLUG | \
-                                                GEN11_TC2_HOTPLUG | \
-                                                GEN11_TC1_HOTPLUG)
-#define  GEN12_TBT6_HOTPLUG                    (1 << 5)
-#define  GEN12_TBT5_HOTPLUG                    (1 << 4)
-#define  GEN11_TBT4_HOTPLUG                    (1 << 3)
-#define  GEN11_TBT3_HOTPLUG                    (1 << 2)
-#define  GEN11_TBT2_HOTPLUG                    (1 << 1)
-#define  GEN11_TBT1_HOTPLUG                    (1 << 0)
+#define  GEN11_DE_TC_HOTPLUG_MASK              (GEN11_TC_HOTPLUG(PORT_TC6) | \
+                                                GEN11_TC_HOTPLUG(PORT_TC5) | \
+                                                GEN11_TC_HOTPLUG(PORT_TC4) | \
+                                                GEN11_TC_HOTPLUG(PORT_TC3) | \
+                                                GEN11_TC_HOTPLUG(PORT_TC2) | \
+                                                GEN11_TC_HOTPLUG(PORT_TC1))
 #define  GEN11_TBT_HOTPLUG(tc_port)            (1 << (tc_port))
-#define  GEN11_DE_TBT_HOTPLUG_MASK             (GEN12_TBT6_HOTPLUG | \
-                                                GEN12_TBT5_HOTPLUG | \
-                                                GEN11_TBT4_HOTPLUG | \
-                                                GEN11_TBT3_HOTPLUG | \
-                                                GEN11_TBT2_HOTPLUG | \
-                                                GEN11_TBT1_HOTPLUG)
+#define  GEN11_DE_TBT_HOTPLUG_MASK             (GEN11_TBT_HOTPLUG(PORT_TC6) | \
+                                                GEN11_TBT_HOTPLUG(PORT_TC5) | \
+                                                GEN11_TBT_HOTPLUG(PORT_TC4) | \
+                                                GEN11_TBT_HOTPLUG(PORT_TC3) | \
+                                                GEN11_TBT_HOTPLUG(PORT_TC2) | \
+                                                GEN11_TBT_HOTPLUG(PORT_TC1))
 
 #define GEN11_TBT_HOTPLUG_CTL                          _MMIO(0x44030)
 #define GEN11_TC_HOTPLUG_CTL                           _MMIO(0x44038)
@@ -9315,6 +9302,7 @@ enum {
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC      (1 << 7)
 
 #define GEN10_SAMPLER_MODE             _MMIO(0xE18C)
+#define   ENABLE_SMALLPL                       REG_BIT(15)
 #define   GEN11_SAMPLER_ENABLE_HEADLESS_MSG    REG_BIT(5)
 
 /* IVYBRIDGE DPF */
index 11e2724..0e81381 100644 (file)
@@ -542,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request)
        if (i915_request_completed(request))
                goto xfer;
 
+       if (unlikely(intel_context_is_closed(request->context) &&
+                    !intel_engine_has_heartbeat(engine)))
+               intel_context_set_banned(request->context);
+
        if (unlikely(intel_context_is_banned(request->context)))
                i915_request_set_error_once(request, -EIO);
+
        if (unlikely(fatal_error(request->fence.error)))
                __i915_request_skip(request);
 
@@ -593,16 +598,8 @@ xfer:
        __notify_execute_cb_irq(request);
 
        /* We may be recursing from the signal callback of another i915 fence */
-       if (!i915_request_signaled(request)) {
-               spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
-               if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                            &request->fence.flags) &&
-                   !i915_request_enable_breadcrumb(request))
-                       intel_engine_signal_breadcrumbs(engine);
-
-               spin_unlock(&request->lock);
-       }
+       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+               i915_request_enable_breadcrumb(request);
 
        return result;
 }
index ed2be34..7b64e71 100644 (file)
 
 static void i915_save_display(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+
        /* Display arbitration control */
        if (INTEL_GEN(dev_priv) <= 4)
                dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
-       /* save FBC interval */
-       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
-               dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+       if (IS_GEN(dev_priv, 4))
+               pci_read_config_word(pdev, GCDGMBUS,
+                                    &dev_priv->regfile.saveGCDGMBUS);
 }
 
 static void i915_restore_display(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+
+       if (IS_GEN(dev_priv, 4))
+               pci_write_config_word(pdev, GCDGMBUS,
+                                     dev_priv->regfile.saveGCDGMBUS);
+
        /* Display arbitration */
        if (INTEL_GEN(dev_priv) <= 4)
                I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
@@ -52,31 +60,17 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
        /* Only restore FBC info on platforms that support FBC */
        intel_fbc_global_disable(dev_priv);
 
-       /* restore FBC interval */
-       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
-               I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
-
        intel_vga_redisable(dev_priv);
+
+       intel_gmbus_reset(dev_priv);
 }
 
 int i915_save_state(struct drm_i915_private *dev_priv)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
        i915_save_display(dev_priv);
 
-       if (IS_GEN(dev_priv, 4))
-               pci_read_config_word(pdev, GCDGMBUS,
-                                    &dev_priv->regfile.saveGCDGMBUS);
-
-       /* Cache mode state */
-       if (INTEL_GEN(dev_priv) < 7)
-               dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
-       /* Memory Arbitration state */
-       dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
        /* Scratch space */
        if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
@@ -102,22 +96,10 @@ int i915_save_state(struct drm_i915_private *dev_priv)
 
 int i915_restore_state(struct drm_i915_private *dev_priv)
 {
-       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
-       if (IS_GEN(dev_priv, 4))
-               pci_write_config_word(pdev, GCDGMBUS,
-                                     dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev_priv);
 
-       /* Cache mode state */
-       if (INTEL_GEN(dev_priv) < 7)
-               I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
-                          0xffff0000);
-
-       /* Memory arbitration state */
-       I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
-
        /* Scratch space */
        if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
                for (i = 0; i < 7; i++) {
@@ -138,7 +120,5 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
                        I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
        }
 
-       intel_gmbus_reset(dev_priv);
-
        return 0;
 }
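
The two hunks above move the GCDGMBUS PCI config save/restore into i915_save_display()/i915_restore_display(). A minimal sketch of the pci_read_config_word()/pci_write_config_word() pattern they rely on (the register offset and regfile layout here are assumptions for illustration; the driver takes the real offset from i915_reg.h):

#include <linux/pci.h>

#define GCDGMBUS 0xcc	/* assumed config-space offset for the sketch */

struct my_regfile {
	u16 saveGCDGMBUS;
};

static void my_save_display(struct pci_dev *pdev, struct my_regfile *regfile)
{
	/* read 16 bits from PCI config space into the save area */
	pci_read_config_word(pdev, GCDGMBUS, &regfile->saveGCDGMBUS);
}

static void my_restore_display(struct pci_dev *pdev, struct my_regfile *regfile)
{
	/* write the saved value back on resume */
	pci_write_config_word(pdev, GCDGMBUS, regfile->saveGCDGMBUS);
}
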
index 495d28f..ffb5287 100644 (file)
@@ -892,9 +892,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 
                /* Allocate enough page directories to cover the used PTEs */
                if (vma->vm->allocate_va_range) {
-                       i915_vm_alloc_pt_stash(vma->vm,
-                                              &work->stash,
-                                              vma->size);
+                       err = i915_vm_alloc_pt_stash(vma->vm,
+                                                    &work->stash,
+                                                    vma->size);
+                       if (err)
+                               goto err_fence;
 
                        err = i915_vm_pin_pt_stash(vma->vm,
                                                   &work->stash);
index e2aa5bc..adc836f 100644 (file)
@@ -516,6 +516,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                                            S32_MAX),
                                USEC_PER_SEC));
        }
+
+       if (!HAS_DISPLAY(dev_priv)) {
+               dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
+                                                  DRIVER_ATOMIC);
+               memset(&info->display, 0, sizeof(info->display));
+               memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
+               memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
+       }
 }
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
index b4bd192..34e0d22 100644 (file)
@@ -7136,7 +7136,7 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
                   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
 
        /* Wa_1409825376:tgl (pre-prod) */
-       if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+       if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
                I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
                           TGL_VRH_GATING_DIS);
 
index 916ccd1..5b32792 100644 (file)
@@ -231,9 +231,21 @@ void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
                        SB_CRWRDA_NP, reg, &val);
 }
 
+static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
+{
+       /*
+        * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+        * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+        */
+       if (IS_CHERRYVIEW(i915))
+               return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
+       else
+               return IOSF_PORT_DPIO;
+}
+
 u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
 {
-       int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+       u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
        u32 val = 0;
 
        vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
@@ -252,7 +264,7 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
 void vlv_dpio_write(struct drm_i915_private *i915,
                    enum pipe pipe, int reg, u32 val)
 {
-       int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+       u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
 
        vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
 }
index 8d5a933..263ffcb 100644 (file)
@@ -1993,13 +1993,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
                                 unsigned int slow_timeout_ms,
                                 u32 *out_value)
 {
-       u32 reg_value;
+       u32 reg_value = 0;
 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
        int ret;
 
        /* Catch any overuse of this function */
        might_sleep_if(slow_timeout_ms);
        GEM_BUG_ON(fast_timeout_us > 20000);
+       GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
 
        ret = -ETIMEDOUT;
        if (fast_timeout_us && fast_timeout_us <= 20000)
index c207d22..b6c42fd 100644 (file)
@@ -116,11 +116,11 @@ static struct dev_pm_domain pm_domain = {
 
 struct drm_i915_private *mock_gem_device(void)
 {
-       struct drm_i915_private *i915;
-       struct pci_dev *pdev;
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-       struct dev_iommu iommu;
+       static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
 #endif
+       struct drm_i915_private *i915;
+       struct pci_dev *pdev;
 
        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
@@ -132,10 +132,8 @@ struct drm_i915_private *mock_gem_device(void)
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-       /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
-       memset(&iommu, 0, sizeof(iommu));
-       iommu.priv = (void *)-1;
-       pdev->dev.iommu = &iommu;
+       /* HACK to disable iommu for the fake device; force identity mapping */
+       pdev->dev.iommu = &fake_iommu;
 #endif
        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
                put_device(&pdev->dev);
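
The mock_gem_device() fix above closes a use-after-scope: the dev_iommu struct lived on the stack, but pdev->dev.iommu kept pointing at it after the function returned. A minimal sketch of the hazard and the static-storage fix (names are hypothetical):

/* A pointer published to a longer-lived object must not target an
 * on-stack variable of the function that set it. */
struct holder {
	int *p;
};

static void broken_setup(struct holder *h)
{
	int tmp = -1;

	h->p = &tmp;	/* dangles as soon as broken_setup() returns */
}

static void fixed_setup(struct holder *h)
{
	static int tmp = -1;	/* static storage outlives the call */

	h->p = &tmp;
}
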
index 155f2b4..11223fe 100644 (file)
@@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                return ret;
 
        if (bo->base.sgt) {
-               dma_unmap_sg(dev, bo->base.sgt->sgl,
-                            bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+               dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
                sg_free_table(bo->base.sgt);
        } else {
                bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
@@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                }
        }
 
-       dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+       ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret) {
+               sg_free_table(&sgt);
+               kfree(bo->base.sgt);
+               bo->base.sgt = NULL;
+               return ret;
+       }
 
        *bo->base.sgt = sgt;
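
Unlike the old dma_map_sg() call, the dma_map_sgtable() used above returns 0 on success and a negative errno on failure, which is what lets lima_heap_alloc() finally propagate mapping errors. A minimal sketch of the map/unmap pairing (helper names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_heap_table(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* returns 0 on success, -errno on failure; nothing is left
	 * mapped on error, so only the table itself needs freeing */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

static void unmap_heap_table(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(sgt);
}
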
 
index 5b92fb8..2b2739a 100644 (file)
@@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
        if (err)
                goto err_out1;
 
-       for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+       for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
                err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
                                       bo_va->node.start + offset);
                if (err)
@@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
        mutex_lock(&vm->lock);
 
        base = bo_va->node.start + (pageoff << PAGE_SHIFT);
-       for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
-                            bo->base.sgt->nents, pageoff) {
+       for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
                err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
                                       base + offset);
                if (err)
index aa74aac..65cd03a 100644 (file)
@@ -24,6 +24,6 @@ config DRM_MEDIATEK_HDMI
        tristate "DRM HDMI Support for Mediatek SoCs"
        depends on DRM_MEDIATEK
        select SND_SOC_HDMI_CODEC if SND_SOC
-       select GENERIC_PHY
+       select PHY_MTK_HDMI
        help
          DRM/KMS HDMI driver for Mediatek SoCs
index b7a82ed..77b0fd8 100644 (file)
@@ -19,9 +19,6 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
 
 mediatek-drm-hdmi-objs := mtk_cec.o \
                          mtk_hdmi.o \
-                         mtk_hdmi_ddc.o \
-                         mtk_mt2701_hdmi_phy.o \
-                         mtk_mt8173_hdmi_phy.o \
-                         mtk_hdmi_phy.o
+                         mtk_hdmi_ddc.o
 
 obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
index d4f0fb7..cf11c48 100644 (file)
@@ -64,7 +64,8 @@ enum mtk_dpi_out_color_format {
 struct mtk_dpi {
        struct mtk_ddp_comp ddp_comp;
        struct drm_encoder encoder;
-       struct drm_bridge *bridge;
+       struct drm_bridge bridge;
+       struct drm_bridge *next_bridge;
        void __iomem *regs;
        struct device *dev;
        struct clk *engine_clk;
@@ -83,9 +84,9 @@ struct mtk_dpi {
        int refcount;
 };
 
-static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
+static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b)
 {
-       return container_of(e, struct mtk_dpi, encoder);
+       return container_of(b, struct mtk_dpi, bridge);
 }
 
 enum mtk_dpi_polarity {
@@ -521,50 +522,53 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        return 0;
 }
 
-static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
-                                      const struct drm_display_mode *mode,
-                                      struct drm_display_mode *adjusted_mode)
+static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
 {
-       return true;
+       drm_encoder_cleanup(encoder);
 }
 
-static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode,
-                                    struct drm_display_mode *adjusted_mode)
+static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
+       .destroy = mtk_dpi_encoder_destroy,
+};
+
+static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+       return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+                                &dpi->bridge, flags);
+}
+
+static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge,
+                               const struct drm_display_mode *mode,
+                               const struct drm_display_mode *adjusted_mode)
+{
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        drm_mode_copy(&dpi->mode, adjusted_mode);
 }
 
-static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        mtk_dpi_power_off(dpi);
 }
 
-static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        mtk_dpi_power_on(dpi);
        mtk_dpi_set_display_mode(dpi, &dpi->mode);
 }
 
-static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
-                               struct drm_crtc_state *crtc_state,
-                               struct drm_connector_state *conn_state)
-{
-       return 0;
-}
-
-static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = {
-       .mode_fixup = mtk_dpi_encoder_mode_fixup,
-       .mode_set = mtk_dpi_encoder_mode_set,
-       .disable = mtk_dpi_encoder_disable,
-       .enable = mtk_dpi_encoder_enable,
-       .atomic_check = mtk_dpi_atomic_check,
+static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
+       .attach = mtk_dpi_bridge_attach,
+       .mode_set = mtk_dpi_bridge_mode_set,
+       .disable = mtk_dpi_bridge_disable,
+       .enable = mtk_dpi_bridge_enable,
 };
 
 static void mtk_dpi_start(struct mtk_ddp_comp *comp)
@@ -605,12 +609,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
                dev_err(dev, "Failed to initialize decoder: %d\n", ret);
                goto err_unregister;
        }
-       drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs);
 
-       /* Currently DPI0 is fixed to be driven by OVL1 */
-       dpi->encoder.possible_crtcs = BIT(1);
+       dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
 
-       ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
+       ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
        if (ret) {
                dev_err(dev, "Failed to attach bridge: %d\n", ret);
                goto err_cleanup;
@@ -770,11 +772,11 @@ static int mtk_dpi_probe(struct platform_device *pdev)
        }
 
        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
-                                         NULL, &dpi->bridge);
+                                         NULL, &dpi->next_bridge);
        if (ret)
                return ret;
 
-       dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node);
+       dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
 
        comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
        if (comp_id < 0) {
@@ -791,8 +793,15 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dpi);
 
+       dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
+       dpi->bridge.of_node = dev->of_node;
+       dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
+
+       drm_bridge_add(&dpi->bridge);
+
        ret = component_add(dev, &mtk_dpi_component_ops);
        if (ret) {
+               drm_bridge_remove(&dpi->bridge);
                dev_err(dev, "Failed to add component: %d\n", ret);
                return ret;
        }
@@ -802,7 +811,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 
 static int mtk_dpi_remove(struct platform_device *pdev)
 {
+       struct mtk_dpi *dpi = platform_get_drvdata(pdev);
+
        component_del(&pdev->dev, &mtk_dpi_component_ops);
+       drm_bridge_remove(&dpi->bridge);
 
        return 0;
 }
index 57c88de..bfd42ae 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
+#include <drm/drm_print.h>
+
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
@@ -412,6 +414,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
        [DDP_COMPONENT_WDMA1]   = { MTK_DISP_WDMA,      1, NULL },
 };
 
+static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+                                    const enum mtk_ddp_comp_id *path,
+                                    unsigned int path_len)
+{
+       unsigned int i;
+
+       if (path == NULL)
+               return false;
+
+       for (i = 0U; i < path_len; i++)
+               if (ddp_comp.id == path[i])
+                       return true;
+
+       return false;
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
                        enum mtk_ddp_comp_type comp_type)
 {
@@ -427,6 +445,26 @@ int mtk_ddp_comp_get_id(struct device_node *node,
        return -EINVAL;
 }
 
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+                                               struct mtk_ddp_comp ddp_comp)
+{
+       struct mtk_drm_private *private = drm->dev_private;
+       unsigned int ret = 0;
+
+       if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len))
+               ret = BIT(0);
+       else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
+                                         private->data->ext_len))
+               ret = BIT(1);
+       else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
+                                         private->data->third_len))
+               ret = BIT(2);
+       else
+               DRM_INFO("Failed to find comp in ddp table\n");
+
+       return ret;
+}
+
 int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
                      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
                      const struct mtk_ddp_comp_funcs *funcs)
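
drm_encoder.possible_crtcs is a bitmask of CRTC indices, so the new helper translates "which display path contains this component" into "which CRTC may drive this encoder". A hypothetical bind-time usage sketch (the error handling is an assumption, not part of the patch):

#include <drm/drm_encoder.h>
#include "mtk_drm_ddp_comp.h"

static int my_encoder_bind(struct drm_device *drm,
			   struct drm_encoder *encoder,
			   struct mtk_ddp_comp comp)
{
	/* one bit per CRTC index: BIT(0) = main path's CRTC 0,
	 * BIT(1) = ext path's CRTC 1, BIT(2) = third path's CRTC 2 */
	encoder->possible_crtcs =
		mtk_drm_find_possible_crtc_by_comp(drm, comp);

	/* 0 means the component sits on no registered path */
	return encoder->possible_crtcs ? 0 : -ENODEV;
}
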
index debe363..1d9e00b 100644 (file)
@@ -202,6 +202,8 @@ static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
 
 int mtk_ddp_comp_get_id(struct device_node *node,
                        enum mtk_ddp_comp_type comp_type);
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+                                               struct mtk_ddp_comp ddp_comp);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
                      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
                      const struct mtk_ddp_comp_funcs *funcs);
index 040a8f3..2350e32 100644 (file)
@@ -74,6 +74,19 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
        DDP_COMPONENT_DPI0,
 };
 
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = {
+       DDP_COMPONENT_OVL0,
+       DDP_COMPONENT_RDMA0,
+       DDP_COMPONENT_COLOR0,
+       DDP_COMPONENT_BLS,
+       DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = {
+       DDP_COMPONENT_RDMA1,
+       DDP_COMPONENT_DSI0,
+};
+
 static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
        DDP_COMPONENT_OVL0,
        DDP_COMPONENT_COLOR0,
@@ -127,6 +140,14 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
        .shadow_register = true,
 };
 
+static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
+       .main_path = mt7623_mtk_ddp_main,
+       .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
+       .ext_path = mt7623_mtk_ddp_ext,
+       .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext),
+       .shadow_register = true,
+};
+
 static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
        .main_path = mt2712_mtk_ddp_main,
        .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
@@ -422,6 +443,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
 static const struct of_device_id mtk_drm_of_ids[] = {
        { .compatible = "mediatek,mt2701-mmsys",
          .data = &mt2701_mmsys_driver_data},
+       { .compatible = "mediatek,mt7623-mmsys",
+         .data = &mt7623_mmsys_driver_data},
        { .compatible = "mediatek,mt2712-mmsys",
          .data = &mt2712_mmsys_driver_data},
        { .compatible = "mediatek,mt8173-mmsys",
index 6190cc3..0583e55 100644 (file)
@@ -212,46 +212,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg)
 {
        struct mtk_drm_gem_obj *mtk_gem;
-       int ret;
-       struct scatterlist *s;
-       unsigned int i;
-       dma_addr_t expected;
 
-       mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+       /* check if the entries in the sg_table are contiguous */
+       if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
+               DRM_ERROR("sg_table is not contiguous");
+               return ERR_PTR(-EINVAL);
+       }
 
+       mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(mtk_gem))
                return ERR_CAST(mtk_gem);
 
-       expected = sg_dma_address(sg->sgl);
-       for_each_sg(sg->sgl, s, sg->nents, i) {
-               if (!sg_dma_len(s))
-                       break;
-
-               if (sg_dma_address(s) != expected) {
-                       DRM_ERROR("sg_table is not contiguous");
-                       ret = -EINVAL;
-                       goto err_gem_free;
-               }
-               expected = sg_dma_address(s) + sg_dma_len(s);
-       }
-
        mtk_gem->dma_addr = sg_dma_address(sg->sgl);
        mtk_gem->sg = sg;
 
        return &mtk_gem->base;
-
-err_gem_free:
-       kfree(mtk_gem);
-       return ERR_PTR(ret);
 }
 
 void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
        struct sg_table *sgt;
-       struct sg_page_iter iter;
        unsigned int npages;
-       unsigned int i = 0;
 
        if (mtk_gem->kvaddr)
                return mtk_gem->kvaddr;
@@ -265,11 +247,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
        if (!mtk_gem->pages)
                goto out;
 
-       for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
-               mtk_gem->pages[i++] = sg_page_iter_page(&iter);
-               if (i > npages)
-                       break;
-       }
+       drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+
        mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
                               pgprot_writecombine(PAGE_KERNEL));
 
index 16fd99d..20f3489 100644 (file)
@@ -970,11 +970,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
                return ret;
        }
 
-       /*
-        * Currently display data paths are statically assigned to a crtc each.
-        * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
-        */
-       dsi->encoder.possible_crtcs = 1;
+       dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
 
        ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
index f2e9b42..0ed7b0b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 #include <linux/of.h>
@@ -145,11 +146,16 @@ struct hdmi_audio_param {
        struct hdmi_codec_params codec_params;
 };
 
+struct mtk_hdmi_conf {
+       bool tz_disabled;
+};
+
 struct mtk_hdmi {
        struct drm_bridge bridge;
        struct drm_bridge *next_bridge;
        struct drm_connector conn;
        struct device *dev;
+       const struct mtk_hdmi_conf *conf;
        struct phy *phy;
        struct device *cec_dev;
        struct i2c_adapter *ddc_adpt;
@@ -234,7 +240,6 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
 static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
 {
        struct arm_smccc_res res;
-       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy);
 
        /*
         * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
@@ -242,7 +247,7 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
         * The ARM trusted firmware provides an API for the HDMI driver to set
         * this control bit to enable HDMI output in supervisor mode.
         */
-       if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled)
+       if (hdmi->conf && hdmi->conf->tz_disabled)
                regmap_update_bits(hdmi->sys_regmap,
                                   hdmi->sys_offset + HDMI_SYS_CFG20,
                                   0x80008005, enable ? 0x80000005 : 0x8000);
@@ -1723,6 +1728,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        hdmi->dev = dev;
+       hdmi->conf = of_device_get_match_data(dev);
 
        ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
        if (ret)
@@ -1803,8 +1809,16 @@ static int mtk_hdmi_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
                         mtk_hdmi_suspend, mtk_hdmi_resume);
 
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
+       .tz_disabled = true,
+};
+
 static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
-       { .compatible = "mediatek,mt8173-hdmi", },
+       { .compatible = "mediatek,mt2701-hdmi",
+         .data = &mtk_hdmi_conf_mt2701,
+       },
+       { .compatible = "mediatek,mt8173-hdmi",
+       },
        {}
 };
 
@@ -1819,7 +1833,6 @@ static struct platform_driver mtk_hdmi_driver = {
 };
 
 static struct platform_driver * const mtk_hdmi_drivers[] = {
-       &mtk_hdmi_phy_driver,
        &mtk_hdmi_ddc_driver,
        &mtk_cec_driver,
        &mtk_hdmi_driver,
index bb3653d..472bf14 100644 (file)
@@ -5,7 +5,6 @@
  */
 #ifndef _MTK_HDMI_CTRL_H
 #define _MTK_HDMI_CTRL_H
-#include "mtk_hdmi_phy.h"
 
 struct platform_driver;
 
index 6deaa7d..e5816b4 100644 (file)
@@ -6,8 +6,8 @@ config DRM_MSM
        depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
-       depends on INTERCONNECT || !INTERCONNECT
        depends on QCOM_OCMEM || QCOM_OCMEM=n
+       select IOMMU_IO_PGTABLE
        select QCOM_MDT_LOADER if ARCH_QCOM
        select REGULATOR
        select DRM_KMS_HELPER
@@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP
        help
          Choose this option to enable HDCP state machine
 
+config DRM_MSM_DP
+       bool "Enable DisplayPort support in MSM DRM driver"
+       depends on DRM_MSM
+       default y
+       help
+         Compile in support for the DisplayPort driver in the MSM DRM
+         driver. This option enables external DisplayPort display
+         support, which can act as the primary or secondary display
+         on the device.
+
 config DRM_MSM_DSI
        bool "Enable DSI support in MSM DRM driver"
        depends on DRM_MSM
@@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY
        default y
        help
          Choose this option if DSI PHY on SDM845 is used on the platform.
+
+config DRM_MSM_DSI_7NM_PHY
+       bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+       depends on DRM_MSM_DSI
+       default y
+       help
+         Choose this option if DSI PHY on SM8150/SM8250 is used on the
+         platform.
index 42f8aae..340682c 100644 (file)
@@ -2,6 +2,7 @@
 ccflags-y := -I $(srctree)/$(src)
 ccflags-y += -I $(srctree)/$(src)/disp/dpu1
 ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
 
 msm-y := \
        adreno/adreno_device.o \
@@ -95,10 +96,23 @@ msm-y := \
        msm_gpu_tracepoints.o \
        msm_gpummu.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+       dp/dp_debug.o
 
 msm-$(CONFIG_DRM_MSM_GPU_STATE)        += adreno/a6xx_gpu_state.o
 
+msm-$(CONFIG_DRM_MSM_DP) += dp/dp_aux.o \
+       dp/dp_catalog.o \
+       dp/dp_ctrl.o \
+       dp/dp_display.o \
+       dp/dp_drm.o \
+       dp/dp_hpd.o \
+       dp/dp_link.o \
+       dp/dp_panel.o \
+       dp/dp_parser.o \
+       dp/dp_power.o \
+       dp/dp_audio.o
+
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
 msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
@@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
 msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
 
 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
 msm-y += dsi/pll/dsi_pll.o
@@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
 msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
 endif
 
 obj-$(CONFIG_DRM_MSM)  += msm.o
index 48fa49f..7e82c41 100644 (file)
@@ -10,6 +10,48 @@ extern bool hang_debug;
 static void a2xx_dump(struct msm_gpu *gpu);
 static bool a2xx_idle(struct msm_gpu *gpu);
 
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS);
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+       OUT_PKT3(ring, CP_INTERRUPT, 1);
+       OUT_RING(ring, 0x80000000);
+
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
 static bool a2xx_me_init(struct msm_gpu *gpu)
 {
        struct msm_ringbuffer *ring = gpu->rb[0];
@@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
        OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
        return a2xx_idle(gpu);
 }
 
@@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
        return aspace;
 }
 
-/* Register offset defines for A2XX - copy of A3XX */
-static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
 
 static const struct adreno_gpu_funcs funcs = {
        .base = {
@@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
                .recover = a2xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a2xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a2xx_irq,
                .destroy = a2xx_destroy,
@@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a2xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = a2xx_create_address_space,
+               .get_rptr = a2xx_get_rptr,
        },
 };
 
@@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
        else
                adreno_gpu->registers = a220_registers;
 
-       adreno_gpu->reg_offsets = a2xx_register_offsets;
-
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
                goto fail;
index f647114..f29c77d 100644 (file)
@@ -28,6 +28,61 @@ extern bool hang_debug;
 static void a3xx_dump(struct msm_gpu *gpu);
 static bool a3xx_idle(struct msm_gpu *gpu);
 
+static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* Flush HLSQ lazy updates to make sure there is nothing
+        * pending for indirect loads after the timestamp has
+        * passed:
+        */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, HLSQ_FLUSH);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+
+#if 0
+       /* Dummy set-constant to trigger context rollover */
+       OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+       OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+       OUT_RING(ring, 0x00000000);
+#endif
+
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
 static bool a3xx_me_init(struct msm_gpu *gpu)
 {
        struct msm_ringbuffer *ring = gpu->rb[0];
@@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
        return a3xx_idle(gpu);
 }
 
@@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
        return state;
 }
 
-/* Register offset defines for A3XX */
-static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
 
 static const struct adreno_gpu_funcs funcs = {
        .base = {
@@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
                .recover = a3xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a3xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a3xx_irq,
                .destroy = a3xx_destroy,
@@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a3xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a3xx_get_rptr,
        },
 };
 
@@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
        adreno_gpu->registers = a3xx_registers;
-       adreno_gpu->reg_offsets = a3xx_register_offsets;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
index 9547536..2b93b33 100644 (file)
@@ -22,6 +22,54 @@ extern bool hang_debug;
 static void a4xx_dump(struct msm_gpu *gpu);
 static bool a4xx_idle(struct msm_gpu *gpu);
 
+static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* Flush HLSQ lazy updates to make sure there is nothing
+        * pending for indirect loads after the timestamp has
+        * passed:
+        */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, HLSQ_FLUSH);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+
+       adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+}
+
 /*
  * a4xx_enable_hwcg() - Program the clock control registers
  * @device: The adreno device pointer
@@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
        return a4xx_idle(gpu);
 }
 
@@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
        return state;
 }
 
-/* Register offset defines for A4XX, in order of enum adreno_regs */
-static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-};
-
 static void a4xx_dump(struct msm_gpu *gpu)
 {
        printk("status:   %08x\n",
@@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        return 0;
 }
 
+static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = a4xx_pm_suspend,
                .pm_resume = a4xx_pm_resume,
                .recover = a4xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a4xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a4xx_irq,
                .destroy = a4xx_destroy,
@@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a4xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a4xx_get_rptr,
        },
        .get_timestamp = a4xx_get_timestamp,
 };
@@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 
        adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
                                                             a4xx_registers;
-       adreno_gpu->reg_offsets = a4xx_register_offsets;
 
        /* if needed, allocate gmem: */
-       if (adreno_is_a4xx(adreno_gpu)) {
-               ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
-                                           &a4xx_gpu->ocmem);
-               if (ret)
-                       goto fail;
-       }
+       ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+                                   &a4xx_gpu->ocmem);
+       if (ret)
+               goto fail;
 
        if (!gpu->aspace) {
                /* TODO we think it is possible to configure the GPU to
index 68eddac..fc2c905 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "a5xx_gpu.h"
 
-static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
        }
-
-       return 0;
 }
 
-static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
        }
-
-       return 0;
 }
 
-static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
        }
-
-       return 0;
 }
 
-static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x %08x %08x %08x\n", i,
                        val[0], val[1], val[2], val[3]);
        }
-
-       return 0;
 }
 
 static int show(struct seq_file *m, void *arg)
@@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg)
        struct drm_device *dev = node->minor->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_printer p = drm_seq_file_printer(m);
-       int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+       void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
                node->info_ent->data;
 
-       return show(priv->gpu, &p);
+       show(priv->gpu, &p);
+       return 0;
 }
 
 #define ENT(n) { .name = #n, .show = show, .data = n ##_print }
index 91726da..d6804a8 100644 (file)
@@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+               bool sync)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
 
+       /*
+        * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+        * the rptr shadow
+        */
+       if (a5xx_gpu->has_whereami && sync) {
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+       }
+
        spin_lock_irqsave(&ring->lock, flags);
 
        /* Copy the shadow to the actual register */
@@ -43,8 +54,7 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
 }
 
-static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
@@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                }
        }
 
-       a5xx_flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        a5xx_preempt_trigger(gpu);
 
        /* we might not necessarily have a cmd from userspace to
@@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
        msm_gpu_retire(gpu);
 }
 
-static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
                priv->lastctx = NULL;
-               a5xx_submit_in_rb(gpu, submit, ctx);
+               a5xx_submit_in_rb(gpu, submit);
                return;
        }
 
@@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);
 
-       a5xx_flush(gpu, ring);
+       /* A WHERE_AM_I packet is not needed after a YIELD */
+       a5xx_flush(gpu, ring, false);
 
        /* Check to see if we need to start preemption */
        a5xx_preempt_trigger(gpu);
@@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
@@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
        OUT_RING(ring, 0x01);
        OUT_RING(ring, 0x01);
 
-       gpu->funcs->flush(gpu, ring);
+       /* The WHERE_AM_I packet is not needed after a YIELD is issued */
+       a5xx_flush(gpu, ring, false);
 
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
+static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+               struct drm_gem_object *obj)
+{
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+
+       if (IS_ERR(buf))
+               return;
+
+       /*
+        * If the lowest nibble is 0xa, that indicates this microcode has
+        * been patched. The actual version is in dword [3], but we only
+        * care about the patchlevel, which is the lowest nibble of dword [3].
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a5xx_gpu->has_whereami = true;
+
+       msm_gem_put_vaddr(obj);
+}
+
 static int a5xx_ucode_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+               a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
 
        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 static int a5xx_hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;
 
        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
 
+       /*
+        * If the microcode supports the WHERE_AM_I opcode then we can use that
+        * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+        * can't safely use the RPTR shadow or preemption. In either case, the
+        * RPTR shadow should be disabled in hardware.
+        */
        gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
                MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
+       /* Disable preemption if WHERE_AM_I isn't available */
+       if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
+               a5xx_preempt_fini(gpu);
+               gpu->nr_rings = 1;
+       } else {
+               /* Create a privileged buffer for the RPTR shadow */
+               if (!a5xx_gpu->shadow_bo) {
+                       a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a5xx_gpu->shadow_bo,
+                               &a5xx_gpu->shadow_iova);
+
+                       if (IS_ERR(a5xx_gpu->shadow))
+                               return PTR_ERR(a5xx_gpu->shadow);
+               }
+
+               gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+                       REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+       }
+
        a5xx_preempt_hw_init(gpu);
 
        /* Disable the interrupts through the initial bringup stage */
@@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
 
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        }
@@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);
 
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else if (ret == -ENODEV) {
@@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu)
                drm_gem_object_put(a5xx_gpu->gpmu_bo);
        }
 
+       if (a5xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a5xx_gpu->shadow_bo);
+       }
+
        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
 }
@@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
        return IRQ_HANDLED;
 }
 
-static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A5XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
-};
-
 static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
@@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+       if (a5xx_gpu->has_whereami)
+               return a5xx_gpu->shadow[ring->id];
+
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
                .submit = a5xx_submit,
-               .flush = a5xx_flush,
                .active_ring = a5xx_active_ring,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
@@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a5xx_gpu_state_get,
                .gpu_state_put = a5xx_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a5xx_get_rptr,
        },
        .get_timestamp = a5xx_get_timestamp,
 };
@@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
        gpu = &adreno_gpu->base;
 
        adreno_gpu->registers = a5xx_registers;
-       adreno_gpu->reg_offsets = a5xx_register_offsets;
 
        a5xx_gpu->lm_leakage = 0x4E001A;
 
        check_speed_bin(&pdev->dev);
 
-       /* Restricting nr_rings to 1 to temporarily disable preemption */
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
index 1e5b1a1..c7187bc 100644
@@ -37,6 +37,13 @@ struct a5xx_gpu {
 
        atomic_t preempt_state;
        struct timer_list preempt_timer;
+
+       struct drm_gem_object *shadow_bo;
+       uint64_t shadow_iova;
+       uint32_t *shadow;
+
+       /* True if the microcode supports the WHERE_AM_I opcode */
+       bool has_whereami;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
        return -ETIMEDOUT;
 }
 
+#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
+               ((ring)->id * sizeof(uint32_t)))
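The shadowptr() macro gives each ring one 32-bit slot inside the shared shadow buffer. The arithmetic in isolation, with a hypothetical base IOVA (the real one is assigned by msm_gem_kernel_new() at hw_init time):

    #include <stdint.h>
    #include <stdio.h>

    #define SHADOW_IOVA 0x100000ULL /* hypothetical buffer address */

    static uint64_t shadow_slot(uint64_t shadow_iova, unsigned int ring_id)
    {
        /* slot N lives at base + N * sizeof(uint32_t) */
        return shadow_iova + ring_id * sizeof(uint32_t);
    }

    int main(void)
    {
        unsigned int id;

        for (id = 0; id < 4; id++) /* four rings after this series */
            printf("ring %u -> 0x%llx\n", id,
                   (unsigned long long)shadow_slot(SHADOW_IOVA, id));
        return 0;
    }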
+
 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
@@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu);
 void a5xx_preempt_irq(struct msm_gpu *gpu);
 void a5xx_preempt_fini(struct msm_gpu *gpu);
 
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
+
 /* Return true if we are in a preempt state */
 static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
 {
index 321a806..f176a6f 100644
@@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);
 
-       gpu->funcs->flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
 
        if (!a5xx_idle(gpu, ring)) {
                DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
index 9f3fe17..7e04509 100644
@@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
        ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
        ptr->info = 0;
        ptr->data = 0;
-       ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
-       ptr->rptr_addr = rbmemptr(ring, rptr);
+       ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+
+       ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
        ptr->counter = counters_iova;
 
        return 0;
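For orientation, the assignments above fill in a CP preemption record whose cntl now carries AXXX_CP_RB_CNTL_NO_UPDATE and whose rptr_addr points at the WHERE_AM_I shadow slot instead of the memptrs copy. A sketch of the members touched; the real struct in a5xx_gpu.h has more fields, and the layout here is illustrative only:

    #include <stdint.h>

    /* Illustrative subset of the a5xx preemption record */
    struct a5xx_preempt_record_sketch {
        uint32_t magic;     /* A5XX_PREEMPT_RECORD_MAGIC */
        uint32_t info;
        uint32_t data;
        uint32_t cntl;      /* RB_CNTL value, now with NO_UPDATE set */
        uint64_t rptr_addr; /* now shadowptr(a5xx_gpu, ring) */
        uint64_t counter;   /* IOVA of the preemption counters */
    };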
index e1c7bcd..491fee4 100644
@@ -11,6 +11,7 @@
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 #include "msm_mmu.h"
 
 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
@@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
        gmu->current_perf_index = perf_index;
        gmu->freq = gmu->gpu_freqs[perf_index];
 
+       trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
        /*
         * This can get called from devfreq while the hardware is idle. Don't
         * bring up the power if it isn't already active
index 66a95e2..948f365 100644
@@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 
 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
 
+       /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+       if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+       }
+
        spin_lock_irqsave(&ring->lock, flags);
 
        /* Copy the shadow to the actual register */
@@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
        OUT_RING(ring, upper_32_bits(iova));
 }
 
-static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+               struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+       phys_addr_t ttbr;
+       u32 asid;
+       u64 memptr = rbmemptr(ring, ttbr0);
+
+       if (ctx == a6xx_gpu->cur_ctx)
+               return;
+
+       if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+               return;
+
+       /* Execute the table update */
+       OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+       OUT_RING(ring,
+               CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+               CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+
+       /*
+        * Write the new TTBR0 to the memstore. This is good for debugging.
+        */
+       OUT_PKT7(ring, CP_MEM_WRITE, 4);
+       OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+       OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+       OUT_RING(ring, lower_32_bits(ttbr));
+       OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+
+       /*
+        * And finally, trigger a UCHE flush to be sure there isn't anything
+        * lingering in that part of the GPU.
+        */
+       OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, 0x31);
+
+       a6xx_gpu->cur_ctx = ctx;
+}
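The CP_MEM_WRITE above mirrors the new TTBR0 into the ring's memstore; the second data dword packs the ASID into bits 31:16 above the high TTBR bits. That packing in isolation, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t ttbr = 0x1234567890ULL; /* hypothetical pagetable base */
        uint32_t asid = 5;               /* hypothetical address space ID */

        uint32_t lo = (uint32_t)ttbr;
        uint32_t hi = (asid << 16) | (uint32_t)(ttbr >> 32);

        printf("memstore dwords: lo=0x%08x hi=0x%08x\n", lo, hi);
        return 0;
    }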
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        struct msm_drm_private *priv = gpu->dev->dev_private;
@@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;
 
+       a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_start));
 
@@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
+static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+               struct drm_gem_object *obj)
+{
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+
+       if (IS_ERR(buf))
+               return;
+
+       /*
+        * If the lowest nibble of dword 0 is 0xa, this microcode has been
+        * patched; the patchlevel is then the lowest nibble of buf[2], and a
+        * patchlevel of at least 1 carries the fix.
+        *
+        * Otherwise require a firmware version newer than 1.90; versions
+        * after that point have the fix built in.
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a6xx_gpu->has_whereami = true;
+       else if ((buf[0] & 0xfff) > 0x190)
+               a6xx_gpu->has_whereami = true;
+
+       msm_gem_put_vaddr(obj);
+}
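A standalone rendering of the same version test may help; the firmware header values below are hypothetical (a patched image with patchlevel 1, an unpatched 1.91 image, and an unpatched 1.90 image):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool sqe_has_whereami(const uint32_t *buf)
    {
        /* Patched microcode: low nibble of dword 0 is 0xa and the
         * patchlevel nibble in buf[2] is at least 1 */
        if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
            return true;
        /* Otherwise require a version newer than 1.90 */
        return (buf[0] & 0xfff) > 0x190;
    }

    int main(void)
    {
        uint32_t patched[3] = { 0x00a, 0, 0x001 };
        uint32_t v191[3]    = { 0x191, 0, 0x000 };
        uint32_t v190[3]    = { 0x190, 0, 0x000 };

        printf("%d %d %d\n", sqe_has_whereami(patched),
               sqe_has_whereami(v191), sqe_has_whereami(v190)); /* 1 1 0 */
        return 0;
    }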
+
 static int a6xx_ucode_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
 
-       gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
-               MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+       /*
+        * Targets that support extended APRIV can use the RPTR shadow from
+        * hardware, but all others need to disable the feature. Targets that
+        * support the WHERE_AM_I opcode can use that instead.
+        */
+       if (adreno_gpu->base.hw_apriv)
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+       else
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+                       MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+       /*
+        * Expanded APRIV and targets that support WHERE_AM_I both need a
+        * privileged buffer to store the RPTR shadow.
+        */
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+               if (!a6xx_gpu->shadow_bo) {
+                       a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a6xx_gpu->shadow_bo,
+                               &a6xx_gpu->shadow_iova);
+
+                       if (IS_ERR(a6xx_gpu->shadow))
+                               return PTR_ERR(a6xx_gpu->shadow);
+               }
+
+               gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+                       REG_A6XX_CP_RB_RPTR_ADDR_HI,
+                       shadowptr(a6xx_gpu, gpu->rb[0]));
+       }
 
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
 
+       a6xx_gpu->cur_ctx = NULL;
+
        /* Enable the SQE to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
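Taken together, the RB_CNTL setup and the shadow allocation above reduce to a small decision table for where the read pointer comes from. Sketched standalone, with hypothetical inputs:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *rptr_source(bool hw_apriv, bool has_whereami)
    {
        if (hw_apriv)
            return "HW-written RPTR shadow (extended APRIV)";
        if (has_whereami)
            return "CP_WHERE_AM_I writes to the privileged shadow";
        return "register read, shadow disabled via NO_UPDATE";
    }

    int main(void)
    {
        printf("%s\n", rptr_source(true, false));
        printf("%s\n", rptr_source(false, true));
        printf("%s\n", rptr_source(false, false));
        return 0;
    }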
 
@@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
        return IRQ_HANDLED;
 }
 
-static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
-               REG_A6XX_CP_RB_RPTR_ADDR_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A6XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
-};
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
        gpu->needs_hw_init = true;
 
+       trace_msm_gpu_resume(0);
+
        ret = a6xx_gmu_resume(a6xx_gpu);
        if (ret)
                return ret;
@@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
+       trace_msm_gpu_suspend(0);
+
        devfreq_suspend_device(gpu->devfreq.devfreq);
 
        return a6xx_gmu_stop(a6xx_gpu);
@@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
                drm_gem_object_put(a6xx_gpu->sqe_bo);
        }
 
+       if (a6xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a6xx_gpu->shadow_bo);
+       }
+
        a6xx_gmu_remove(a6xx_gpu);
 
        adreno_gpu_cleanup(adreno_gpu);
@@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static struct msm_gem_address_space *
+a6xx_create_private_address_space(struct msm_gpu *gpu)
+{
+       struct msm_mmu *mmu;
+
+       mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+
+       if (IS_ERR(mmu))
+               return ERR_CAST(mmu);
+
+       return msm_gem_address_space_create(mmu,
+               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+}
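Assuming the last two arguments to msm_gem_address_space_create() are the VA start and the size, the private per-context space begins at 4 GiB, comfortably above GMEM and the kernel buffers in the shared aspace. Checking the constants with plain arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t va_start = 0x100000000ULL; /* 4 GiB */
        uint64_t size     = 0x1ffffffffULL; /* as passed above */

        printf("start = %llu GiB, last byte = 0x%llx\n",
               (unsigned long long)(va_start >> 30),
               (unsigned long long)(va_start + size - 1));
        return 0;
    }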
+
+static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+               return a6xx_gpu->shadow[ring->id];
+
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = a6xx_pm_resume,
                .recover = a6xx_recover,
                .submit = a6xx_submit,
-               .flush = a6xx_flush,
                .active_ring = a6xx_active_ring,
                .irq = a6xx_irq,
                .destroy = a6xx_destroy,
@@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_put = a6xx_gpu_state_put,
 #endif
                .create_address_space = adreno_iommu_create_address_space,
+               .create_private_address_space = a6xx_create_private_address_space,
+               .get_rptr = a6xx_get_rptr,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
@@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
+       struct adreno_platform_config *config = pdev->dev.platform_data;
+       const struct adreno_info *info;
        struct device_node *node;
        struct a6xx_gpu *a6xx_gpu;
        struct adreno_gpu *adreno_gpu;
@@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
        gpu = &adreno_gpu->base;
 
        adreno_gpu->registers = NULL;
-       adreno_gpu->reg_offsets = a6xx_register_offsets;
 
-       if (adreno_is_a650(adreno_gpu))
+       /*
+        * We need to know the platform type before calling into adreno_gpu_init
+        * so that the hw_apriv flag can be correctly set. Snoop into the info
+        * and grab the revision number
+        */
+       info = adreno_info(config->rev);
+
+       if (info && info->revn == 650)
                adreno_gpu->base.hw_apriv = true;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
index 03ba60d..3eeebf6 100644
@@ -19,8 +19,15 @@ struct a6xx_gpu {
        uint64_t sqe_iova;
 
        struct msm_ringbuffer *cur_ring;
+       struct msm_file_private *cur_ctx;
 
        struct a6xx_gmu gmu;
+
+       struct drm_gem_object *shadow_bo;
+       uint64_t shadow_iova;
+       uint32_t *shadow;
+
+       bool has_whereami;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
        return true;
 }
 
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+               ((_ring)->id * sizeof(uint32_t)))
+
 int a6xx_gmu_resume(struct a6xx_gpu *gpu);
 int a6xx_gmu_stop(struct a6xx_gpu *gpu);
 
index b12f5b4..e9ede19 100644
@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
        int i;
 
        a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
-               sizeof(a6xx_state->indexed_regs));
+               sizeof(*a6xx_state->indexed_regs));
        if (!a6xx_state->indexed_regs)
                return;
 
index 9eeb46b..58e03b2 100644
@@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
        int ret;
 
        if (pdev)
-               gpu = platform_get_drvdata(pdev);
+               gpu = dev_to_gpu(&pdev->dev);
 
        if (!gpu) {
                dev_err_once(dev->dev, "no GPU device was found\n");
@@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(gpu);
        }
 
-       dev_set_drvdata(dev, gpu);
-
        return 0;
 }
 
 static void adreno_unbind(struct device *dev, struct device *master,
                void *data)
 {
-       struct msm_gpu *gpu = dev_get_drvdata(dev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        pm_runtime_force_suspend(dev);
        gpu->funcs->destroy(gpu);
@@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = {
 #ifdef CONFIG_PM
 static int adreno_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_gpu *gpu = platform_get_drvdata(pdev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_resume(gpu);
 }
 
 static int adreno_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_gpu *gpu = platform_get_drvdata(pdev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_suspend(gpu);
 }
index 862dd35..458b5b2 100644
@@ -189,12 +189,27 @@ struct msm_gem_address_space *
 adreno_iommu_create_address_space(struct msm_gpu *gpu,
                struct platform_device *pdev)
 {
-       struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
-       struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+       struct iommu_domain *iommu;
+       struct msm_mmu *mmu;
        struct msm_gem_address_space *aspace;
+       u64 start, size;
 
-       aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               0xffffffff - SZ_16M);
+       iommu = iommu_domain_alloc(&platform_bus_type);
+       if (!iommu)
+               return NULL;
+
+       mmu = msm_iommu_new(&pdev->dev, iommu);
+
+       /*
+        * Use the aperture start or SZ_16M, whichever is greater. This will
+        * ensure that we align with the allocated pagetable range while still
+        * allowing room in the lower 32 bits for GMEM and whatnot
+        */
+       start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+       size = iommu->geometry.aperture_end - start + 1;
+
+       aspace = msm_gem_address_space_create(mmu, "gpu",
+               start & GENMASK_ULL(48, 0), size);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
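The start/size computation above can be checked in isolation; the aperture values below are hypothetical, SZ_16M is 0x1000000, and GENMASK_ULL(48, 0) keeps the low 49 bits:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_16M 0x1000000ULL

    int main(void)
    {
        /* Hypothetical IOMMU aperture: the full 32-bit range */
        uint64_t aperture_start = 0;
        uint64_t aperture_end   = 0xffffffffULL;

        uint64_t start = aperture_start > SZ_16M ? aperture_start : SZ_16M;
        uint64_t size  = aperture_end - start + 1;

        uint64_t mask48 = (1ULL << 49) - 1; /* GENMASK_ULL(48, 0) */

        printf("start=0x%llx size=0x%llx masked=0x%llx\n",
               (unsigned long long)start, (unsigned long long)size,
               (unsigned long long)(start & mask48));
        return 0;
    }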
@@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu)
 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
                struct msm_ringbuffer *ring)
 {
-       return ring->memptrs->rptr = adreno_gpu_read(
-               adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+       struct msm_gpu *gpu = &adreno_gpu->base;
+
+       return gpu->funcs->get_rptr(gpu, ring);
 }
 
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu)
        }
 }
 
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct msm_drm_private *priv = gpu->dev->dev_private;
-       struct msm_ringbuffer *ring = submit->ring;
-       unsigned i;
-
-       for (i = 0; i < submit->nr_cmds; i++) {
-               switch (submit->cmd[i].type) {
-               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
-                       /* ignore IB-targets */
-                       break;
-               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       /* ignore if there has not been a ctx switch: */
-                       if (priv->lastctx == ctx)
-                               break;
-                       fallthrough;
-               case MSM_SUBMIT_CMD_BUF:
-                       OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
-                               CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
-                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
-                       OUT_RING(ring, submit->cmd[i].size);
-                       OUT_PKT2(ring);
-                       break;
-               }
-       }
-
-       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
-       OUT_RING(ring, submit->seqno);
-
-       if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
-               /* Flush HLSQ lazy updates to make sure there is nothing
-                * pending for indirect loads after the timestamp has
-                * passed:
-                */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 1);
-               OUT_RING(ring, HLSQ_FLUSH);
-       }
-
-       /* wait for idle before cache flush/interrupt */
-       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
-       OUT_RING(ring, 0x00000000);
-
-       if (!adreno_is_a2xx(adreno_gpu)) {
-               /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-               OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
-               OUT_RING(ring, rbmemptr(ring, fence));
-               OUT_RING(ring, submit->seqno);
-       } else {
-               /* BIT(31) means something else on a2xx */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-               OUT_RING(ring, CACHE_FLUSH_TS);
-               OUT_RING(ring, rbmemptr(ring, fence));
-               OUT_RING(ring, submit->seqno);
-               OUT_PKT3(ring, CP_INTERRUPT, 1);
-               OUT_RING(ring, 0x80000000);
-       }
-
-#if 0
-       if (adreno_is_a3xx(adreno_gpu)) {
-               /* Dummy set-constant to trigger context rollover */
-               OUT_PKT3(ring, CP_SET_CONSTANT, 2);
-               OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
-               OUT_RING(ring, 0x00000000);
-       }
-#endif
-
-       gpu->funcs->flush(gpu, ring);
-}
-
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
 {
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t wptr;
 
        /* Copy the shadow to the actual register */
@@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
 
-       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
+       gpu_write(gpu, reg, wptr);
 }
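With the WPTR register now a parameter, each target passes its own register and can wrap extra work around the common helper. A hedged sketch of what such a wrapper reduces to; kernel context assumed, the wrapper name is hypothetical, and the real a5xx_flush declared earlier also handles its sync flag:

    /* Sketch only: a minimal per-target flush built on the new
     * adreno_flush(gpu, ring, reg) signature. */
    static void a4xx_flush_sketch(struct msm_gpu *gpu,
                                  struct msm_ringbuffer *ring)
    {
        adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
    }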
 
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
index e55abae..c3775f7 100644
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
-#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
-#define REG_SKIP ~0
-#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
-
 extern bool snapshot_debugbus;
 
-/**
- * adreno_regs: List of registers that are used in across all
- * 3D devices. Each device type has different offset value for the same
- * register, so an array of register offsets are declared for every device
- * and are indexed by the enumeration values defined in this enum
- */
-enum adreno_regs {
-       REG_ADRENO_CP_RB_BASE,
-       REG_ADRENO_CP_RB_BASE_HI,
-       REG_ADRENO_CP_RB_RPTR_ADDR,
-       REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-       REG_ADRENO_CP_RB_RPTR,
-       REG_ADRENO_CP_RB_WPTR,
-       REG_ADRENO_CP_RB_CNTL,
-       REG_ADRENO_REGISTER_MAX,
-};
-
 enum {
        ADRENO_FW_PM4 = 0,
        ADRENO_FW_SQE = 0, /* a6xx */
@@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu)
        return gpu->revn == 225;
 }
 
-static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
-{
-       return (gpu->revn >= 300) && (gpu->revn < 400);
-}
-
 static inline bool adreno_is_a305(struct adreno_gpu *gpu)
 {
        return gpu->revn == 305;
@@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
        return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
 }
 
-static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
-{
-       return (gpu->revn >= 400) && (gpu->revn < 500);
-}
-
 static inline int adreno_is_a405(struct adreno_gpu *gpu)
 {
        return gpu->revn == 405;
@@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
                const struct firmware *fw, u64 *iova);
 int adreno_hw_init(struct msm_gpu *gpu);
 void adreno_recover(struct msm_gpu *gpu);
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
@@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
                ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
 }
 
-/*
- * adreno_reg_check() - Checks the validity of a register enum
- * @gpu:               Pointer to struct adreno_gpu
- * @offset_name:       The register enum that is checked
- */
-static inline bool adreno_reg_check(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name)
-{
-       BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
-
-       /*
-        * REG_SKIP is a special value that tell us that the register in
-        * question isn't implemented on target but don't trigger a BUG(). This
-        * is used to cleanly implement adreno_gpu_write64() and
-        * adreno_gpu_read64() in a generic fashion
-        */
-       if (gpu->reg_offsets[offset_name] == REG_SKIP)
-               return false;
-
-       return true;
-}
-
-static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name)
-{
-       u32 reg = gpu->reg_offsets[offset_name];
-       u32 val = 0;
-       if(adreno_reg_check(gpu,offset_name))
-               val = gpu_read(&gpu->base, reg - 1);
-       return val;
-}
-
-static inline void adreno_gpu_write(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name, u32 data)
-{
-       u32 reg = gpu->reg_offsets[offset_name];
-       if(adreno_reg_check(gpu, offset_name))
-               gpu_write(&gpu->base, reg - 1, data);
-}
-
 struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
 
-static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
-               enum adreno_regs lo, enum adreno_regs hi, u64 data)
-{
-       adreno_gpu_write(gpu, lo, lower_32_bits(data));
-       adreno_gpu_write(gpu, hi, upper_32_bits(data));
-}
-
 static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
 {
        return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
index 3931eec..59bb8c1 100644
@@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets {
        CP_SET_BIN_DATA5_OFFSET = 46,
        CP_SET_CTXSWITCH_IB = 85,
        CP_REG_WRITE = 109,
+       CP_WHERE_AM_I = 98,
 };
 
 enum adreno_state_block {
index f1bc6a1..84ea09d 100644
@@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
 }
 
 #ifdef CONFIG_DEBUG_FS
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
-
 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 {
        struct dpu_irq *irq_obj = s->private;
@@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
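DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates what the removed local macro open-coded. Paraphrased (not the verbatim header text), DEFINE_SHOW_ATTRIBUTE(name) expands to roughly:

    /* Requires an existing name##_show(struct seq_file *, void *) */
    static int name_open(struct inode *inode, struct file *file)
    {
        return single_open(file, name_show, inode->i_private);
    }

    static const struct file_operations name_fops = {
        .owner   = THIS_MODULE,
        .open    = name_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

hence the *_fops symbols referenced at the debugfs_create_file() call sites without any hand-rolled fops.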
 
 void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
index b36919d..393858e 100644
@@ -30,6 +30,74 @@ enum dpu_perf_mode {
        DPU_PERF_MODE_MAX
 };
 
+/**
+ * _dpu_core_perf_calc_bw() - calculate BW per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * Return: aggregated BW for all planes in crtc.
+ */
+static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+               struct drm_crtc *crtc)
+{
+       struct drm_plane *plane;
+       struct dpu_plane_state *pstate;
+       u64 crtc_plane_bw = 0;
+       u32 bw_factor;
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               pstate = to_dpu_plane_state(plane->state);
+               if (!pstate)
+                       continue;
+
+               crtc_plane_bw += pstate->plane_fetch_bw;
+       }
+
+       bw_factor = kms->catalog->perf.bw_inefficiency_factor;
+       if (bw_factor) {
+               crtc_plane_bw *= bw_factor;
+               do_div(crtc_plane_bw, 100);
+       }
+
+       return crtc_plane_bw;
+}
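The inefficiency factor is a percentage, so a factor of 120 inflates the aggregated bandwidth by 20%. A quick standalone check of the do_div() arithmetic, with hypothetical numbers (do_div divides a u64 in place; plain 64-bit division models it here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t crtc_plane_bw = 1000000; /* summed plane fetch BW */
        uint32_t bw_factor = 120;         /* bw_inefficiency_factor */

        crtc_plane_bw *= bw_factor;
        crtc_plane_bw /= 100;             /* do_div(bw, 100) equivalent */

        printf("%llu\n", (unsigned long long)crtc_plane_bw); /* 1200000 */
        return 0;
    }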
+
+/**
+ * _dpu_core_perf_calc_clk() - calculate clock rate per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: max clk for all planes in crtc.
+ */
+static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+               struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+       struct drm_plane *plane;
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       u64 crtc_clk;
+       u32 clk_factor;
+
+       mode = &state->adjusted_mode;
+
+       crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               pstate = to_dpu_plane_state(plane->state);
+               if (!pstate)
+                       continue;
+
+               crtc_clk = max(pstate->plane_clk, crtc_clk);
+       }
+
+       clk_factor = kms->catalog->perf.clk_inefficiency_factor;
+       if (clk_factor) {
+               crtc_clk *= clk_factor;
+               do_div(crtc_clk, 100);
+       }
+
+       return crtc_clk;
+}
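The base clock is vtotal * hdisplay * vrefresh; for a hypothetical 1080p60 mode with a vtotal of 1125, that is 1125 * 1920 * 60 = 129.6 MHz before the inefficiency factor:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vtotal = 1125, hdisplay = 1920, vrefresh = 60;
        uint64_t crtc_clk = vtotal * hdisplay * vrefresh;

        uint32_t clk_factor = 105; /* clk_inefficiency_factor, e.g. sc7180 */

        crtc_clk = crtc_clk * clk_factor / 100;
        printf("%llu Hz\n", (unsigned long long)crtc_clk); /* 136080000 */
        return 0;
    }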
+
 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
        struct msm_drm_private *priv;
@@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
        dpu_cstate = to_dpu_crtc_state(state);
        memset(perf, 0, sizeof(struct dpu_core_perf_params));
 
-       if (!dpu_cstate->bw_control) {
-               perf->bw_ctl = kms->catalog->perf.max_bw_high *
-                                       1000ULL;
-               perf->max_per_pipe_ib = perf->bw_ctl;
-               perf->core_clk_rate = kms->perf.max_core_clk_rate;
-       } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+       if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
                perf->bw_ctl = 0;
                perf->max_per_pipe_ib = 0;
                perf->core_clk_rate = 0;
@@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
                perf->bw_ctl = kms->perf.fix_core_ab_vote;
                perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
                perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+       } else {
+               perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
+               perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib;
+               perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
        }
 
        DPU_DEBUG(
@@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
                        DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
                                tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
                                tmp_cstate->bw_control);
-                       /*
-                        * For bw check only use the bw if the
-                        * atomic property has been already set
-                        */
-                       if (tmp_cstate->bw_control)
-                               bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+                       bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
                }
 
@@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 
                DPU_DEBUG("final threshold bw limit = %d\n", threshold);
 
-               if (!dpu_cstate->bw_control) {
-                       DPU_DEBUG("bypass bandwidth check\n");
-               } else if (!threshold) {
+               if (!threshold) {
                        DPU_ERROR("no bandwidth limits specified\n");
                        return -E2BIG;
                } else if (bw > threshold) {
@@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
                                        = dpu_crtc_get_client_type(crtc);
        struct drm_crtc *tmp_crtc;
        struct dpu_crtc_state *dpu_cstate;
-       int ret = 0;
+       int i, ret = 0;
+       u64 avg_bw;
+
+       if (!kms->num_paths)
+               return -EINVAL;
 
        drm_for_each_crtc(tmp_crtc, crtc->dev) {
                if (tmp_crtc->enabled &&
@@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
                        perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
                                        dpu_cstate->new_perf.max_per_pipe_ib);
 
-                       DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
-                                       dpu_cstate->new_perf.bw_ctl);
+                       perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+                       DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+                                 tmp_crtc->base.id,
+                                 dpu_cstate->new_perf.bw_ctl, kms->num_paths);
                }
        }
+
+       avg_bw = perf.bw_ctl;
+       do_div(avg_bw, (kms->num_paths * 1000)); /* Bps_to_icc */
+
+       for (i = 0; i < kms->num_paths; i++)
+               icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
        return ret;
 }
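icc_set_bw() takes its average-bandwidth argument in kBps, hence the division by num_paths * 1000: the summed Bps figure is split evenly across the interconnect paths and rescaled. With hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bw_ctl = 3000000000ULL; /* summed crtc bandwidth, Bps */
        int num_paths = 2;

        /* Bps -> kBps, split across paths */
        uint64_t avg_bw = bw_ctl / (num_paths * 1000);

        printf("per-path vote: %llu kBps\n",
               (unsigned long long)avg_bw); /* 1500000 */
        return 0;
    }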
 
index c2729f7..f56414a 100644
@@ -265,11 +265,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
 {
        struct drm_encoder *encoder;
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return INTF_MODE_NONE;
-       }
-
        /*
         * TODO: This function is called from dpu debugfs and as part of atomic
         * check. When called from debugfs, the crtc->mutex must be held to
@@ -297,7 +292,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
                dpu_crtc->vblank_cb_time = ktime_get();
        else
                dpu_crtc->vblank_cb_count++;
-       _dpu_crtc_complete_flip(crtc);
        drm_crtc_handle_vblank(crtc);
        trace_dpu_crtc_vblank_cb(DRMID(crtc));
 }
@@ -402,6 +396,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
 {
        trace_dpu_crtc_complete_commit(DRMID(crtc));
+       _dpu_crtc_complete_flip(crtc);
 }
 
 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
@@ -421,8 +416,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 
                trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
        }
-
-       drm_mode_debug_printmodeline(adj_mode);
 }
 
 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -457,7 +450,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
        struct dpu_crtc_mixer *mixer = cstate->mixers;
        struct dpu_hw_pcc_cfg cfg;
        struct dpu_hw_ctl *ctl;
-       struct dpu_hw_mixer *lm;
        struct dpu_hw_dspp *dspp;
        int i;
 
@@ -467,7 +459,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 
        for (i = 0; i < cstate->num_mixers; i++) {
                ctl = mixer[i].lm_ctl;
-               lm = mixer[i].hw_lm;
                dspp = mixer[i].hw_dspp;
 
                if (!dspp || !dspp->ops.setup_pcc)
@@ -496,16 +487,8 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
 {
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_crtc_state *cstate;
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
-       struct drm_device *dev;
-       unsigned long flags;
-
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
 
        if (!crtc->state->enable) {
                DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
@@ -515,21 +498,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 
        DPU_DEBUG("crtc%d\n", crtc->base.id);
 
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(crtc->state);
-       dev = crtc->dev;
-
        _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
 
-       if (dpu_crtc->event) {
-               WARN_ON(dpu_crtc->event);
-       } else {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               dpu_crtc->event = crtc->state->event;
-               crtc->state->event = NULL;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
-
        /* encoder will trigger pending mask now */
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_trigger_kickoff_pending(encoder);
@@ -583,14 +553,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
                return;
        }
 
-       if (dpu_crtc->event) {
-               DPU_DEBUG("already received dpu_crtc->event\n");
-       } else {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               dpu_crtc->event = crtc->state->event;
-               crtc->state->event = NULL;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
+       WARN_ON(dpu_crtc->event);
+       spin_lock_irqsave(&dev->event_lock, flags);
+       dpu_crtc->event = crtc->state->event;
+       crtc->state->event = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 
        /*
         * If no mixers has been allocated in dpu_crtc_atomic_check(),
@@ -635,14 +602,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
-       struct dpu_crtc_state *cstate;
-
-       if (!crtc || !state) {
-               DPU_ERROR("invalid argument(s)\n");
-               return;
-       }
-
-       cstate = to_dpu_crtc_state(state);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 
        DPU_DEBUG("crtc%d\n", crtc->base.id);
 
@@ -731,14 +691,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
  */
 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct dpu_crtc_state *cstate, *old_cstate;
+       struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
 
-       if (!crtc || !crtc->state) {
-               DPU_ERROR("invalid argument(s)\n");
-               return NULL;
-       }
-
-       old_cstate = to_dpu_crtc_state(crtc->state);
        cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
        if (!cstate) {
                DPU_ERROR("failed to allocate state\n");
@@ -754,19 +708,12 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 static void dpu_crtc_disable(struct drm_crtc *crtc,
                             struct drm_crtc_state *old_crtc_state)
 {
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_crtc_state *cstate;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
        unsigned long flags;
        bool release_bandwidth = false;
 
-       if (!crtc || !crtc->state) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(crtc->state);
-
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 
        /* Disable/save vblank irq handling */
@@ -825,19 +772,13 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 static void dpu_crtc_enable(struct drm_crtc *crtc,
                struct drm_crtc_state *old_crtc_state)
 {
-       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct drm_encoder *encoder;
        bool request_bandwidth = false;
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
-
        pm_runtime_get_sync(crtc->dev->dev);
 
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
-       dpu_crtc = to_dpu_crtc(crtc);
 
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
                /* in video mode, we hold an extra bandwidth reference
@@ -873,15 +814,15 @@ struct plane_state {
 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
-       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
        struct plane_state *pstates;
-       struct dpu_crtc_state *cstate;
 
        const struct drm_plane_state *pstate;
        struct drm_plane *plane;
        struct drm_display_mode *mode;
 
-       int cnt = 0, rc = 0, mixer_width, i, z_pos;
+       int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
 
        struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
        int multirect_count = 0;
@@ -889,16 +830,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
        int left_zpos_cnt = 0, right_zpos_cnt = 0;
        struct drm_rect crtc_rect = { 0 };
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return -EINVAL;
-       }
-
        pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
 
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(state);
-
        if (!state->enable || !state->active) {
                DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
                                crtc->base.id, state->enable, state->active);
@@ -914,9 +847,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
        memset(pipe_staged, 0, sizeof(pipe_staged));
 
-       mixer_width = mode->hdisplay / cstate->num_mixers;
+       if (cstate->num_mixers) {
+               mixer_width = mode->hdisplay / cstate->num_mixers;
 
-       _dpu_crtc_setup_lm_bounds(crtc, state);
+               _dpu_crtc_setup_lm_bounds(crtc, state);
+       }
 
        crtc_rect.x2 = mode->hdisplay;
        crtc_rect.y2 = mode->vdisplay;
@@ -1242,23 +1177,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
        return 0;
 }
 
-static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, _dpu_debugfs_status_show, inode->i_private);
-}
-
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
 
 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
 {
@@ -1275,25 +1194,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
 
        return 0;
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
 
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
-       static const struct file_operations debugfs_status_fops = {
-               .open =         _dpu_debugfs_status_open,
-               .read =         seq_read,
-               .llseek =       seq_lseek,
-               .release =      single_release,
-       };
-
        dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
                        crtc->dev->primary->debugfs_root);
 
        debugfs_create_file("status", 0400,
                        dpu_crtc->debugfs_root,
-                       dpu_crtc, &debugfs_status_fops);
+                       dpu_crtc, &_dpu_debugfs_status_fops);
        debugfs_create_file("state", 0600,
                        dpu_crtc->debugfs_root,
                        &dpu_crtc->base,
index bd6def4..f7f5c25 100644
@@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
        trace_dpu_enc_mode_set(DRMID(drm_enc));
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
+               msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+
        list_for_each_entry(conn_iter, connector_list, head)
                if (conn_iter->encoder == drm_enc)
                        conn = conn_iter;
@@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
                return;
        }
 
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+               dpu_enc->cur_master->hw_mdptop &&
+               dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+               dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+                       dpu_enc->cur_master->hw_mdptop);
+
        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
 
        if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
@@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 {
        struct dpu_encoder_virt *dpu_enc = NULL;
        int ret = 0;
+       struct msm_drm_private *priv;
        struct drm_display_mode *cur_mode = NULL;
 
        if (!drm_enc) {
@@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 
        mutex_lock(&dpu_enc->enc_lock);
        cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+       priv = drm_enc->dev->dev_private;
 
        trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
                             cur_mode->vdisplay);
@@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 
        _dpu_encoder_virt_enable_helper(drm_enc);
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               ret = msm_dp_display_enable(priv->dp,
+                                               drm_enc);
+               if (ret) {
+                       DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
+                               ret);
+                       goto out;
+               }
+       }
        dpu_enc->enabled = true;
 
 out:
@@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
        /* wait for idle */
        dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+                       DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
+       }
+
        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
                        phys->ops.disable(phys);
        }
 
        /* after phys waits for frame-done, should be no more frames pending */
        if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
                DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
@@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
        DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               if (msm_dp_display_disable(priv->dp, drm_enc))
+                       DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
+       }
+
        mutex_unlock(&dpu_enc->enc_lock);
 }
 
@@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
        return 0;
 }
 
-static int _dpu_encoder_debugfs_status_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, _dpu_encoder_status_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
 
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        int i;
 
-       static const struct file_operations debugfs_status_fops = {
-               .open =         _dpu_encoder_debugfs_status_open,
-               .read =         seq_read,
-               .llseek =       seq_lseek,
-               .release =      single_release,
-       };
-
        char name[DPU_NAME_SIZE];
 
        if (!drm_enc->dev) {
@@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
        /* don't error check these */
        debugfs_create_file("status", 0600,
-               dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+               dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++)
                if (dpu_enc->phys_encs[i]->ops.late_register)
@@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 {
        int ret = 0;
        int i = 0;
-       enum dpu_intf_type intf_type;
+       enum dpu_intf_type intf_type = INTF_NONE;
        struct dpu_enc_phys_init_params phys_params;
 
        if (!dpu_enc) {
@@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
        case DRM_MODE_ENCODER_DSI:
                intf_type = INTF_DSI;
                break;
-       default:
-               DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
-               return -EINVAL;
+       case DRM_MODE_ENCODER_TMDS:
+               intf_type = INTF_DP;
+               break;
        }
 
        WARN_ON(disp_info->num_of_h_tiles < 1);
index b5a4905..805e059 100644
@@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params(
         * display_v_end -= mode->hsync_start - mode->hdisplay;
         * }
         */
+       /* For DP/EDP, shift timings to align them to the bottom right */
+       if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
+               (phys_enc->hw_intf->cap->type == INTF_EDP)) {
+               timing->h_back_porch += timing->h_front_porch;
+               timing->h_front_porch = 0;
+               timing->v_back_porch += timing->v_front_porch;
+               timing->v_front_porch = 0;
+       }
 }
 
 static u32 get_horizontal_total(const struct intf_timing_params *timing)
@@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
        struct dpu_hw_ctl *hw_ctl;
        unsigned long lock_flags;
        u32 flush_register = 0;
-       int new_cnt = -1, old_cnt = -1;
 
        hw_ctl = phys_enc->hw_ctl;
 
@@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
                phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
                                phys_enc);
 
-       old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+       atomic_read(&phys_enc->pending_kickoff_cnt);
 
        /*
         * only decrement the pending flush count if we've actually flushed
@@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
                flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
        if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
-               new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-                               -1, 0);
+               atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
        spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
        /* Signal any waiting atomic commit thread */
index 97d122e..60b304b 100644
@@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
        .max_bw_high = 6800000,
        .min_core_ib = 2400000,
        .min_llcc_ib = 800000,
-       .min_dram_ib = 800000,
+       .min_dram_ib = 1600000,
+       .min_prefill_lines = 24,
        .danger_lut_tbl = {0xff, 0xffff, 0x0},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_linear),
@@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
                {.rd_enable = 1, .wr_enable = 1},
                {.rd_enable = 1, .wr_enable = 0}
        },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
 };
 
 static const struct dpu_perf_cfg sm8150_perf_data = {
index 1b7a921..3544af1 100644
@@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg {
  * @downscaling_prefill_lines  downscaling latency in lines
  * @amortizable_theshold minimum y position for traffic shaping prefill
  * @min_prefill_lines  minimum pipeline latency in lines
+ * @clk_inefficiency_factor DPU src clock inefficiency factor
+ * @bw_inefficiency_factor DPU axi bus bw inefficiency factor
  * @safe_lut_tbl: LUT tables for safe signals
  * @danger_lut_tbl: LUT tables for danger signals
  * @qos_lut_tbl: LUT tables for QoS signals
@@ -683,6 +685,8 @@ struct dpu_perf_cfg {
        u32 downscaling_prefill_lines;
        u32 amortizable_threshold;
        u32 min_prefill_lines;
+       u32 clk_inefficiency_factor;
+       u32 bw_inefficiency_factor;
        u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
        u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
        struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
index c0a4d4e..d93c44f 100644
@@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s,
        return 0;
 }
 
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
-
 static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
 {
        return _dpu_danger_signal_status(s, true);
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
 
 static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
 {
        return _dpu_danger_signal_status(s, false);
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
 
 static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
@@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
        void *p = dpu_hw_util_get_log_mask_ptr();
        struct dentry *entry;
+       struct drm_device *dev;
+       struct msm_drm_private *priv;
 
        if (!p)
                return -EINVAL;
 
+       dev = dpu_kms->dev;
+       priv = dev->dev_private;
+
        entry = debugfs_create_dir("debug", minor->debugfs_root);
 
        debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
        dpu_debugfs_vbif_init(dpu_kms, entry);
        dpu_debugfs_core_irq_init(dpu_kms, entry);
 
+       if (priv->dp)
+               msm_dp_debugfs_init(priv->dp, minor);
+
        return dpu_core_perf_debugfs_init(dpu_kms, entry);
 }
 #endif
@@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
        return 0;
 }
 
+static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
+{
+       struct icc_path *path0;
+       struct icc_path *path1;
+       struct drm_device *dev = dpu_kms->dev;
+
+       path0 = of_icc_get(dev->dev, "mdp0-mem");
+       path1 = of_icc_get(dev->dev, "mdp1-mem");
+
+       if (IS_ERR_OR_NULL(path0))
+               return PTR_ERR_OR_ZERO(path0);
+
+       dpu_kms->path[0] = path0;
+       dpu_kms->num_paths = 1;
+
+       if (!IS_ERR_OR_NULL(path1)) {
+               dpu_kms->path[1] = path1;
+               dpu_kms->num_paths++;
+       }
+       return 0;
+}
+
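One subtlety in the helper above: of_icc_get() returns an ERR_PTR() on genuine failures but can also return NULL when no matching interconnect path is described for the device, which is why the optional "mdp1-mem" path is probed with IS_ERR_OR_NULL() and silently skipped rather than treated as an error.
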
 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
        return dpu_crtc_vblank(crtc, true);
@@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
        return rc;
 }
 
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+                                           struct msm_drm_private *priv,
+                                           struct dpu_kms *dpu_kms)
+{
+       struct drm_encoder *encoder = NULL;
+       int rc = 0;
+
+       if (!priv->dp)
+               return rc;
+
+       encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+       if (IS_ERR(encoder)) {
+               DPU_ERROR("encoder init failed for dp display\n");
+               return PTR_ERR(encoder);
+       }
+
+       rc = msm_dp_modeset_init(priv->dp, dev, encoder);
+       if (rc) {
+               DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+               drm_encoder_cleanup(encoder);
+               return rc;
+       }
+
+       priv->encoders[priv->num_encoders++] = encoder;
+       return rc;
+}
+
 /**
  * _dpu_kms_setup_displays - create encoders, bridges and connectors
  *                           for underlying displays
@@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
                                    struct msm_drm_private *priv,
                                    struct dpu_kms *dpu_kms)
 {
-       /**
-        * Extend this function to initialize other
-        * types of displays
-        */
+       int rc = 0;
 
-       return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+       rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+       if (rc) {
+               DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
+               return rc;
+       }
+
+       rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
+       if (rc) {
+               DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
+               return rc;
+       }
+
+       return rc;
 }
 
 static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
        info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
                        MSM_DISPLAY_CAP_VID_MODE;
 
-       /* TODO: No support for DSI swap */
-       for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
-               if (priv->dsi[i]) {
-                       info.h_tile_instance[info.num_of_h_tiles] = i;
-                       info.num_of_h_tiles++;
+       switch (info.intf_type) {
+       case DRM_MODE_ENCODER_DSI:
+               /* TODO: No support for DSI swap */
+               for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+                       if (priv->dsi[i]) {
+                               info.h_tile_instance[info.num_of_h_tiles] = i;
+                               info.num_of_h_tiles++;
+                       }
                }
-       }
+               break;
+       case DRM_MODE_ENCODER_TMDS:
+               info.num_of_h_tiles = 1;
+               break;
+       }
 
        rc = dpu_encoder_setup(encoder->dev, encoder, &info);
        if (rc)
@@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms)
        dpu_core_irq_preinstall(dpu_kms);
 }
 
+static int dpu_irq_postinstall(struct msm_kms *kms)
+{
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       if (!dpu_kms || !dpu_kms->dev)
+               return -EINVAL;
+
+       priv = dpu_kms->dev->dev_private;
+       if (!priv)
+               return -EINVAL;
+
+       msm_dp_irq_postinstall(priv->dp);
+
+       return 0;
+}
+
 static void dpu_irq_uninstall(struct msm_kms *kms)
 {
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms)
 static const struct msm_kms_funcs kms_funcs = {
        .hw_init         = dpu_kms_hw_init,
        .irq_preinstall  = dpu_irq_preinstall,
+       .irq_postinstall = dpu_irq_postinstall,
        .irq_uninstall   = dpu_irq_uninstall,
        .irq             = dpu_irq,
        .enable_commit   = dpu_kms_enable_commit,
@@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
        dpu_vbif_init_memtypes(dpu_kms);
 
+       if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+               dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        return 0;
@@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev)
 
 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
 {
-       int rc = -1;
+       int i, rc = -1;
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct dss_module_power *mp = &dpu_kms->mp;
@@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
        if (rc)
                DPU_ERROR("clock disable failed rc:%d\n", rc);
 
+       for (i = 0; i < dpu_kms->num_paths; i++)
+               icc_set_bw(dpu_kms->path[i], 0, 0);
+
        return rc;
 }
 
@@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
        struct drm_encoder *encoder;
        struct drm_device *ddev;
        struct dss_module_power *mp = &dpu_kms->mp;
+       int i;
 
        ddev = dpu_kms->dev;
+
+       /* Min vote of BW is required before turning on AXI clk */
+       for (i = 0; i < dpu_kms->num_paths; i++)
+               icc_set_bw(dpu_kms->path[i], 0,
+                       dpu_kms->catalog->perf.min_dram_ib);
+
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
                DPU_ERROR("clock enable failed rc:%d\n", rc);
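
For context, icc_set_bw() takes an average-bandwidth vote followed by a peak-bandwidth vote, both in kBps in the interconnect API. A minimal sketch of the vote/unvote pattern used by the two runtime-PM paths above (hypothetical helper, assuming perf.min_dram_ib is already expressed in kBps):

static void dpu_kms_icc_vote(struct dpu_kms *dpu_kms, bool enable)
{
	u32 ib_kbps = enable ? dpu_kms->catalog->perf.min_dram_ib : 0;
	int i;

	/* avg (ab) vote stays 0; only the instantaneous (ib) vote toggles */
	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, ib_kbps);
}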
index e140cd6..1c0e4c0 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef __DPU_KMS_H__
 #define __DPU_KMS_H__
 
+#include <linux/interconnect.h>
+
 #include <drm/drm_drv.h>
 
 #include "msm_drv.h"
@@ -140,6 +142,8 @@ struct dpu_kms {
         * when disabled.
         */
        atomic_t bandwidth_ref;
+       struct icc_path *path[2];
+       u32 num_paths;
 };
 
 struct vsync_info {
index 7d3fdbb..cd40788 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/irqdesc.h>
 #include <linux/irqchip/chained_irq.h>
 #include "dpu_kms.h"
-#include <linux/interconnect.h>
 
 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
@@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev)
 
        DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
 
-       ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
-       if (ret)
-               return ret;
+       if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+               ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+               if (ret)
+                       return ret;
+       }
 
        mp = &dpu_mdss->mp;
        ret = msm_dss_parse_clock(pdev, mp);
index 29e373d..7ea90d2 100644 (file)
@@ -132,6 +132,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
 }
 
 /**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @plane: Pointer to drm plane.
+ * @fb: Pointer to the framebuffer the plane fetches from.
+ * Result: Updates calculated bandwidth in the plane state.
+ * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
+ * Prefill BW Equation: line src bytes * line_time
+ */
+static void _dpu_plane_calc_bw(struct drm_plane *plane,
+       struct drm_framebuffer *fb)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       const struct dpu_format *fmt = NULL;
+       struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+       int src_width, src_height, dst_height, fps;
+       u64 plane_prefill_bw;
+       u64 plane_bw;
+       u32 hw_latency_lines;
+       u64 scale_factor;
+       int vbp, vpw;
+
+       pstate = to_dpu_plane_state(plane->state);
+       mode = &plane->state->crtc->mode;
+
+       fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
+
+       src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+       src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+       dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+       fps = drm_mode_vrefresh(mode);
+       vbp = mode->vtotal - mode->vsync_end;
+       vpw = mode->vsync_end - mode->vsync_start;
+       hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;
+       scale_factor = src_height > dst_height ?
+               mult_frac(src_height, 1, dst_height) : 1;
+
+       plane_bw =
+               src_width * mode->vtotal * fps * fmt->bpp *
+               scale_factor;
+
+       plane_prefill_bw =
+               src_width * hw_latency_lines * fps * fmt->bpp *
+               scale_factor * mode->vtotal;
+
+       do_div(plane_prefill_bw, vbp + vpw);
+
+       pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
+}
+
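To make the two equations above concrete, a worked example with assumed numbers (hypothetical, not taken from this patch, and assuming fmt->bpp counts bytes per pixel): a 1920-wide source with no downscale (scale_factor = 1), 1080p timing with vtotal = 1125, fps = 60, bpp = 4, min_prefill_lines = 24, vbp = 36 and vpw = 5 gives

	plane_bw         = 1920 * 1125 * 60 * 4                  ~= 518 MB/s
	plane_prefill_bw = 1920 * 24 * 60 * 4 * 1125 / (36 + 5)  ~= 303 MB/s

so plane_fetch_bw = max(518, 303) MB/s: the steady-state fetch rate dominates here, and prefill only wins when many latency lines must be fetched within a short sync-plus-back-porch window.
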
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @plane: Pointer to drm plane.
+ * Result: Updates calculated clock in the plane state.
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h)
+ */
+static void _dpu_plane_calc_clk(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       int dst_width, src_height, dst_height, fps;
+
+       pstate = to_dpu_plane_state(plane->state);
+       mode = &plane->state->crtc->mode;
+
+       src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+       dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+       dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+       fps = drm_mode_vrefresh(mode);
+
+       pstate->plane_clk =
+               dst_width * mode->vtotal * fps;
+
+       if (src_height > dst_height) {
+               pstate->plane_clk *= src_height;
+               do_div(pstate->plane_clk, dst_height);
+       }
+}
+
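The clock arithmetic with the same assumed timing: dst_width = 1920, vtotal = 1125 and fps = 60 give plane_clk = 1920 * 1125 * 60 ~= 129.6 MHz, and a 2:1 vertical downscale (src_h = 2 * dst_h) doubles that to ~259.2 MHz, since the pipe must fetch two source lines per destination line.
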
+/**
  * _dpu_plane_calc_fill_level - calculate fill level of the given source format
  * @plane:             Pointer to drm plane
  * @fmt:               Pointer to source buffer format
@@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
        }
 
        _dpu_plane_set_qos_remap(plane);
+
+       _dpu_plane_calc_bw(plane, fb);
+
+       _dpu_plane_calc_clk(plane);
 }
 
 static void _dpu_plane_atomic_disable(struct drm_plane *plane)
index 4569497..ca83b87 100644 (file)
@@ -25,6 +25,8 @@
  * @scaler3_cfg: configuration data for scaler3
  * @pixel_ext: configuration data for pixel extensions
  * @cdp_cfg:   CDP configuration
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
  */
 struct dpu_plane_state {
        struct drm_plane_state base;
@@ -39,6 +41,8 @@ struct dpu_plane_state {
        struct dpu_hw_pixel_ext pixel_ext;
 
        struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+       u64 plane_fetch_bw;
+       u64 plane_clk;
 };
 
 /**
index 5d89560..88645db 100644 (file)
@@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-/* not ironically named at all.. no, really.. */
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       struct drm_device *dev = mdp4_dtv_encoder->base.dev;
-       struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
-
-       if (!dtv_pdata) {
-               DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
-               return;
-       }
-
-       if (dtv_pdata->bus_scale_table) {
-               mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
-                               dtv_pdata->bus_scale_table);
-               DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
-               DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
-               if (dtv_pdata->lcdc_power_save)
-                       dtv_pdata->lcdc_power_save(1);
-       }
-}
-
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
-               mdp4_dtv_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
-#endif
-
 static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       bs_fini(mdp4_dtv_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp4_dtv_encoder);
 }
@@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
        clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
        clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
 
-       bs_set(mdp4_dtv_encoder, 0);
-
        mdp4_dtv_encoder->enabled = false;
 }
 
@@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
                        MDP4_DMA_CONFIG_PACK(0x21));
        mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
 
-       bs_set(mdp4_dtv_encoder, 1);
-
        DBG("setting mdp_clk=%lu", pc);
 
        ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
@@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
                goto fail;
        }
 
-       bs_init(mdp4_dtv_encoder);
-
        return encoder;
 
 fail:
index 18933bd..e8ee92a 100644 (file)
@@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
 }
 #endif
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-/* bus scaling data is associated with extra pointless platform devices,
- * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
- * to find their pdata to make the bus-scaling stuff work.
- */
-static inline void *mdp4_find_pdata(const char *devname)
-{
-       struct device *dev;
-       dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
-       return dev ? dev->platform_data : NULL;
-}
-#endif
-
 #endif /* __MDP4_KMS_H__ */
index 871f351..10eb3e5 100644 (file)
@@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
-       struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
-       struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
-
-       if (!lcdc_pdata) {
-               DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
-               return;
-       }
-
-       if (lcdc_pdata->bus_scale_table) {
-               mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
-                               lcdc_pdata->bus_scale_table);
-               DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
-       }
-}
-
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
-       if (mdp4_lcdc_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc);
-               mdp4_lcdc_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx)
-{
-       if (mdp4_lcdc_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {}
-#endif
-
 static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
                        to_mdp4_lcdc_encoder(encoder);
-       bs_fini(mdp4_lcdc_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp4_lcdc_encoder);
 }
@@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
                        DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
        }
 
-       bs_set(mdp4_lcdc_encoder, 0);
-
        mdp4_lcdc_encoder->enabled = false;
 }
 
@@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
        mdp4_crtc_set_config(encoder->crtc, config);
        mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
 
-       bs_set(mdp4_lcdc_encoder, 1);
-
        for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
                ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
                if (ret)
@@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        }
        mdp4_lcdc_encoder->regs[2] = reg;
 
-       bs_init(mdp4_lcdc_encoder);
-
        return encoder;
 
 fail:
index eeef41f..ff2c1d5 100644 (file)
@@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
-{
-       if (mdp5_cmd_enc->bsc) {
-               DBG("set bus scaling: %d", idx);
-               /* HACK: scaling down, and then immediately back up
-                * seems to leave things broken (underflow).. so
-                * never disable:
-                */
-               idx = 1;
-               msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
-       }
-}
-#else
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
-#endif
-
 #define VSYNC_CLK_RATE 19200000
 static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
                                    struct drm_display_mode *mode)
@@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
        mdp5_ctl_set_encoder_state(ctl, pipeline, false);
        mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
 
-       bs_set(mdp5_cmd_enc, 0);
-
        mdp5_cmd_enc->enabled = false;
 }
 
@@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
        if (WARN_ON(mdp5_cmd_enc->enabled))
                return;
 
-       bs_set(mdp5_cmd_enc, 1);
        if (pingpong_tearcheck_enable(encoder))
                return;
 
index f488272..79d67c4 100644 (file)
@@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <mach/msm_bus.h>
-#include <mach/msm_bus_board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)           \
-       {                                               \
-               .src = MSM_BUS_MASTER_MDP_PORT0,        \
-               .dst = MSM_BUS_SLAVE_EBI_CH0,           \
-               .ab = (ab_val),                         \
-               .ib = (ib_val),                         \
-       }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
-       MDP_BUS_VECTOR_ENTRY(0, 0),
-       MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
-               .num_paths = 1,
-               .vectors = &mdp_bus_vectors[0],
-}, {
-               .num_paths = 1,
-               .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
-       .usecase = mdp_bus_usecases,
-       .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
-       .name = "mdss_mdp",
-};
-
-static void bs_init(struct mdp5_encoder *mdp5_encoder)
-{
-       mdp5_encoder->bsc = msm_bus_scale_register_client(
-                       &mdp_bus_scale_table);
-       DBG("bus scale client: %08x", mdp5_encoder->bsc);
-}
-
-static void bs_fini(struct mdp5_encoder *mdp5_encoder)
-{
-       if (mdp5_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp5_encoder->bsc);
-               mdp5_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
-{
-       if (mdp5_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               /* HACK: scaling down, and then immediately back up
-                * seems to leave things broken (underflow).. so
-                * never disable:
-                */
-               idx = 1;
-               msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
-#endif
-
 static void mdp5_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-       bs_fini(mdp5_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp5_encoder);
 }
@@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
         */
        mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
 
-       bs_set(mdp5_encoder, 0);
-
        mdp5_encoder->enabled = false;
 }
 
@@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
        if (WARN_ON(mdp5_encoder->enabled))
                return;
 
-       bs_set(mdp5_encoder, 1);
        spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
@@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 
        drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
 
-       bs_init(mdp5_encoder);
-
        return encoder;
 
 fail:
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
new file mode 100644 (file)
index 0000000..82a8673
--- /dev/null
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/of_platform.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "dp_catalog.h"
+#include "dp_audio.h"
+#include "dp_panel.h"
+#include "dp_display.h"
+
+#define HEADER_BYTE_2_BIT       0
+#define PARITY_BYTE_2_BIT       8
+#define HEADER_BYTE_1_BIT      16
+#define PARITY_BYTE_1_BIT      24
+#define HEADER_BYTE_3_BIT      16
+#define PARITY_BYTE_3_BIT      24
+
+struct dp_audio_private {
+       struct platform_device *audio_pdev;
+       struct platform_device *pdev;
+       struct dp_catalog *catalog;
+       struct dp_panel *panel;
+
+       bool engine_on;
+       u32 channels;
+
+       struct dp_audio dp_audio;
+};
+
+static u8 dp_audio_get_g0_value(u8 data)
+{
+       u8 c[4];
+       u8 g[4];
+       u8 ret_data = 0;
+       u8 i;
+
+       for (i = 0; i < 4; i++)
+               c[i] = (data >> i) & 0x01;
+
+       g[0] = c[3];
+       g[1] = c[0] ^ c[3];
+       g[2] = c[1];
+       g[3] = c[2];
+
+       for (i = 0; i < 4; i++)
+               ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+       return ret_data;
+}
+
+static u8 dp_audio_get_g1_value(u8 data)
+{
+       u8 c[4];
+       u8 g[4];
+       u8 ret_data = 0;
+       u8 i;
+
+       for (i = 0; i < 4; i++)
+               c[i] = (data >> i) & 0x01;
+
+       g[0] = c[0] ^ c[3];
+       g[1] = c[0] ^ c[1] ^ c[3];
+       g[2] = c[1] ^ c[2];
+       g[3] = c[2] ^ c[3];
+
+       for (i = 0; i < 4; i++)
+               ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+       return ret_data;
+}
+
+static u8 dp_audio_calculate_parity(u32 data)
+{
+       u8 x0 = 0;
+       u8 x1 = 0;
+       u8 ci = 0;
+       u8 iData = 0;
+       u8 i = 0;
+       u8 parity_byte;
+       u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+       for (i = 0; i < num_byte; i++) {
+               iData = (data >> i*4) & 0xF;
+
+               ci = iData ^ x1;
+               x1 = x0 ^ dp_audio_get_g1_value(ci);
+               x0 = dp_audio_get_g0_value(ci);
+       }
+
+       parity_byte = x1 | (x0 << 4);
+
+       return parity_byte;
+}
+
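The two g-value helpers above implement one nibble-wide Galois-field round of the ECC that DisplayPort applies to SDP header bytes. A quick self-check derived from the routine itself (hypothetical harness, not part of the patch; the expected value was obtained by stepping through the code):

/* Audio Stream SDP type 0x02 takes the short num_byte == 2 path
 * (bits 15:8 clear) and maps to the parity byte 0xce.
 */
static void dp_audio_parity_selftest(void)
{
	WARN_ON(dp_audio_calculate_parity(0x02) != 0xce);
}
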
+static u32 dp_audio_get_header(struct dp_catalog *catalog,
+               enum dp_catalog_audio_sdp_type sdp,
+               enum dp_catalog_audio_header_type header)
+{
+       catalog->sdp_type = sdp;
+       catalog->sdp_header = header;
+       dp_catalog_audio_get_header(catalog);
+
+       return catalog->audio_data;
+}
+
+static void dp_audio_set_header(struct dp_catalog *catalog,
+               u32 data,
+               enum dp_catalog_audio_sdp_type sdp,
+               enum dp_catalog_audio_header_type header)
+{
+       catalog->sdp_type = sdp;
+       catalog->sdp_header = header;
+       catalog->audio_data = data;
+       dp_catalog_audio_set_header(catalog);
+}
+
+static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x02;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+       new_value = value;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = audio->channels - 1;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+               value, parity_byte);
+
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x1;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+               value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x17;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = (0x0 | (0x11 << 2));
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x84;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x1b;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = (0x0 | (0x11 << 2));
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x05;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x0F;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = 0x0;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x06;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x0F;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+}
+
+static void dp_audio_setup_sdp(struct dp_audio_private *audio)
+{
+       dp_catalog_audio_config_sdp(audio->catalog);
+
+       dp_audio_stream_sdp(audio);
+       dp_audio_timestamp_sdp(audio);
+       dp_audio_infoframe_sdp(audio);
+       dp_audio_copy_management_sdp(audio);
+       dp_audio_isrc_sdp(audio);
+}
+
+static void dp_audio_setup_acr(struct dp_audio_private *audio)
+{
+       u32 select = 0;
+       struct dp_catalog *catalog = audio->catalog;
+
+       switch (audio->dp_audio.bw_code) {
+       case DP_LINK_BW_1_62:
+               select = 0;
+               break;
+       case DP_LINK_BW_2_7:
+               select = 1;
+               break;
+       case DP_LINK_BW_5_4:
+               select = 2;
+               break;
+       case DP_LINK_BW_8_1:
+               select = 3;
+               break;
+       default:
+               DRM_DEBUG_DP("Unknown link rate\n");
+               select = 0;
+               break;
+       }
+
+       catalog->audio_data = select;
+       dp_catalog_audio_config_acr(catalog);
+}
+
+static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 safe_to_exit_level = 0;
+
+       switch (audio->dp_audio.lane_count) {
+       case 1:
+               safe_to_exit_level = 14;
+               break;
+       case 2:
+               safe_to_exit_level = 8;
+               break;
+       case 4:
+               safe_to_exit_level = 5;
+               break;
+       default:
+               safe_to_exit_level = 14;
+               DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n",
+                               safe_to_exit_level);
+               break;
+       }
+
+       catalog->audio_data = safe_to_exit_level;
+       dp_catalog_audio_sfe_level(catalog);
+}
+
+static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+{
+       struct dp_catalog *catalog = audio->catalog;
+
+       catalog->audio_data = enable;
+       dp_catalog_audio_enable(catalog);
+
+       audio->engine_on = enable;
+}
+
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+{
+       struct dp_audio *dp_audio;
+       struct msm_dp *dp_display;
+
+       if (!pdev) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       dp_audio = dp_display->dp_audio;
+
+       if (!dp_audio) {
+               DRM_ERROR("invalid dp_audio data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       return container_of(dp_audio, struct dp_audio_private, dp_audio);
+}
+
+static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+               hdmi_codec_plugged_cb fn,
+               struct device *codec_dev)
+{
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       if (!pdev) {
+               pr_err("invalid input\n");
+               return -ENODEV;
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               pr_err("invalid input\n");
+               return -ENODEV;
+       }
+
+       return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+}
+
+static int dp_audio_get_eld(struct device *dev,
+       void *data, uint8_t *buf, size_t len)
+{
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+
+       if (!pdev) {
+               DRM_ERROR("invalid input\n");
+               return -ENODEV;
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return -ENODEV;
+       }
+
+       memcpy(buf, dp_display->connector->eld,
+               min(sizeof(dp_display->connector->eld), len));
+
+       return 0;
+}
+
+int dp_audio_hw_params(struct device *dev,
+       void *data,
+       struct hdmi_codec_daifmt *daifmt,
+       struct hdmi_codec_params *params)
+{
+       int rc = 0;
+       struct dp_audio_private *audio;
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       dp_display = platform_get_drvdata(pdev);
+
+       /*
+        * There could be cases where the sound card is opened even
+        * before DP is connected, or when it is not connected at all.
+        * This can cause unclocked register access, since the audio
+        * subsystem relies on the DP driver to maintain the correct
+        * clock state. To protect against such cases, check the
+        * connection status and bail out if not connected.
+        */
+       if (!dp_display->power_on) {
+               rc = -EINVAL;
+               goto end;
+       }
+
+       audio = dp_audio_get_data(pdev);
+       if (IS_ERR(audio)) {
+               rc = PTR_ERR(audio);
+               goto end;
+       }
+
+       audio->channels = params->channels;
+
+       dp_audio_setup_sdp(audio);
+       dp_audio_setup_acr(audio);
+       dp_audio_safe_to_exit_level(audio);
+       dp_audio_enable(audio, true);
+       dp_display->audio_enabled = true;
+
+end:
+       return rc;
+}
+
+static void dp_audio_shutdown(struct device *dev, void *data)
+{
+       struct dp_audio_private *audio;
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       dp_display = platform_get_drvdata(pdev);
+       audio = dp_audio_get_data(pdev);
+       if (IS_ERR(audio)) {
+               DRM_ERROR("failed to get audio data\n");
+               return;
+       }
+
+       /*
+        * If audio was not enabled, there is no need to execute the
+        * shutdown and we can bail out early. This also makes sure
+        * that we don't cause an unclocked access when the audio
+        * subsystem calls this without DP being connected.
+        * is_connected cannot be used here as it is set to false
+        * earlier than this call.
+        */
+       if (!dp_display->audio_enabled)
+               return;
+
+       dp_audio_enable(audio, false);
+       /* signal the dp display to safely shutdown clocks */
+       dp_display_signal_audio_complete(dp_display);
+}
+
+static const struct hdmi_codec_ops dp_audio_codec_ops = {
+       .hw_params = dp_audio_hw_params,
+       .audio_shutdown = dp_audio_shutdown,
+       .get_eld = dp_audio_get_eld,
+       .hook_plugged_cb = dp_audio_hook_plugged_cb,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+       .ops = &dp_audio_codec_ops,
+       .max_i2s_channels = 8,
+       .i2s = 1,
+};
+
+int dp_register_audio_driver(struct device *dev,
+               struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio_priv;
+
+       audio_priv = container_of(dp_audio,
+                       struct dp_audio_private, dp_audio);
+
+       audio_priv->audio_pdev = platform_device_register_data(dev,
+                                               HDMI_CODEC_DRV_NAME,
+                                               PLATFORM_DEVID_AUTO,
+                                               &codec_data,
+                                               sizeof(codec_data));
+       return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
+}
+
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+                       struct dp_panel *panel,
+                       struct dp_catalog *catalog)
+{
+       int rc = 0;
+       struct dp_audio_private *audio;
+       struct dp_audio *dp_audio;
+
+       if (!pdev || !panel || !catalog) {
+               DRM_ERROR("invalid input\n");
+               rc = -EINVAL;
+               goto error;
+       }
+
+       audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
+       if (!audio) {
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       audio->pdev = pdev;
+       audio->panel = panel;
+       audio->catalog = catalog;
+
+       dp_audio = &audio->dp_audio;
+
+       dp_catalog_audio_init(catalog);
+
+       return dp_audio;
+error:
+       return ERR_PTR(rc);
+}
+
+void dp_audio_put(struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio;
+
+       if (!dp_audio)
+               return;
+
+       audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+       devm_kfree(&audio->pdev->dev, audio);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
new file mode 100644 (file)
index 0000000..84e5f4a
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUDIO_H_
+#define _DP_AUDIO_H_
+
+#include <linux/platform_device.h>
+
+#include "dp_panel.h"
+#include "dp_catalog.h"
+#include <sound/hdmi-codec.h>
+
+/**
+ * struct dp_audio
+ * @lane_count: number of lanes configured in current session
+ * @bw_code: link rate's bandwidth code for current session
+ */
+struct dp_audio {
+       u32 lane_count;
+       u32 bw_code;
+};
+
+/**
+ * dp_audio_get()
+ *
+ * Creates an instance of dp audio.
+ *
+ * @pdev: caller's platform device instance.
+ * @panel: an instance of dp_panel module.
+ * @catalog: an instance of dp_catalog module.
+ *
+ * Returns the error code in case of failure, otherwise
+ * an instance of the newly created dp_audio module.
+ */
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+                       struct dp_panel *panel,
+                       struct dp_catalog *catalog);
+
+/**
+ * dp_register_audio_driver()
+ *
+ * Registers DP device with hdmi_codec interface.
+ *
+ * @dev: DP device instance.
+ * @dp_audio: an instance of dp_audio module.
+ *
+ * Returns the error code in case of failure, otherwise
+ * zero on success.
+ */
+int dp_register_audio_driver(struct device *dev,
+               struct dp_audio *dp_audio);
+
+/**
+ * dp_audio_put()
+ *
+ * Cleans the dp_audio instance.
+ *
+ * @dp_audio: an instance of dp_audio.
+ */
+void dp_audio_put(struct dp_audio *dp_audio);
+
+int dp_audio_hw_params(struct device *dev,
+       void *data,
+       struct hdmi_codec_daifmt *daifmt,
+       struct hdmi_codec_params *params);
+
+#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
new file mode 100644 (file)
index 0000000..19b35ae
--- /dev/null
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_aux.h"
+
+#define DP_AUX_ENUM_STR(x)             #x
+
+struct dp_aux_private {
+       struct device *dev;
+       struct dp_catalog *catalog;
+
+       struct mutex mutex;
+       struct completion comp;
+
+       u32 aux_error_num;
+       u32 retry_cnt;
+       bool cmd_busy;
+       bool native;
+       bool read;
+       bool no_send_addr;
+       bool no_send_stop;
+       u32 offset;
+       u32 segment;
+       u32 isr;
+
+       struct drm_dp_aux dp_aux;
+};
+
+static const char *dp_aux_get_error(u32 aux_error)
+{
+       switch (aux_error) {
+       case DP_AUX_ERR_NONE:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
+       case DP_AUX_ERR_ADDR:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
+       case DP_AUX_ERR_TOUT:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
+       case DP_AUX_ERR_NACK:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
+       case DP_AUX_ERR_DEFER:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
+       case DP_AUX_ERR_NACK_DEFER:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
+       default:
+               return "unknown";
+       }
+}
+
+static u32 dp_aux_write(struct dp_aux_private *aux,
+                       struct drm_dp_aux_msg *msg)
+{
+       u32 data[4], reg, len;
+       u8 *msgdata = msg->buffer;
+       int const AUX_CMD_FIFO_LEN = 128;
+       int i = 0;
+
+       if (aux->read)
+               len = 4;
+       else
+               len = msg->size + 4;
+
+       /*
+        * The command FIFO is only 144 bytes deep, so limit the
+        * buffer length to 128 bytes here.
+        */
+       if (len > AUX_CMD_FIFO_LEN) {
+               DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
+               return 0;
+       }
+
+       /* Pack cmd and write to HW */
+       data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+       if (aux->read)
+               data[0] |=  BIT(4); /* R/W */
+
+       data[1] = (msg->address >> 8) & 0xff;   /* addr[15:8] */
+       data[2] = msg->address & 0xff;          /* addr[7:0] */
+       data[3] = (msg->size - 1) & 0xff;       /* len[7:0] */
+
+       for (i = 0; i < len; i++) {
+               reg = (i < 4) ? data[i] : msgdata[i - 4];
+               /* index = 0, write */
+               reg = (((reg) << DP_AUX_DATA_OFFSET)
+                      & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE;
+               if (i == 0)
+                       reg |= DP_AUX_DATA_INDEX_WRITE;
+               aux->catalog->aux_data = reg;
+               dp_catalog_aux_write_data(aux->catalog);
+       }
+
+       dp_catalog_aux_clear_trans(aux->catalog, false);
+       dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+
+       reg = 0; /* Transaction number == 1 */
+       if (!aux->native) { /* i2c */
+               reg |= DP_AUX_TRANS_CTRL_I2C;
+
+               if (aux->no_send_addr)
+                       reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
+
+               if (aux->no_send_stop)
+                       reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
+       }
+
+       reg |= DP_AUX_TRANS_CTRL_GO;
+       aux->catalog->aux_data = reg;
+       dp_catalog_aux_write_trans(aux->catalog);
+
+       return len;
+}
+
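To illustrate the packing above (hypothetical values, not in the patch): a native AUX read of two bytes at DPCD address 0x202 loads the command FIFO header as

	data[0] = 0x10;	/* addr[19:16] = 0, plus the read bit BIT(4) */
	data[1] = 0x02;	/* addr[15:8] */
	data[2] = 0x02;	/* addr[7:0] */
	data[3] = 0x01;	/* msg->size - 1 */

and each byte is then shifted to DP_AUX_DATA_OFFSET, with DP_AUX_DATA_INDEX_WRITE set only on the first write so the hardware auto-increments the FIFO index for the rest.
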
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+                             struct drm_dp_aux_msg *msg)
+{
+       u32 ret, len, timeout;
+       int aux_timeout_ms = HZ/4;
+
+       reinit_completion(&aux->comp);
+
+       len = dp_aux_write(aux, msg);
+       if (len == 0) {
+               DRM_ERROR("DP AUX write failed\n");
+               return -EINVAL;
+       }
+
+       timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+       if (!timeout) {
+               DRM_ERROR("aux %s timeout\n", (aux->read ? "read" : "write"));
+               return -ETIMEDOUT;
+       }
+
+       if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+               ret = len;
+       } else {
+               DRM_ERROR_RATELIMITED("aux err: %s\n",
+                       dp_aux_get_error(aux->aux_error_num));
+
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+               struct drm_dp_aux_msg *msg)
+{
+       u32 data;
+       u8 *dp;
+       u32 i, actual_i;
+       u32 len = msg->size;
+
+       dp_catalog_aux_clear_trans(aux->catalog, true);
+
+       data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
+       data |= DP_AUX_DATA_READ;  /* read */
+
+       aux->catalog->aux_data = data;
+       dp_catalog_aux_write_data(aux->catalog);
+
+       dp = msg->buffer;
+
+       /* discard first byte */
+       data = dp_catalog_aux_read_data(aux->catalog);
+
+       for (i = 0; i < len; i++) {
+               data = dp_catalog_aux_read_data(aux->catalog);
+               *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
+
+               actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
+               if (i != actual_i)
+                       DRM_ERROR("Index mismatch: expected %d, found %d\n",
+                               i, actual_i);
+       }
+}
+
+static void dp_aux_native_handler(struct dp_aux_private *aux)
+{
+       u32 isr = aux->isr;
+
+       if (isr & DP_INTR_AUX_I2C_DONE)
+               aux->aux_error_num = DP_AUX_ERR_NONE;
+       else if (isr & DP_INTR_WRONG_ADDR)
+               aux->aux_error_num = DP_AUX_ERR_ADDR;
+       else if (isr & DP_INTR_TIMEOUT)
+               aux->aux_error_num = DP_AUX_ERR_TOUT;
+       if (isr & DP_INTR_NACK_DEFER)
+               aux->aux_error_num = DP_AUX_ERR_NACK;
+       if (isr & DP_INTR_AUX_ERROR) {
+               aux->aux_error_num = DP_AUX_ERR_PHY;
+               dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+       }
+
+       complete(&aux->comp);
+}
+
+static void dp_aux_i2c_handler(struct dp_aux_private *aux)
+{
+       u32 isr = aux->isr;
+
+       if (isr & DP_INTR_AUX_I2C_DONE) {
+               if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
+                       aux->aux_error_num = DP_AUX_ERR_NACK;
+               else
+                       aux->aux_error_num = DP_AUX_ERR_NONE;
+       } else {
+               if (isr & DP_INTR_WRONG_ADDR)
+                       aux->aux_error_num = DP_AUX_ERR_ADDR;
+               else if (isr & DP_INTR_TIMEOUT)
+                       aux->aux_error_num = DP_AUX_ERR_TOUT;
+               if (isr & DP_INTR_NACK_DEFER)
+                       aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+               if (isr & DP_INTR_I2C_NACK)
+                       aux->aux_error_num = DP_AUX_ERR_NACK;
+               if (isr & DP_INTR_I2C_DEFER)
+                       aux->aux_error_num = DP_AUX_ERR_DEFER;
+               if (isr & DP_INTR_AUX_ERROR) {
+                       aux->aux_error_num = DP_AUX_ERR_PHY;
+                       dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+               }
+       }
+
+       complete(&aux->comp);
+}
+
+static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+                                            struct drm_dp_aux_msg *input_msg)
+{
+       u32 edid_address = 0x50;
+       u32 segment_address = 0x30;
+       bool i2c_read = input_msg->request &
+               (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+       u8 *data;
+
+       if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
+               (input_msg->address != segment_address)))
+               return;
+
+
+       data = input_msg->buffer;
+       if (input_msg->address == segment_address)
+               aux->segment = *data;
+       else
+               aux->offset = *data;
+}
+
+/**
+ * dp_aux_transfer_helper() - helper function for EDID read transactions
+ *
+ * @aux: DP AUX private structure
+ * @input_msg: input message from DRM upstream APIs
+ * @send_seg: send the segment to sink
+ *
+ * return: void
+ *
+ * This helper function is used to fix EDID reads for non-compliant
+ * sinks that do not handle the i2c middle-of-transaction flag correctly.
+ */
+static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+                                  struct drm_dp_aux_msg *input_msg,
+                                  bool send_seg)
+{
+       struct drm_dp_aux_msg helper_msg;
+       u32 message_size = 0x10;
+       u32 segment_address = 0x30;
+       u32 const edid_block_length = 0x80;
+       bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
+       bool i2c_read = input_msg->request &
+               (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+       if (!i2c_mot || !i2c_read || (input_msg->size == 0))
+               return;
+
+       /*
+        * Sending the segment value and EDID offset will be performed
+        * from the DRM upstream EDID driver for each block. Avoid
+        * duplicate AUX transactions related to this while reading the
+        * first 16 bytes of each block.
+        */
+       if (!(aux->offset % edid_block_length) || !send_seg)
+               goto end;
+
+       aux->read = false;
+       aux->cmd_busy = true;
+       aux->no_send_addr = true;
+       aux->no_send_stop = true;
+
+       /*
+        * Send the segment address for every i2c read in which the
+        * middle-of-transaction flag is set. This is required to support EDID
+        * reads of more than 2 blocks as the segment address is reset to 0
+        * since we are overriding the middle-of-transaction flag for read
+        * transactions.
+        */
+
+       if (aux->segment) {
+               memset(&helper_msg, 0, sizeof(helper_msg));
+               helper_msg.address = segment_address;
+               helper_msg.buffer = &aux->segment;
+               helper_msg.size = 1;
+               dp_aux_cmd_fifo_tx(aux, &helper_msg);
+       }
+
+       /*
+        * Send the offset address for every i2c read in which the
+        * middle-of-transaction flag is set. This will ensure that the sink
+        * will update its read pointer and return the correct portion of the
+        * EDID buffer in the subsequent i2c read transaction triggered in the
+        * native AUX transfer function.
+        */
+       memset(&helper_msg, 0, sizeof(helper_msg));
+       helper_msg.address = input_msg->address;
+       helper_msg.buffer = &aux->offset;
+       helper_msg.size = 1;
+       dp_aux_cmd_fifo_tx(aux, &helper_msg);
+
+end:
+       aux->offset += message_size;
+       if (aux->offset == 0x80 || aux->offset == 0x100)
+               aux->segment = 0x0; /* reset segment at end of block */
+}
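+
+/*
+ * For reference, a sketch (illustrative values, not part of the driver) of
+ * the AUX traffic the helper generates for block 3 of a four-block EDID,
+ * i.e. segment 1, offset 0x80. memset() leaves helper_msg.request at 0,
+ * which is DP_AUX_I2C_WRITE without the MOT bit:
+ *
+ *     i2c write, addr 0x30, 1 byte: 0x01    segment pointer (aux->segment)
+ *     i2c write, addr 0x50, 1 byte: 0x80    word offset     (aux->offset)
+ *     i2c read,  addr 0x50, 16 bytes        issued by dp_aux_transfer()
+ */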
+
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel if waiting for the transaction to complete
+ * times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+                              struct drm_dp_aux_msg *msg)
+{
+       ssize_t ret;
+       int const aux_cmd_native_max = 16;
+       int const aux_cmd_i2c_max = 128;
+       int const retry_count = 5;
+       struct dp_aux_private *aux = container_of(dp_aux,
+               struct dp_aux_private, dp_aux);
+
+       mutex_lock(&aux->mutex);
+
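+       /*
+        * DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ (0x8 & 0x9) leaves only
+        * the bit common to both native request codes, so this tests the
+        * "native transaction" bit of the request.
+        */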
+       aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+
+       /* Ignore address-only messages */
+       if ((msg->size == 0) || (msg->buffer == NULL)) {
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+               ret = msg->size;
+               goto unlock_exit;
+       }
+
+       /* msg sanity check */
+       if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+               (msg->size > aux_cmd_i2c_max)) {
+               DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
+                       __func__, msg->size, msg->request);
+               ret = -EINVAL;
+               goto unlock_exit;
+       }
+
+       dp_aux_update_offset_and_segment(aux, msg);
+       dp_aux_transfer_helper(aux, msg, true);
+
+       aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+       aux->cmd_busy = true;
+
+       if (aux->read) {
+               aux->no_send_addr = true;
+               aux->no_send_stop = false;
+       } else {
+               aux->no_send_addr = true;
+               aux->no_send_stop = true;
+       }
+
+       ret = dp_aux_cmd_fifo_tx(aux, msg);
+
+       if (ret < 0) {
+               if (aux->native) {
+                       aux->retry_cnt++;
+                       if (!(aux->retry_cnt % retry_count))
+                               dp_catalog_aux_update_cfg(aux->catalog);
+                       dp_catalog_aux_reset(aux->catalog);
+               }
+               usleep_range(400, 500); /* at least 400us to next try */
+               goto unlock_exit;
+       }
+
+       if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+               if (aux->read)
+                       dp_aux_cmd_fifo_rx(aux, msg);
+
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+       } else {
+               /* Reply defer to retry */
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+       }
+
+       /* Return requested size for success or retry */
+       ret = msg->size;
+       aux->retry_cnt = 0;
+
+unlock_exit:
+       aux->cmd_busy = false;
+       mutex_unlock(&aux->mutex);
+       return ret;
+}
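+
+/*
+ * A hedged sketch of how the DRM core reaches this hook: once the aux is
+ * registered, helpers such as drm_dp_dpcd_read() wrap the request in a
+ * drm_dp_aux_msg and invoke aux->transfer, i.e. dp_aux_transfer() above,
+ * retrying internally on DEFER replies. The surrounding code here is
+ * hypothetical:
+ *
+ *     u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ *     ssize_t ret;
+ *
+ *     ret = drm_dp_dpcd_read(&aux->dp_aux, DP_DPCD_REV,
+ *                            dpcd, sizeof(dpcd));
+ */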
+
+void dp_aux_isr(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       aux->isr = dp_catalog_aux_get_irq(aux->catalog);
+
+       if (!aux->cmd_busy)
+               return;
+
+       if (aux->native)
+               dp_aux_native_handler(aux);
+       else
+               dp_aux_i2c_handler(aux);
+}
+
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_update_cfg(aux->catalog);
+       dp_catalog_aux_reset(aux->catalog);
+}
+
+void dp_aux_init(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_enable(aux->catalog, true);
+       aux->retry_cnt = 0;
+}
+
+void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_enable(aux->catalog, false);
+}
+
+int dp_aux_register(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+       int ret;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       aux->dp_aux.name = "dpu_dp_aux";
+       aux->dp_aux.dev = aux->dev;
+       aux->dp_aux.transfer = dp_aux_transfer;
+       ret = drm_dp_aux_register(&aux->dp_aux);
+       if (ret) {
+               DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+{
+       drm_dp_aux_unregister(dp_aux);
+}
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
+{
+       struct dp_aux_private *aux;
+
+       if (!catalog) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+       if (!aux)
+               return ERR_PTR(-ENOMEM);
+
+       init_completion(&aux->comp);
+       aux->cmd_busy = false;
+       mutex_init(&aux->mutex);
+
+       aux->dev = dev;
+       aux->catalog = catalog;
+       aux->retry_cnt = 0;
+
+       return &aux->dp_aux;
+}
+
+void dp_aux_put(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux)
+               return;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       mutex_destroy(&aux->mutex);
+
+       devm_kfree(aux->dev, aux);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
new file mode 100644 (file)
index 0000000..f8b8ba9
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUX_H_
+#define _DP_AUX_H_
+
+#include "dp_catalog.h"
+#include <drm/drm_dp_helper.h>
+
+#define DP_AUX_ERR_NONE                0
+#define DP_AUX_ERR_ADDR                -1
+#define DP_AUX_ERR_TOUT                -2
+#define DP_AUX_ERR_NACK                -3
+#define DP_AUX_ERR_DEFER       -4
+#define DP_AUX_ERR_NACK_DEFER  -5
+#define DP_AUX_ERR_PHY         -6
+
+int dp_aux_register(struct drm_dp_aux *dp_aux);
+void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+void dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_init(struct drm_dp_aux *dp_aux);
+void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog);
+void dp_aux_put(struct drm_dp_aux *aux);
+
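+/*
+ * Expected call order for this interface, as a hedged sketch (the enclosing
+ * driver code and error handling are hypothetical):
+ *
+ *     struct drm_dp_aux *dp_aux = dp_aux_get(dev, catalog);
+ *
+ *     dp_aux_init(dp_aux);            enable the AUX controller
+ *     dp_aux_register(dp_aux);        expose it to the DRM core
+ *     dp_aux_isr(dp_aux);             from the controller IRQ handler
+ *     dp_aux_unregister(dp_aux);
+ *     dp_aux_deinit(dp_aux);
+ *     dp_aux_put(dp_aux);
+ */
+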
+#endif /* _DP_AUX_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644 (file)
index 0000000..b15b4ce
--- /dev/null
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/rational.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_catalog.h"
+#include "dp_reg.h"
+
+#define POLLING_SLEEP_US                       1000
+#define POLLING_TIMEOUT_US                     10000
+
+#define SCRAMBLER_RESET_COUNT_VALUE            0xFC
+
+#define DP_INTERRUPT_STATUS_ACK_SHIFT  1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define MSM_DP_CONTROLLER_AHB_OFFSET   0x0000
+#define MSM_DP_CONTROLLER_AHB_SIZE     0x0200
+#define MSM_DP_CONTROLLER_AUX_OFFSET   0x0200
+#define MSM_DP_CONTROLLER_AUX_SIZE     0x0200
+#define MSM_DP_CONTROLLER_LINK_OFFSET  0x0400
+#define MSM_DP_CONTROLLER_LINK_SIZE    0x0C00
+#define MSM_DP_CONTROLLER_P0_OFFSET    0x1000
+#define MSM_DP_CONTROLLER_P0_SIZE      0x0400
+
+#define DP_INTERRUPT_STATUS1 \
+       (DP_INTR_AUX_I2C_DONE | \
+       DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+       DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+       DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+       DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+       (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+       (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+       (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+       DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+       (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+       (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
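+
+/*
+ * Each interrupt uses three adjacent register bits: status at bit N, ack at
+ * N + 1 and mask at N + 2, which is what the two shifts above encode. A
+ * worked example for one source:
+ *
+ *     DP_INTR_WRONG_ADDR = BIT(6)         status bit, 0x040
+ *     ack bit  = BIT(6) << 1 = BIT(7)     0x080
+ *     mask bit = BIT(6) << 2 = BIT(8)     0x100
+ */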
+
+struct dp_catalog_private {
+       struct device *dev;
+       struct dp_io *io;
+       u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
+       struct dp_catalog dp_catalog;
+       u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
+};
+
+static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_aux(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+       /*
+        * To make sure aux reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+       /*
+        * To make sure phy reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_p0(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_P0_OFFSET;
+       /*
+        * To make sure interface reg writes happen before any other
+        * operation, this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+                              u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_P0_OFFSET;
+       /*
+        * Reads have no ordering requirement here, so readl_relaxed()
+        * is sufficient.
+        */
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_link(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+       /*
+        * To make sure link reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
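+
+/*
+ * The accessors above follow the usual MMIO split: readl_relaxed() and
+ * writel_relaxed() skip the memory barrier, while writel() includes it so
+ * the register write is observed by the device before anything that
+ * follows. A minimal illustration of the resulting idiom (generic kernel
+ * pattern, not code from this patch):
+ *
+ *     val = readl_relaxed(base + REG_DP_AUX_TRANS_CTRL);   no barrier
+ *     writel(val | DP_AUX_TRANS_CTRL_GO,                   barrier, then
+ *            base + REG_DP_AUX_TRANS_CTRL);                the write
+ */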
+
+/* aux related catalog functions */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       return dp_read_aux(catalog, REG_DP_AUX_DATA);
+}
+
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
+       return 0;
+}
+
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
+       return 0;
+}
+
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+{
+       u32 data;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (read) {
+               data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+               data &= ~DP_AUX_TRANS_CTRL_GO;
+               dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+       } else {
+               dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+       }
+       return 0;
+}
+
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+       return 0;
+}
+
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+{
+       u32 aux_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+       aux_ctrl |= DP_AUX_CTRL_RESET;
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+
+       aux_ctrl &= ~DP_AUX_CTRL_RESET;
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+{
+       u32 aux_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+       if (enable) {
+               dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+               dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+               aux_ctrl |= DP_AUX_CTRL_ENABLE;
+       } else {
+               aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+       }
+
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       struct dp_io *dp_io = catalog->io;
+       struct phy *phy = dp_io->phy;
+
+       phy_calibrate(phy);
+}
+
+static void dump_regs(void __iomem *base, int len)
+{
+       int i;
+       u32 x0, x4, x8, xc;
+       u32 addr_off = 0;
+
+       len = DIV_ROUND_UP(len, 16);
+       for (i = 0; i < len; i++) {
+               x0 = readl_relaxed(base + addr_off);
+               x4 = readl_relaxed(base + addr_off + 0x04);
+               x8 = readl_relaxed(base + addr_off + 0x08);
+               xc = readl_relaxed(base + addr_off + 0x0c);
+
+               pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
+               addr_off += 16;
+       }
+}
+
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
+{
+       u32 offset, len;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       pr_info("AHB regs\n");
+       offset = MSM_DP_CONTROLLER_AHB_OFFSET;
+       len = MSM_DP_CONTROLLER_AHB_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("AUXCLK regs\n");
+       offset = MSM_DP_CONTROLLER_AUX_OFFSET;
+       len = MSM_DP_CONTROLLER_AUX_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("LCLK regs\n");
+       offset = MSM_DP_CONTROLLER_LINK_OFFSET;
+       len = MSM_DP_CONTROLLER_LINK_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("P0CLK regs\n");
+       offset = MSM_DP_CONTROLLER_P0_OFFSET;
+       len = MSM_DP_CONTROLLER_P0_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+}
+
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 intr, intr_ack;
+
+       intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+       intr &= ~DP_INTERRUPT_STATUS1_MASK;
+       intr_ack = (intr & DP_INTERRUPT_STATUS1)
+                       << DP_INTERRUPT_STATUS_ACK_SHIFT;
+       dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+                       DP_INTERRUPT_STATUS1_MASK);
+
+       return intr;
+}
+
+/* controller related catalog functions */
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+                               u32 dp_tu, u32 valid_boundary,
+                               u32 valid_boundary2)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+       dp_write_link(catalog, REG_DP_TU, dp_tu);
+       dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+}
+
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+}
+
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+
+       dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+}
+
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+       u32 ln_mapping;
+
+       ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+       ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+       ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+       ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+       dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+                       ln_mapping);
+}
+
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+                                               bool enable)
+{
+       u32 mainlink_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (enable) {
+               /*
+                * To make sure link reg writes happen before other
+                * operations, dp_write_link() uses writel()
+                */
+               mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+
+               mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+                                               DP_MAINLINK_CTRL_ENABLE);
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+                                       DP_MAINLINK_FB_BOUNDARY_SEL);
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+       } else {
+               mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+       }
+}
+
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+                                       u32 colorimetry_cfg,
+                                       u32 test_bits_depth)
+{
+       u32 misc_val;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+       /* clear bpp bits */
+       misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+       misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+       misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+       /* Configure clock to synchronous mode */
+       misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+       DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val);
+       dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+}
+
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+                                       u32 rate, u32 stream_rate_khz,
+                                       bool fixed_nvid)
+{
+       u32 pixel_m, pixel_n;
+       u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+       u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+       u32 const link_rate_hbr2 = 540000;
+       u32 const link_rate_hbr3 = 810000;
+       unsigned long den, num;
+
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (rate == link_rate_hbr3)
+               pixel_div = 6;
+       else if (rate == 162000 || rate == 270000)
+               pixel_div = 2;
+       else if (rate == link_rate_hbr2)
+               pixel_div = 4;
+       else
+               DRM_ERROR("Invalid pixel mux divider\n");
+
+       dispcc_input_rate = (rate * 10) / pixel_div;
+
+       rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+                       (unsigned long)(1 << 16) - 1,
+                       (unsigned long)(1 << 16) - 1, &den, &num);
+
+       den = ~(den - num);
+       den = den & 0xFFFF;
+       pixel_m = num;
+       pixel_n = den;
+
+       mvid = (pixel_m & 0xFFFF) * 5;
+       nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+       if (nvid < nvid_fixed) {
+               u32 temp;
+
+               temp = (nvid_fixed / nvid) * nvid;
+               mvid = (nvid_fixed / nvid) * mvid;
+               nvid = temp;
+       }
+
+       if (link_rate_hbr2 == rate)
+               nvid *= 2;
+
+       if (link_rate_hbr3 == rate)
+               nvid *= 3;
+
+       DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+       dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+       dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+       dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+}
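+
+/*
+ * A worked example of the M/N arithmetic above for an HBR2 link
+ * (rate = 540000 kHz) carrying a 148500 kHz (1080p60) stream; the numbers
+ * are illustrative only:
+ *
+ *     pixel_div = 4, dispcc_input_rate = 540000 * 10 / 4 = 1350000
+ *     1350000 / 148500 reduces to 100 / 11, so den = 100, num = 11
+ *     den = ~(100 - 11) & 0xFFFF = 0xffa6, pixel_m = 11, pixel_n = 0xffa6
+ *     mvid = 11 * 5 = 55, nvid = ((~0xffa6) & 0xFFFF) + 11 = 100
+ *     nvid < 0x8000, so scale by 0x8000 / 100 = 327: mvid = 17985,
+ *     nvid = 32700, then doubled for HBR2: nvid = 65400
+ *     mvid/nvid = 17985/65400 = 0.275 = 148500/540000, stream/link ratio
+ */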
+
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
+                                       u32 pattern)
+{
+       int bit, ret;
+       u32 data;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       bit = BIT(pattern - 1);
+       DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern);
+       dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+
+       bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+       /* Poll for mainlink ready status */
+       ret = readx_poll_timeout(readl, catalog->io->dp_controller.base +
+                                       MSM_DP_CONTROLLER_LINK_OFFSET +
+                                       REG_DP_MAINLINK_READY,
+                                       data, data & bit,
+                                       POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+       if (ret < 0) {
+               DRM_ERROR("set pattern for link_train=%d failed\n", pattern);
+               return ret;
+       }
+       return 0;
+}
+
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+{
+       u32 sw_reset;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+
+       sw_reset |= DP_SW_RESET;
+       dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+
+       sw_reset &= ~DP_SW_RESET;
+       dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+}
+
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+{
+       u32 data;
+       int ret;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       /* Poll for mainlink ready status */
+       ret = readl_poll_timeout(catalog->io->dp_controller.base +
+                               MSM_DP_CONTROLLER_LINK_OFFSET +
+                               REG_DP_MAINLINK_READY,
+                               data, data & DP_MAINLINK_READY_FOR_VIDEO,
+                               POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+       if (ret < 0) {
+               DRM_ERROR("mainlink not ready\n");
+               return false;
+       }
+
+       return true;
+}
+
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+                                               bool enable)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (enable) {
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+                               DP_INTERRUPT_STATUS1_MASK);
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+                               DP_INTERRUPT_STATUS2_MASK);
+       } else {
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+       }
+}
+
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+                       u32 intr_mask, bool en)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+
+       config = (en ? config | intr_mask : config & ~intr_mask);
+
+       dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+                               config & DP_DP_HPD_INT_MASK);
+}
+
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+       /* enable HPD interrupts */
+       dp_catalog_hpd_config_intr(dp_catalog,
+               DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
+               | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
+       /* Configure REFTIMER and enable it */
+       reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
+       dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+       /* Enable HPD */
+       dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       int isr = 0;
+
+       isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+       dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+                                (isr & DP_DP_HPD_INT_MASK));
+
+       return isr;
+}
+
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 intr, intr_ack;
+
+       intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+       intr &= ~DP_INTERRUPT_STATUS2_MASK;
+       intr_ack = (intr & DP_INTERRUPT_STATUS2)
+                       << DP_INTERRUPT_STATUS_ACK_SHIFT;
+       dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+                       intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+       return intr;
+}
+
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+                       DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+       dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+}
+
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
+               u8 v_level, u8 p_level)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       struct dp_io *dp_io = catalog->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       /* TODO: Update for all lanes instead of just first one */
+       opts_dp->voltage[0] = v_level;
+       opts_dp->pre[0] = p_level;
+       opts_dp->set_voltages = 1;
+       phy_configure(phy, &dp_io->phy_opts);
+       opts_dp->set_voltages = 0;
+
+       return 0;
+}
+
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+                       u32 pattern)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 value = 0x0;
+
+       /* Make sure to clear the current pattern before starting a new one */
+       dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+
+       switch (pattern) {
+       case DP_PHY_TEST_PATTERN_D10_2:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+               break;
+       case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+               value &= ~(1 << 16);
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               value |= SCRAMBLER_RESET_COUNT_VALUE;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+                                       DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                                       DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+               break;
+       case DP_PHY_TEST_PATTERN_PRBS7:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_PRBS7);
+               break;
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+               /* 00111110000011111000001111100000 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+                               0x3E0F83E0);
+               /* 00001111100000111110000011111000 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+                               0x0F83E0F8);
+               /* 1111100000111110 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+                               0x0000F83E);
+               break;
+       case DP_PHY_TEST_PATTERN_CP2520:
+               value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+
+               value = DP_HBR2_ERM_PATTERN;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                               value);
+               value |= SCRAMBLER_RESET_COUNT_VALUE;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+                                       DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                                       DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+               value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               value |= DP_MAINLINK_CTRL_ENABLE;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+               break;
+       case DP_PHY_TEST_PATTERN_SEL_MASK:
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+                               DP_MAINLINK_CTRL_ENABLE);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+               break;
+       default:
+               DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+               break;
+       }
+}
+
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+}
+
+/* panel related catalog functions */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
+                               dp_catalog->total);
+       dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
+                               dp_catalog->sync_start);
+       dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
+                               dp_catalog->width_blanking);
+       dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+       return 0;
+}
+
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+                               struct drm_display_mode *drm_mode)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 hsync_period, vsync_period;
+       u32 display_v_start, display_v_end;
+       u32 hsync_start_x, hsync_end_x;
+       u32 v_sync_width;
+       u32 hsync_ctl;
+       u32 display_hctl;
+
+       /* TPG config parameters */
+       hsync_period = drm_mode->htotal;
+       vsync_period = drm_mode->vtotal;
+
+       display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+                                       hsync_period);
+       display_v_end = ((vsync_period - (drm_mode->vsync_start -
+                                       drm_mode->vdisplay))
+                                       * hsync_period) - 1;
+
+       display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+       display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+       hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+       hsync_end_x = hsync_period - (drm_mode->hsync_start -
+                                       drm_mode->hdisplay) - 1;
+
+       v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+       hsync_ctl = (hsync_period << 16) |
+                       (drm_mode->hsync_end - drm_mode->hsync_start);
+       display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+       dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+       dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+                       hsync_period);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+                       hsync_period);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+       dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+       dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+       dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+                               DP_TPG_CHECKERED_RECT_PATTERN);
+       dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+                               DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+                               DP_TPG_VIDEO_CONFIG_RGB);
+       dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+                               DP_BIST_ENABLE_DPBIST_EN);
+       dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+                               DP_TIMING_ENGINE_EN_EN);
+       DRM_DEBUG_DP("%s: enabled tpg\n", __func__);
+}
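+
+/*
+ * The timing math above, applied to a standard 1920x1080@60 mode as a
+ * worked example (htotal 2200, hsync 2008-2052, vtotal 1125, vsync
+ * 1084-1089; illustrative, not from this patch):
+ *
+ *     hsync_period  = 2200, vsync_period = 1125
+ *     hsync_ctl     = (2200 << 16) | (2052 - 2008)  = 0x0898002c
+ *     hsync_start_x = 2200 - 2008                   = 192
+ *     hsync_end_x   = 2200 - (2008 - 1920) - 1      = 2111
+ *     display_hctl  = (2111 << 16) | 192            = 0x083f00c0
+ *     v_sync_width  = 1089 - 1084                   = 5
+ */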
+
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+       dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+       dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+       struct dp_catalog_private *catalog;
+
+       if (!io) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+       if (!catalog)
+               return ERR_PTR(-ENOMEM);
+
+       catalog->dev = dev;
+       catalog->io = io;
+
+       return &catalog->dp_catalog;
+}
+
+void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+       enum dp_catalog_audio_sdp_type sdp;
+       enum dp_catalog_audio_header_type header;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_map = catalog->audio_map;
+       sdp     = dp_catalog->sdp_type;
+       header  = dp_catalog->sdp_header;
+
+       dp_catalog->audio_data = dp_read_link(catalog,
+                       sdp_map[sdp][header]);
+}
+
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+       enum dp_catalog_audio_sdp_type sdp;
+       enum dp_catalog_audio_header_type header;
+       u32 data;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_map = catalog->audio_map;
+       sdp     = dp_catalog->sdp_type;
+       header  = dp_catalog->sdp_header;
+       data    = dp_catalog->audio_data;
+
+       dp_write_link(catalog, sdp_map[sdp][header], data);
+}
+
+void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 acr_ctrl, select;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       select = dp_catalog->audio_data;
+       acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+       DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+
+       dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+}
+
+void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       bool enable;
+       u32 audio_ctrl;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       enable = !!dp_catalog->audio_data;
+       audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+
+       if (enable)
+               audio_ctrl |= BIT(0);
+       else
+               audio_ctrl &= ~BIT(0);
+
+       DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl);
+
+       dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+       /* order the audio engine enable/disable write before what follows */
+       wmb();
+}
+
+void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 sdp_cfg = 0;
+       u32 sdp_cfg2 = 0;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+       /* AUDIO_TIMESTAMP_SDP_EN */
+       sdp_cfg |= BIT(1);
+       /* AUDIO_STREAM_SDP_EN */
+       sdp_cfg |= BIT(2);
+       /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+       sdp_cfg |= BIT(5);
+       /* AUDIO_ISRC_SDP_EN  */
+       sdp_cfg |= BIT(6);
+       /* AUDIO_INFOFRAME_SDP_EN  */
+       sdp_cfg |= BIT(20);
+
+       DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg);
+
+       dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+
+       sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+       /* IFRM_REGSRC -> Do not use reg values */
+       sdp_cfg2 &= ~BIT(0);
+       /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+       sdp_cfg2 &= ~BIT(1);
+
+       DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2);
+
+       dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
+}
+
+void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+
+       static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
+               {
+                       MMSS_DP_AUDIO_STREAM_0,
+                       MMSS_DP_AUDIO_STREAM_1,
+                       MMSS_DP_AUDIO_STREAM_1,
+               },
+               {
+                       MMSS_DP_AUDIO_TIMESTAMP_0,
+                       MMSS_DP_AUDIO_TIMESTAMP_1,
+                       MMSS_DP_AUDIO_TIMESTAMP_1,
+               },
+               {
+                       MMSS_DP_AUDIO_INFOFRAME_0,
+                       MMSS_DP_AUDIO_INFOFRAME_1,
+                       MMSS_DP_AUDIO_INFOFRAME_1,
+               },
+               {
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_0,
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+               },
+               {
+                       MMSS_DP_AUDIO_ISRC_0,
+                       MMSS_DP_AUDIO_ISRC_1,
+                       MMSS_DP_AUDIO_ISRC_1,
+               },
+       };
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       catalog->audio_map = sdp_map;
+}
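+
+/*
+ * A short sketch of how the map is consumed; the caller below is
+ * hypothetical, but the lookup mirrors dp_catalog_audio_set_header()
+ * above:
+ *
+ *     dp_catalog->sdp_type   = DP_AUDIO_SDP_INFOFRAME;
+ *     dp_catalog->sdp_header = DP_AUDIO_SDP_HEADER_2;
+ *     dp_catalog->audio_data = value;
+ *     dp_catalog_audio_set_header(dp_catalog);
+ *
+ * resolves to dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, value).
+ */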
+
+void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 mainlink_levels, safe_to_exit_level;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       safe_to_exit_level = dp_catalog->audio_data;
+       mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
+       mainlink_levels &= 0xFE0;
+       mainlink_levels |= safe_to_exit_level;
+
+       DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+                        mainlink_levels, safe_to_exit_level);
+
+       dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644 (file)
index 0000000..4b7666f
--- /dev/null
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include <drm/drm_modes.h>
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD            BIT(0)
+#define DP_INTR_AUX_I2C_DONE   BIT(3)
+#define DP_INTR_WRONG_ADDR     BIT(6)
+#define DP_INTR_TIMEOUT                BIT(9)
+#define DP_INTR_NACK_DEFER     BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK       BIT(18)
+#define DP_INTR_I2C_DEFER      BIT(21)
+#define DP_INTR_PLL_UNLOCKED   BIT(24)
+#define DP_INTR_AUX_ERROR      BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO                BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT      BIT(3)
+#define DP_INTR_FRAME_END              BIT(6)
+#define DP_INTR_CRC_UPDATED            BIT(9)
+
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+       PHY_AUX_CFG0,
+       PHY_AUX_CFG1,
+       PHY_AUX_CFG2,
+       PHY_AUX_CFG3,
+       PHY_AUX_CFG4,
+       PHY_AUX_CFG5,
+       PHY_AUX_CFG6,
+       PHY_AUX_CFG7,
+       PHY_AUX_CFG8,
+       PHY_AUX_CFG9,
+       PHY_AUX_CFG_MAX,
+};
+
+enum dp_catalog_audio_sdp_type {
+       DP_AUDIO_SDP_STREAM,
+       DP_AUDIO_SDP_TIMESTAMP,
+       DP_AUDIO_SDP_INFOFRAME,
+       DP_AUDIO_SDP_COPYMANAGEMENT,
+       DP_AUDIO_SDP_ISRC,
+       DP_AUDIO_SDP_MAX,
+};
+
+enum dp_catalog_audio_header_type {
+       DP_AUDIO_SDP_HEADER_1,
+       DP_AUDIO_SDP_HEADER_2,
+       DP_AUDIO_SDP_HEADER_3,
+       DP_AUDIO_SDP_HEADER_MAX,
+};
+
+struct dp_catalog {
+       u32 aux_data;
+       u32 total;
+       u32 sync_start;
+       u32 width_blanking;
+       u32 dp_active;
+       enum dp_catalog_audio_sdp_type sdp_type;
+       enum dp_catalog_audio_header_type sdp_header;
+       u32 audio_data;
+};
+
+/* AUX APIs */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+
+/* DP Controller APIs */
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+                               u32 stream_rate_khz, bool fixed_nvid);
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern);
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+                       u32 intr_mask, bool en);
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
+                               u8 p_level);
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+                               u32 dp_tu, u32 valid_boundary,
+                               u32 valid_boundary2);
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+                               u32 pattern);
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+
+/* DP Panel APIs */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+                               struct drm_display_mode *drm_mode);
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+
+/* DP Audio APIs */
+void dp_catalog_audio_get_header(struct dp_catalog *catalog);
+void dp_catalog_audio_set_header(struct dp_catalog *catalog);
+void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
+void dp_catalog_audio_enable(struct dp_catalog *catalog);
+void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
+void dp_catalog_audio_init(struct dp_catalog *catalog);
+void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644 (file)
index 0000000..2e3e191
--- /dev/null
@@ -0,0 +1,1869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_ctrl.h"
+#include "dp_link.h"
+
+#define DP_KHZ_TO_HZ 1000
+#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES        (30 * HZ / 1000) /* 30 ms */
+#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
+
+#define MR_LINK_TRAINING1  0x8
+#define MR_LINK_SYMBOL_ERM 0x80
+#define MR_LINK_PRBS7 0x100
+#define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4  0x40
+
+enum {
+       DP_TRAINING_NONE,
+       DP_TRAINING_1,
+       DP_TRAINING_2,
+};
+
+struct dp_tu_calc_input {
+       u64 lclk;        /* 162, 270, 540 and 810 */
+       u64 pclk_khz;    /* in KHz */
+       u64 hactive;     /* active h-width */
+       u64 hporch;      /* bp + fp + pulse */
+       int nlanes;      /* no. of lanes */
+       int bpp;         /* bits */
+       int pixel_enc;   /* 444, 420, 422 */
+       int dsc_en;     /* dsc on/off */
+       int async_en;   /* async mode */
+       int fec_en;     /* fec */
+       int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
+       int num_of_dsc_slices; /* number of slices per line */
+};
+
+struct dp_vc_tu_mapping_table {
+       u32 vic;
+       u8 lanes;
+       u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+       u8 bpp;
+       u8 valid_boundary_link;
+       u16 delay_start_link;
+       bool boundary_moderation_en;
+       u8 valid_lower_boundary_link;
+       u8 upper_boundary_count;
+       u8 lower_boundary_count;
+       u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+       struct dp_ctrl dp_ctrl;
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_panel *panel;
+       struct dp_link *link;
+       struct dp_power *power;
+       struct dp_parser *parser;
+       struct dp_catalog *catalog;
+
+       struct completion idle_comp;
+       struct completion video_comp;
+};
+
+struct dp_cr_status {
+       u8 lane_0_1;
+       u8 lane_2_3;
+};
+
+#define DP_LANE0_1_CR_DONE     0x11
+
+static int dp_aux_link_configure(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 values[2];
+       int err;
+
+       values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+       values[1] = link->num_lanes;
+
+       if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+               values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+       err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
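+
+/*
+ * The two bytes written above land in adjacent DPCD registers. A worked
+ * example for a 2-lane HBR2 link with enhanced framing (values per the
+ * DisplayPort spec, shown for illustration):
+ *
+ *     values[0] = drm_dp_link_rate_to_bw_code(540000) = 0x14
+ *                 -> DPCD 0x100 (DP_LINK_BW_SET)
+ *     values[1] = 2 | DP_LANE_COUNT_ENHANCED_FRAME_EN = 0x82
+ *                 -> DPCD 0x101 (DP_LANE_COUNT_SET)
+ */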
+
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       reinit_completion(&ctrl->idle_comp);
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+
+       if (!wait_for_completion_timeout(&ctrl->idle_comp,
+                       IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
+               pr_warn("PUSH_IDLE pattern timedout\n");
+
+       pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+       u32 config = 0, tbd;
+       u8 *dpcd = ctrl->panel->dpcd;
+
+       /* Default-> LSCLK DIV: 1/4 LCLK  */
+       config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
+
+       /* Scrambler reset enable */
+       if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+               config |= DP_CONFIGURATION_CTRL_ASSR;
+
+       tbd = dp_link_get_test_bits_depth(ctrl->link,
+                       ctrl->panel->dp_mode.bpp);
+
+       if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+               pr_debug("BIT_DEPTH not set. Configure default\n");
+               tbd = DP_TEST_BIT_DEPTH_8;
+       }
+
+       config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+       /* Num of Lanes */
+       config |= ((ctrl->link->link_params.num_lanes - 1)
+                       << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT);
+
+       if (drm_dp_enhanced_frame_cap(dpcd))
+               config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+       config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */
+
+       /* sync clock & static Mvid */
+       config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN;
+       config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK;
+
+       dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+}
+
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+       u32 cc, tb;
+
+       dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+       dp_ctrl_config_ctrl(ctrl);
+
+       tb = dp_link_get_test_bits_depth(ctrl->link,
+               ctrl->panel->dp_mode.bpp);
+       cc = dp_link_get_colorimetry_config(ctrl->link);
+       dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+       dp_panel_timing_cfg(ctrl->panel);
+}
+
+/*
+ * The structure and the functions below are an IP/hardware-specific
+ * implementation. Most of this implementation carries no code
+ * comments.
+ */
+struct tu_algo_data {
+       s64 lclk_fp;
+       s64 pclk_fp;
+       s64 lwidth;
+       s64 lwidth_fp;
+       s64 hbp_relative_to_pclk;
+       s64 hbp_relative_to_pclk_fp;
+       int nlanes;
+       int bpp;
+       int pixelEnc;
+       int dsc_en;
+       int async_en;
+       int bpc;
+
+       uint delay_start_link_extra_pixclk;
+       int extra_buffer_margin;
+       s64 ratio_fp;
+       s64 original_ratio_fp;
+
+       s64 err_fp;
+       s64 n_err_fp;
+       s64 n_n_err_fp;
+       int tu_size;
+       int tu_size_desired;
+       int tu_size_minus1;
+
+       int valid_boundary_link;
+       s64 resulting_valid_fp;
+       s64 total_valid_fp;
+       s64 effective_valid_fp;
+       s64 effective_valid_recorded_fp;
+       int n_tus;
+       int n_tus_per_lane;
+       int paired_tus;
+       int remainder_tus;
+       int remainder_tus_upper;
+       int remainder_tus_lower;
+       int extra_bytes;
+       int filler_size;
+       int delay_start_link;
+
+       int extra_pclk_cycles;
+       int extra_pclk_cycles_in_link_clk;
+       s64 ratio_by_tu_fp;
+       s64 average_valid2_fp;
+       int new_valid_boundary_link;
+       int remainder_symbols_exist;
+       int n_symbols;
+       s64 n_remainder_symbols_per_lane_fp;
+       s64 last_partial_tu_fp;
+       s64 TU_ratio_err_fp;
+
+       int n_tus_incl_last_incomplete_tu;
+       int extra_pclk_cycles_tmp;
+       int extra_pclk_cycles_in_link_clk_tmp;
+       int extra_required_bytes_new_tmp;
+       int filler_size_tmp;
+       int lower_filler_size_tmp;
+       int delay_start_link_tmp;
+
+       bool boundary_moderation_en;
+       int boundary_mod_lower_err;
+       int upper_boundary_count;
+       int lower_boundary_count;
+       int i_upper_boundary_count;
+       int i_lower_boundary_count;
+       int valid_lower_boundary_link;
+       int even_distribution_BF;
+       int even_distribution_legacy;
+       int even_distribution;
+       int min_hblank_violated;
+       s64 delay_start_time_fp;
+       s64 hbp_time_fp;
+       s64 hactive_time_fp;
+       s64 diff_abs_fp;
+
+       s64 ratio;
+};
+
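+/*
+ * Three-way compare of two 32.32 fixed-point values: returns 0 when
+ * a == b, 1 when a > b and 2 when a < b.
+ */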
+static int _tu_param_compare(s64 a, s64 b)
+{
+       u32 a_sign;
+       u32 b_sign;
+       s64 a_temp, b_temp, minus_1;
+
+       if (a == b)
+               return 0;
+
+       minus_1 = drm_fixp_from_fraction(-1, 1);
+
+       a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
+       b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
+
+       if (a_sign > b_sign)
+               return 2;
+       else if (b_sign > a_sign)
+               return 1;
+
+       if (!a_sign && !b_sign) { /* positive */
+               if (a > b)
+                       return 1;
+               else
+                       return 2;
+       } else { /* negative */
+               a_temp = drm_fixp_mul(a, minus_1);
+               b_temp = drm_fixp_mul(b, minus_1);
+
+               if (a_temp > b_temp)
+                       return 2;
+               else
+                       return 1;
+       }
+}
+
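+/*
+ * Convert the raw timing inputs into fixed point and rescale them for
+ * the pixel encoding (4:2:0/4:2:2), DSC compression and FEC overhead.
+ */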
+static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+                                       struct tu_algo_data *tu)
+{
+       int nlanes = in->nlanes;
+       int dsc_num_slices = in->num_of_dsc_slices;
+       int dsc_num_bytes  = 0;
+       int numerator;
+       s64 pclk_dsc_fp;
+       s64 dwidth_dsc_fp;
+       s64 hbp_dsc_fp;
+
+       int tot_num_eoc_symbols = 0;
+       int tot_num_hor_bytes   = 0;
+       int tot_num_dummy_bytes = 0;
+       int dwidth_dsc_bytes    = 0;
+       int eoc_bytes           = 0;
+
+       s64 temp1_fp, temp2_fp, temp3_fp;
+
+       tu->lclk_fp              = drm_fixp_from_fraction(in->lclk, 1);
+       tu->pclk_fp              = drm_fixp_from_fraction(in->pclk_khz, 1000);
+       tu->lwidth               = in->hactive;
+       tu->hbp_relative_to_pclk = in->hporch;
+       tu->nlanes               = in->nlanes;
+       tu->bpp                  = in->bpp;
+       tu->pixelEnc             = in->pixel_enc;
+       tu->dsc_en               = in->dsc_en;
+       tu->async_en             = in->async_en;
+       tu->lwidth_fp            = drm_fixp_from_fraction(in->hactive, 1);
+       tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
+
+       if (tu->pixelEnc == 420) {
+               temp1_fp = drm_fixp_from_fraction(2, 1);
+               tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
+               tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
+               tu->hbp_relative_to_pclk_fp =
+                               drm_fixp_div(tu->hbp_relative_to_pclk_fp,
+                                               temp1_fp);
+       }
+
+       if (tu->pixelEnc == 422) {
+               switch (tu->bpp) {
+               case 24:
+                       tu->bpp = 16;
+                       tu->bpc = 8;
+                       break;
+               case 30:
+                       tu->bpp = 20;
+                       tu->bpc = 10;
+                       break;
+               default:
+                       tu->bpp = 16;
+                       tu->bpc = 8;
+                       break;
+               }
+       } else {
+               tu->bpc = tu->bpp / 3;
+       }
+
+       if (!in->dsc_en)
+               goto fec_check;
+
+       temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
+       temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
+       temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
+
+       temp1_fp = drm_fixp_from_fraction(8, 1);
+       temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+
+       numerator = drm_fixp2int(temp3_fp);
+
+       dsc_num_bytes  = numerator / dsc_num_slices;
+       eoc_bytes           = dsc_num_bytes % nlanes;
+       tot_num_eoc_symbols = nlanes * dsc_num_slices;
+       tot_num_hor_bytes   = dsc_num_bytes * dsc_num_slices;
+       tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
+
+       if (dsc_num_bytes == 0)
+               pr_info("incorrect number of bytes per slice=%d\n",
+                               dsc_num_bytes);
+
+       dwidth_dsc_bytes = (tot_num_hor_bytes +
+                               tot_num_eoc_symbols +
+                               (eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
+
+       dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
+
+       temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
+       temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
+       pclk_dsc_fp = temp1_fp;
+
+       temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
+       temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
+       hbp_dsc_fp = temp2_fp;
+
+       /* output */
+       tu->pclk_fp = pclk_dsc_fp;
+       tu->lwidth_fp = dwidth_dsc_fp;
+       tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
+
+fec_check:
+       if (in->fec_en) {
+               temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */
+               tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
+       }
+}
+
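+/*
+ * One step of the brute-force search: for the current tu_size and
+ * upper/lower boundary counts, compute the resulting valid-symbol
+ * boundaries and error terms, and record them in @tu when they beat
+ * the best candidate found so far.
+ */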
+static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
+{
+       s64 temp1_fp, temp2_fp, temp, temp1, temp2;
+       int compare_result_1, compare_result_2, compare_result_3;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+
+       tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+       temp = (tu->i_upper_boundary_count *
+                               tu->new_valid_boundary_link +
+                               tu->i_lower_boundary_count *
+                               (tu->new_valid_boundary_link-1));
+       tu->average_valid2_fp = drm_fixp_from_fraction(temp,
+                                       (tu->i_upper_boundary_count +
+                                       tu->i_lower_boundary_count));
+
+       temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+       temp2_fp = tu->lwidth_fp;
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+       tu->n_tus = drm_fixp2int(temp2_fp);
+       if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+               tu->n_tus += 1;
+
+       temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
+       temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu->n_remainder_symbols_per_lane_fp = temp2_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       tu->last_partial_tu_fp =
+                       drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
+                                       temp1_fp);
+
+       if (tu->n_remainder_symbols_per_lane_fp != 0)
+               tu->remainder_symbols_exist = 1;
+       else
+               tu->remainder_symbols_exist = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
+       tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
+
+       tu->paired_tus = (int)((tu->n_tus_per_lane) /
+                                       (tu->i_upper_boundary_count +
+                                        tu->i_lower_boundary_count));
+
+       tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
+                                               (tu->i_upper_boundary_count +
+                                               tu->i_lower_boundary_count);
+
+       if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
+               tu->remainder_tus_upper = tu->i_upper_boundary_count;
+               tu->remainder_tus_lower = tu->remainder_tus -
+                                               tu->i_upper_boundary_count;
+       } else {
+               tu->remainder_tus_upper = tu->remainder_tus;
+               tu->remainder_tus_lower = 0;
+       }
+
+       temp = tu->paired_tus * (tu->i_upper_boundary_count *
+                               tu->new_valid_boundary_link +
+                               tu->i_lower_boundary_count *
+                               (tu->new_valid_boundary_link - 1)) +
+                               (tu->remainder_tus_upper *
+                                tu->new_valid_boundary_link) +
+                               (tu->remainder_tus_lower *
+                               (tu->new_valid_boundary_link - 1));
+       tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
+
+       if (tu->remainder_symbols_exist) {
+               temp1_fp = tu->total_valid_fp +
+                               tu->n_remainder_symbols_per_lane_fp;
+               temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+               temp2_fp = temp2_fp + tu->last_partial_tu_fp;
+               temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
+       } else {
+               temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+               temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
+       }
+       tu->effective_valid_fp = temp1_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+       tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+       tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
+
+       tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+       temp2_fp = tu->lwidth_fp;
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+
+       if (temp2_fp)
+               tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
+       else
+               tu->n_tus_incl_last_incomplete_tu = 0;
+
+       temp1 = 0;
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+       temp1_fp = tu->average_valid2_fp - temp2_fp;
+       temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               temp1 = drm_fixp2int_ceil(temp1_fp);
+
+       temp = tu->i_upper_boundary_count * tu->nlanes;
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(temp, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp2_fp)
+               temp2 = drm_fixp2int_ceil(temp2_fp);
+       else
+               temp2 = 0;
+       tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
+
+       temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
+       temp2_fp = drm_fixp_from_fraction(
+                       tu->extra_required_bytes_new_tmp, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu->extra_pclk_cycles_tmp = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
+       temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+       temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp1_fp)
+               tu->extra_pclk_cycles_in_link_clk_tmp =
+                                               drm_fixp2int_ceil(temp1_fp);
+       else
+               tu->extra_pclk_cycles_in_link_clk_tmp = 0;
+
+       tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
+
+       tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
+
+       tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
+                                       tu->lower_filler_size_tmp +
+                                       tu->extra_buffer_margin;
+
+       temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
+       tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+       compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
+       if (compare_result_1 == 2)
+               compare_result_1 = 1;
+       else
+               compare_result_1 = 0;
+
+       compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
+       if (compare_result_2 == 2)
+               compare_result_2 = 1;
+       else
+               compare_result_2 = 0;
+
+       compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
+                                       tu->delay_start_time_fp);
+       if (compare_result_3 == 2)
+               compare_result_3 = 0;
+       else
+               compare_result_3 = 1;
+
+       if (((tu->even_distribution == 1) ||
+                       ((tu->even_distribution_BF == 0) &&
+                       (tu->even_distribution_legacy == 0))) &&
+                       tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
+                       compare_result_2 &&
+                       (compare_result_1 || (tu->min_hblank_violated == 1)) &&
+                       (tu->new_valid_boundary_link - 1) > 0 &&
+                       compare_result_3 &&
+                       (tu->delay_start_link_tmp <= 1023)) {
+               tu->upper_boundary_count = tu->i_upper_boundary_count;
+               tu->lower_boundary_count = tu->i_lower_boundary_count;
+               tu->err_fp = tu->n_n_err_fp;
+               tu->boundary_moderation_en = true;
+               tu->tu_size_desired = tu->tu_size;
+               tu->valid_boundary_link = tu->new_valid_boundary_link;
+               tu->effective_valid_recorded_fp = tu->effective_valid_fp;
+               tu->even_distribution_BF = 1;
+               tu->delay_start_link = tu->delay_start_link_tmp;
+       } else if (tu->boundary_mod_lower_err == 0) {
+               compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
+                                                       tu->diff_abs_fp);
+               if (compare_result_1 == 2)
+                       tu->boundary_mod_lower_err = 1;
+       }
+}
+
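+/*
+ * Compute the transfer-unit mapping for the given link and stream
+ * parameters: the number of valid symbols per TU, the start-of-line
+ * delay, and the optional upper/lower boundary-moderation counts
+ * reported back through @tu_table.
+ */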
+static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
+                                  struct dp_vc_tu_mapping_table *tu_table)
+{
+       struct tu_algo_data tu;
+       int compare_result_1, compare_result_2;
+       u64 temp = 0;
+       s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
+
+       s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
+       s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
+       s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
+       s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
+
+       u8 DP_BRUTE_FORCE = 1;
+       s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
+       uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
+       uint HBLANK_MARGIN = 4;
+
+       memset(&tu, 0, sizeof(tu));
+
+       dp_panel_update_tu_timings(in, &tu);
+
+       tu.err_fp = drm_fixp_from_fraction(1000, 1);
+
+       temp1_fp = drm_fixp_from_fraction(4, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp);
+       temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp);
+       tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
+
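+       /*
+        * ratio = (pclk * bpp / 8) / (nlanes * lclk): the fraction of the
+        * link's symbol slots that must carry pixel data.
+        */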
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp);
+
+       tu.original_ratio_fp = tu.ratio_fp;
+       tu.boundary_moderation_en = false;
+       tu.upper_boundary_count = 0;
+       tu.lower_boundary_count = 0;
+       tu.i_upper_boundary_count = 0;
+       tu.i_lower_boundary_count = 0;
+       tu.valid_lower_boundary_link = 0;
+       tu.even_distribution_BF = 0;
+       tu.even_distribution_legacy = 0;
+       tu.even_distribution = 0;
+       tu.delay_start_time_fp = 0;
+
+       tu.err_fp = drm_fixp_from_fraction(1000, 1);
+       tu.n_err_fp = 0;
+       tu.n_n_err_fp = 0;
+
+       tu.ratio = drm_fixp2int(tu.ratio_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp);
+       if (temp2_fp != 0 &&
+                       !tu.ratio && tu.dsc_en == 0) {
+               tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp);
+               tu.ratio = drm_fixp2int(tu.ratio_fp);
+               if (tu.ratio)
+                       tu.ratio_fp = drm_fixp_from_fraction(1, 1);
+       }
+
+       if (tu.ratio > 1)
+               tu.ratio = 1;
+
+       if (tu.ratio == 1)
+               goto tu_size_calc;
+
+       compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp);
+       if (!compare_result_1 || compare_result_1 == 1)
+               compare_result_1 = 1;
+       else
+               compare_result_1 = 0;
+
+       compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp);
+       if (!compare_result_2 || compare_result_2 == 2)
+               compare_result_2 = 1;
+       else
+               compare_result_2 = 0;
+
+       if (tu.dsc_en && compare_result_1 && compare_result_2) {
+               HBLANK_MARGIN += 4;
+               DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n",
+                               HBLANK_MARGIN);
+       }
+
+tu_size_calc:
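+       /* pick the TU size (32..64) that minimizes the rounding error */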
+       for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+               temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1);
+               temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+               temp = drm_fixp2int_ceil(temp2_fp);
+               temp1_fp = drm_fixp_from_fraction(temp, 1);
+               tu.n_err_fp = temp1_fp - temp2_fp;
+
+               if (tu.n_err_fp < tu.err_fp) {
+                       tu.err_fp = tu.n_err_fp;
+                       tu.tu_size_desired = tu.tu_size;
+               }
+       }
+
+       tu.tu_size_minus1 = tu.tu_size_desired - 1;
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+       tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = tu.lwidth_fp;
+       temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu.n_tus = drm_fixp2int(temp2_fp);
+       if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+               tu.n_tus += 1;
+
+       tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0;
+       DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n",
+               tu.valid_boundary_link, tu.n_tus);
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       temp = drm_fixp2int(temp2_fp);
+       if (temp && temp2_fp)
+               tu.extra_bytes = drm_fixp2int_ceil(temp2_fp);
+       else
+               tu.extra_bytes = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1);
+       temp2_fp = drm_fixp_from_fraction(8, tu.bpp);
+       temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp && temp1_fp)
+               tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu.extra_pclk_cycles = drm_fixp2int(temp1_fp);
+
+       temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+       temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
+
+       tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link;
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+
+       tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk +
+                               tu.filler_size + tu.extra_buffer_margin;
+
+       tu.resulting_valid_fp =
+                       drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+       tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+
+       temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
+       temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp;
+       tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+       tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+       compare_result_1 = _tu_param_compare(tu.hbp_time_fp,
+                                       tu.delay_start_time_fp);
+       if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
+               tu.min_hblank_violated = 1;
+
+       tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp);
+
+       compare_result_2 = _tu_param_compare(tu.hactive_time_fp,
+                                               tu.delay_start_time_fp);
+       if (compare_result_2 == 2)
+               tu.min_hblank_violated = 1;
+
+       tu.delay_start_time_fp = 0;
+
+       /*
+        * Brute force: if the straightforward TU choice leaves a residual
+        * error, an uneven distribution across lanes or a violated minimum
+        * hblank, search all (tu_size, upper, lower) boundary-count
+        * combinations for a boundary-moderated configuration.
+        */
+
+       tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
+       tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp;
+
+       temp = drm_fixp2int(tu.diff_abs_fp);
+       if (!temp && tu.diff_abs_fp <= 0xffff)
+               tu.diff_abs_fp = 0;
+
+       /* diff_abs = |diff_abs|; plain negation is exact in fixed point */
+       if (tu.diff_abs_fp < 0)
+               tu.diff_abs_fp = -tu.diff_abs_fp;
+
+       tu.boundary_mod_lower_err = 0;
+       if ((tu.diff_abs_fp != 0 &&
+                       ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
+                        (tu.even_distribution_legacy == 0) ||
+                        (DP_BRUTE_FORCE == 1))) ||
+                       (tu.min_hblank_violated == 1)) {
+               do {
+                       tu.err_fp = drm_fixp_from_fraction(1000, 1);
+
+                       temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+                       temp2_fp = drm_fixp_from_fraction(
+                                       tu.delay_start_link_extra_pixclk, 1);
+                       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+                       if (temp1_fp)
+                               tu.extra_buffer_margin =
+                                       drm_fixp2int_ceil(temp1_fp);
+                       else
+                               tu.extra_buffer_margin = 0;
+
+                       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+                       temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+
+                       if (temp1_fp)
+                               tu.n_symbols = drm_fixp2int_ceil(temp1_fp);
+                       else
+                               tu.n_symbols = 0;
+
+                       for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+                               for (tu.i_upper_boundary_count = 1;
+                                       tu.i_upper_boundary_count <= 15;
+                                       tu.i_upper_boundary_count++) {
+                                       for (tu.i_lower_boundary_count = 1;
+                                               tu.i_lower_boundary_count <= 15;
+                                               tu.i_lower_boundary_count++) {
+                                               _tu_valid_boundary_calc(&tu);
+                                       }
+                               }
+                       }
+                       tu.delay_start_link_extra_pixclk--;
+               } while (!tu.boundary_moderation_en &&
+                       tu.boundary_mod_lower_err == 1 &&
+                       tu.delay_start_link_extra_pixclk != 0);
+
+               if (tu.boundary_moderation_en) {
+                       temp1_fp = drm_fixp_from_fraction(
+                                       (tu.upper_boundary_count *
+                                       tu.valid_boundary_link +
+                                       tu.lower_boundary_count *
+                                       (tu.valid_boundary_link - 1)), 1);
+                       temp2_fp = drm_fixp_from_fraction(
+                                       (tu.upper_boundary_count +
+                                       tu.lower_boundary_count), 1);
+                       tu.resulting_valid_fp =
+                                       drm_fixp_div(temp1_fp, temp2_fp);
+
+                       temp1_fp = drm_fixp_from_fraction(
+                                       tu.tu_size_desired, 1);
+                       tu.ratio_by_tu_fp =
+                               drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+
+                       tu.valid_lower_boundary_link =
+                               tu.valid_boundary_link - 1;
+
+                       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+                       temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+                       temp2_fp = drm_fixp_div(temp1_fp,
+                                               tu.resulting_valid_fp);
+                       tu.n_tus = drm_fixp2int(temp2_fp);
+
+                       tu.tu_size_minus1 = tu.tu_size_desired - 1;
+                       tu.even_distribution_BF = 1;
+
+                       temp1_fp =
+                               drm_fixp_from_fraction(tu.tu_size_desired, 1);
+                       temp2_fp =
+                               drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+                       tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+               }
+       }
+
+       temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp);
+
+       if (temp2_fp)
+               temp = drm_fixp2int_ceil(temp2_fp);
+       else
+               temp = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
+       temp1_fp = drm_fixp_from_fraction(temp, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+       temp = drm_fixp2int(temp2_fp);
+
+       if (tu.async_en)
+               tu.delay_start_link += (int)temp;
+
+       temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+       tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+       /* OUTPUTS */
+       tu_table->valid_boundary_link       = tu.valid_boundary_link;
+       tu_table->delay_start_link          = tu.delay_start_link;
+       tu_table->boundary_moderation_en    = tu.boundary_moderation_en;
+       tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link;
+       tu_table->upper_boundary_count      = tu.upper_boundary_count;
+       tu_table->lower_boundary_count      = tu.lower_boundary_count;
+       tu_table->tu_size_minus1            = tu.tu_size_minus1;
+
+       DRM_DEBUG_DP("TU: valid_boundary_link: %d\n",
+                               tu_table->valid_boundary_link);
+       DRM_DEBUG_DP("TU: delay_start_link: %d\n",
+                               tu_table->delay_start_link);
+       DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n",
+                       tu_table->boundary_moderation_en);
+       DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n",
+                       tu_table->valid_lower_boundary_link);
+       DRM_DEBUG_DP("TU: upper_boundary_count: %d\n",
+                       tu_table->upper_boundary_count);
+       DRM_DEBUG_DP("TU: lower_boundary_count: %d\n",
+                       tu_table->lower_boundary_count);
+       DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
+}
+
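+/*
+ * Collect the TU calculation inputs from the current mode and link
+ * parameters. DSC, FEC and asynchronous clocking are not used here, so
+ * they are hard-coded off and the pixel encoding is fixed at 4:4:4.
+ */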
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+               struct dp_vc_tu_mapping_table *tu_table)
+{
+       struct dp_tu_calc_input in;
+       struct drm_display_mode *drm_mode;
+
+       drm_mode = &ctrl->panel->dp_mode.drm_mode;
+
+       in.lclk = ctrl->link->link_params.rate / 1000;
+       in.pclk_khz = drm_mode->clock;
+       in.hactive = drm_mode->hdisplay;
+       in.hporch = drm_mode->htotal - drm_mode->hdisplay;
+       in.nlanes = ctrl->link->link_params.num_lanes;
+       in.bpp = ctrl->panel->dp_mode.bpp;
+       in.pixel_enc = 444;
+       in.dsc_en = 0;
+       in.async_en = 0;
+       in.fec_en = 0;
+       in.num_of_dsc_slices = 0;
+       in.compress_ratio = 100;
+
+       _dp_ctrl_calc_tu(&in, tu_table);
+}
+
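+/* Pack the computed TU parameters into the TU and valid-boundary registers */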
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+       u32 dp_tu = 0x0;
+       u32 valid_boundary = 0x0;
+       u32 valid_boundary2 = 0x0;
+       struct dp_vc_tu_mapping_table tu_calc_table;
+
+       dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
+       dp_tu |= tu_calc_table.tu_size_minus1;
+       valid_boundary |= tu_calc_table.valid_boundary_link;
+       valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+       valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+       valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+       valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+       if (tu_calc_table.boundary_moderation_en)
+               valid_boundary2 |= BIT(0);
+
+       pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+                       dp_tu, valid_boundary, valid_boundary2);
+
+       dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+                               dp_tu, valid_boundary, valid_boundary2);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       if (!wait_for_completion_timeout(&ctrl->video_comp,
+                               WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
+               DRM_ERROR("wait4video timed out\n");
+               ret = -ETIMEDOUT;
+       }
+       return ret;
+}
+
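+/*
+ * Program the requested voltage swing and pre-emphasis levels through
+ * the catalog, then mirror them (with the MAX_*_REACHED flags) into the
+ * sink's DP_TRAINING_LANEx_SET registers.
+ */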
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+       struct dp_link *link = ctrl->link;
+       int ret = 0, lane, lane_cnt;
+       u8 buf[4];
+       u32 max_level_reached = 0;
+       u32 voltage_swing_level = link->phy_params.v_level;
+       u32 pre_emphasis_level = link->phy_params.p_level;
+
+       ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+               voltage_swing_level, pre_emphasis_level);
+
+       if (ret)
+               return ret;
+
+       if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+               DRM_DEBUG_DP("max. voltage swing level reached %d\n",
+                               voltage_swing_level);
+               max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+       }
+
+       if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+               DRM_DEBUG_DP("max. pre-emphasis level reached %d\n",
+                               pre_emphasis_level);
+               max_level_reached  |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       }
+
+       pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+       lane_cnt = ctrl->link->link_params.num_lanes;
+       for (lane = 0; lane < lane_cnt; lane++)
+               buf[lane] = voltage_swing_level | pre_emphasis_level
+                               | max_level_reached;
+
+       DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level
+                                       | pre_emphasis_level);
+       ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
+                                       buf, lane_cnt);
+       if (ret == lane_cnt)
+               ret = 0;
+
+       return ret;
+}
+
+static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+               u8 pattern)
+{
+       u8 buf;
+       int ret = 0;
+
+       DRM_DEBUG_DP("sink: pattern=%x\n", pattern);
+
+       buf = pattern;
+
+       if (pattern && pattern != DP_TRAINING_PATTERN_4)
+               buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+       ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
+       return ret == 1;
+}
+
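+/*
+ * Read the DPCD link status bytes, retrying while the sink still
+ * reports DP_LINK_STATUS_UPDATED from a previous read.
+ */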
+static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+                                   u8 *link_status)
+{
+       int len = 0;
+       u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
+       u32 link_status_read_max_retries = 100;
+
+       while (--link_status_read_max_retries) {
+               len = drm_dp_dpcd_read_link_status(ctrl->aux,
+                       link_status);
+               if (len != DP_LINK_STATUS_SIZE) {
+                       DRM_ERROR("DP link status read failed, err: %d\n", len);
+                       return len;
+               }
+
+               if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
+                       return 0;
+       }
+
+       return -ETIMEDOUT;
+}
+
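+/*
+ * Link training phase 1 (clock recovery): transmit TPS1 and adjust the
+ * drive levels from the sink's feedback until clock recovery locks on
+ * all lanes, allowing up to four tries per voltage-swing level.
+ */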
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int tries, old_v_level, ret = 0;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+       int const maximum_retries = 4;
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       *training_step = DP_TRAINING_1;
+
+       ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1);
+       if (ret)
+               return ret;
+       dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+               DP_LINK_SCRAMBLING_DISABLE);
+
+       ret = dp_ctrl_update_vx_px(ctrl);
+       if (ret)
+               return ret;
+
+       old_v_level = ctrl->link->phy_params.v_level;
+       for (tries = 0; tries < maximum_retries; tries++) {
+               drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
+
+               ret = dp_ctrl_read_link_status(ctrl, link_status);
+               if (ret)
+                       return ret;
+
+               cr->lane_0_1 = link_status[0];
+               cr->lane_2_3 = link_status[1];
+
+               if (drm_dp_clock_recovery_ok(link_status,
+                       ctrl->link->link_params.num_lanes)) {
+                       return 0;
+               }
+
+               if (ctrl->link->phy_params.v_level >=
+                       DP_TRAIN_VOLTAGE_SWING_MAX) {
+                       DRM_ERROR_RATELIMITED("max v_level reached\n");
+                       return -EAGAIN;
+               }
+
+               if (old_v_level != ctrl->link->phy_params.v_level) {
+                       tries = 0;
+                       old_v_level = ctrl->link->phy_params.v_level;
+               }
+
+               DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n");
+
+               dp_link_adjust_levels(ctrl->link, link_status);
+               ret = dp_ctrl_update_vx_px(ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       DRM_ERROR("max tries reached\n");
+       return -ETIMEDOUT;
+}
+
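+/* Step down to the next standard link rate: HBR3 -> HBR2 -> HBR -> RBR */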
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       switch (ctrl->link->link_params.rate) {
+       case 810000:
+               ctrl->link->link_params.rate = 540000;
+               break;
+       case 540000:
+               ctrl->link->link_params.rate = 270000;
+               break;
+       case 270000:
+               ctrl->link->link_params.rate = 162000;
+               break;
+       case 162000:
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (!ret)
+               DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
+
+       return ret;
+}
+
+static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+{
+       if (ctrl->link->link_params.num_lanes == 1)
+               return -1;
+
+       ctrl->link->link_params.num_lanes /= 2;
+       ctrl->link->link_params.rate = ctrl->panel->link_info.rate;
+
+       ctrl->link->phy_params.p_level = 0;
+       ctrl->link->phy_params.v_level = 0;
+
+       return 0;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+       dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+       drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+}
+
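+/*
+ * Link training phase 2 (channel equalization): transmit TPS3 when the
+ * sink supports it (TPS2 otherwise) and adjust the drive levels until
+ * channel EQ, symbol lock and inter-lane alignment are all reported.
+ */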
+static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int tries = 0, ret = 0;
+       u8 pattern;
+       int const maximum_retries = 5;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       *training_step = DP_TRAINING_2;
+
+       if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+               pattern = DP_TRAINING_PATTERN_3;
+       else
+               pattern = DP_TRAINING_PATTERN_2;
+
+       ret = dp_ctrl_update_vx_px(ctrl);
+       if (ret)
+               return ret;
+
+       ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
+       if (ret)
+               return ret;
+
+       dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+
+       for (tries = 0; tries <= maximum_retries; tries++) {
+               drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+
+               ret = dp_ctrl_read_link_status(ctrl, link_status);
+               if (ret)
+                       return ret;
+               cr->lane_0_1 = link_status[0];
+               cr->lane_2_3 = link_status[1];
+
+               if (drm_dp_channel_eq_ok(link_status,
+                       ctrl->link->link_params.num_lanes)) {
+                       return 0;
+               }
+
+               dp_link_adjust_levels(ctrl->link, link_status);
+               ret = dp_ctrl_update_vx_px(ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int ret = 0;
+       u8 encoding = DP_SET_ANSI_8B10B;
+       struct dp_link_info link_info = {0};
+
+       dp_ctrl_config_ctrl(ctrl);
+
+       link_info.num_lanes = ctrl->link->link_params.num_lanes;
+       link_info.rate = ctrl->link->link_params.rate;
+       link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+
+       dp_aux_link_configure(ctrl->aux, &link_info);
+       drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+                               &encoding, 1);
+
+       ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+       if (ret) {
+               DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+               goto end;
+       }
+
+       /* print success info as this is a result of user-initiated action */
+       DRM_DEBUG_DP("link training #1 successful\n");
+
+       ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+       if (ret) {
+               DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+               goto end;
+       }
+
+       /* print success info as this is a result of user-initiated action */
+       DRM_DEBUG_DP("link training #2 successful\n");
+
+end:
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int ret = 0;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+               return ret;
+
+       /*
+        * As part of previous calls, DP controller state might have
+        * transitioned to PUSH_IDLE. In order to start transmitting
+        * a link training pattern, we have to first do soft reset.
+        */
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ret = dp_ctrl_link_train(ctrl, cr, training_step);
+
+       return ret;
+}
+
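+/*
+ * Cache @rate on the named clock in the parser's clock table so it can
+ * be applied when the module's clocks are enabled.
+ */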
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+                       enum dp_pm_type module, char *name, unsigned long rate)
+{
+       u32 num = ctrl->parser->mp[module].num_clk;
+       struct dss_clk *cfg = ctrl->parser->mp[module].clk_config;
+
+       while (num && strcmp(cfg->clk_name, name)) {
+               num--;
+               cfg++;
+       }
+
+       DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name);
+
+       if (num)
+               cfg->rate = rate;
+       else
+               DRM_ERROR("%s clock doesn't exist; can't set rate %lu\n",
+                               name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_io *dp_io = &ctrl->parser->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       opts_dp->lanes = ctrl->link->link_params.num_lanes;
+       opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+       dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
+                                       ctrl->link->link_params.rate * 1000);
+
+       phy_configure(phy, &dp_io->phy_opts);
+       phy_power_on(phy);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+       if (ret)
+               DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
+
+       DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+               ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+       return ret;
+}
+
+static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
+                                       ctrl->dp_ctrl.pixel_rate * 1000);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+       if (ret)
+               DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
+
+       DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+                       ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+       return ret;
+}
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("Invalid input data\n");
+               return -EINVAL;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+
+       ctrl->dp_ctrl.orientation = flip;
+
+       dp_catalog_ctrl_phy_reset(ctrl->catalog);
+       phy_init(phy);
+       dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
+
+       return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @dp_ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("Invalid input data\n");
+               return;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
+
+       DRM_DEBUG_DP("Host deinitialized successfully\n");
+}
+
+static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
+{
+       u8 *dpcd = ctrl->panel->dpcd;
+       u32 edid_quirks = 0;
+
+       edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
+       /*
+        * For better interoperability, use a fixed NVID=0x8000 whenever
+        * a VGA dongle is connected downstream.
+        */
+       if (drm_dp_is_branch(dpcd))
+               return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
+                               DP_DPCD_QUIRK_CONSTANT_N));
+
+       return false;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_io *dp_io = &ctrl->parser->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+       opts_dp->lanes = ctrl->link->link_params.num_lanes;
+       phy_configure(phy, &dp_io->phy_opts);
+       /*
+        * Disable and re-enable the mainlink clock since the
+        * link clock might have been adjusted as part of the
+        * link maintenance.
+        */
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+       if (ret) {
+               DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
+               return ret;
+       }
+       phy_power_off(phy);
+       /* hw recommended delay before re-enabling clocks */
+       msleep(20);
+
+       ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
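+/*
+ * Re-train the link in place after a sink-reported status change, then
+ * restart the video stream and wait for it to come up.
+ */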
+static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_cr_status cr;
+       int training_step = DP_TRAINING_NONE;
+
+       dp_ctrl_push_idle(&ctrl->dp_ctrl);
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+       if (ret)
+               goto end;
+
+       dp_ctrl_clear_training_pattern(ctrl);
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+       ret = dp_ctrl_wait4video_ready(ctrl);
+end:
+       return ret;
+}
+
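+/*
+ * Handle a PHY test pattern request from the sink by fully cycling the
+ * controller: off, link bring-up, then stream enable.
+ */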
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+               DRM_DEBUG_DP("no test pattern selected by sink\n");
+               return ret;
+       }
+
+       /*
+        * The global reset will need DP link related clocks to be
+        * running. Add the global reset just before disabling the
+        * link clocks and core clocks.
+        */
+       ret = dp_ctrl_off(&ctrl->dp_ctrl);
+       if (ret) {
+               DRM_ERROR("failed to disable DP controller\n");
+               return ret;
+       }
+
+       ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+       if (!ret)
+               ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+       else
+               DRM_ERROR("failed to enable DP link controller\n");
+
+       return ret;
+}
+
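+/*
+ * Transmit the PHY test pattern requested by the sink and verify from
+ * the catalog read-back that the hardware is sending it.
+ */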
+static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+{
+       bool success = false;
+       u32 pattern_sent = 0x0;
+       u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
+
+       DRM_DEBUG_DP("request: 0x%x\n", pattern_requested);
+
+       if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+                       ctrl->link->phy_params.v_level,
+                       ctrl->link->phy_params.p_level)) {
+               DRM_ERROR("Failed to set v/p levels\n");
+               return false;
+       }
+       dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+       dp_ctrl_update_vx_px(ctrl);
+       dp_link_send_test_response(ctrl->link);
+
+       pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+
+       switch (pattern_sent) {
+       case MR_LINK_TRAINING1:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_D10_2);
+               break;
+       case MR_LINK_SYMBOL_ERM:
+               success = ((pattern_requested ==
+                       DP_PHY_TEST_PATTERN_ERROR_COUNT) ||
+                               (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_CP2520));
+               break;
+       case MR_LINK_PRBS7:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_PRBS7);
+               break;
+       case MR_LINK_CUSTOM80:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_80BIT_CUSTOM);
+               break;
+       case MR_LINK_TRAINING4:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_SEL_MASK);
+               break;
+       default:
+               success = false;
+       }
+
+       DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed",
+                                               pattern_requested);
+       return success;
+}
+
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       u32 sink_request = 0x0;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       sink_request = ctrl->link->sink_request;
+
+       if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               DRM_DEBUG_DP("PHY_TEST_PATTERN request\n");
+               if (dp_ctrl_process_phy_test_request(ctrl)) {
+                       DRM_ERROR("process phy_test_req failed\n");
+                       return;
+               }
+       }
+
+       if (sink_request & DP_LINK_STATUS_UPDATED) {
+               if (dp_ctrl_link_maintenance(ctrl)) {
+                       DRM_ERROR("LM failed: LINK_STATUS_UPDATED\n");
+                       return;
+               }
+       }
+
+       if (sink_request & DP_TEST_LINK_TRAINING) {
+               dp_link_send_test_response(ctrl->link);
+               if (dp_ctrl_link_maintenance(ctrl)) {
+                       DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+                       return;
+               }
+       }
+}
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+{
+       int rc = 0;
+       struct dp_ctrl_private *ctrl;
+       u32 rate = 0;
+       int link_train_max_retries = 5;
+       u32 const phy_cts_pixel_clk_khz = 148500;
+       struct dp_cr_status cr;
+       unsigned int training_step;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       rate = ctrl->panel->link_info.rate;
+
+       dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               DRM_DEBUG_DP("using phy test link parameters\n");
+               if (!ctrl->panel->dp_mode.drm_mode.clock)
+                       ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+       } else {
+               ctrl->link->link_params.rate = rate;
+               ctrl->link->link_params.num_lanes =
+                       ctrl->panel->link_info.num_lanes;
+               ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+       }
+
+       DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+               ctrl->link->link_params.rate,
+               ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+       rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+       if (rc)
+               return rc;
+
+       ctrl->link->phy_params.p_level = 0;
+       ctrl->link->phy_params.v_level = 0;
+
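+       /*
+        * Train up to four times, dropping the link rate after a
+        * clock-recovery failure (or the lane count once already at RBR
+        * with some lanes locked) and the lane count after an EQ failure.
+        */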
+       while (--link_train_max_retries &&
+               !atomic_read(&ctrl->dp_ctrl.aborted)) {
+               rc = dp_ctrl_reinitialize_mainlink(ctrl);
+               if (rc) {
+                       DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+                                       rc);
+                       break;
+               }
+
+               training_step = DP_TRAINING_NONE;
+               rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+               if (rc == 0) {
+                       /* training completed successfully */
+                       break;
+               } else if (training_step == DP_TRAINING_1) {
+                       /* link train_1 failed */
+                       rc = dp_ctrl_link_rate_down_shift(ctrl);
+                       if (rc < 0) { /* already at RBR (1.62 Gbps) */
+                               if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+                                       /*
+                                        * some lanes are ready,
+                                        * reduce lane number
+                                        */
+                                       rc = dp_ctrl_link_lane_down_shift(ctrl);
+                                       if (rc < 0) { /* lane == 1 already */
+                                               /* end with failure */
+                                               break;
+                                       }
+                               } else {
+                                       /* end with failure */
+                                       break;
+                               }
+                       }
+               } else if (training_step == DP_TRAINING_2) {
+                       /* link train_2 failed, lower lane rate */
+                       rc = dp_ctrl_link_lane_down_shift(ctrl);
+                       if (rc < 0) {
+                               /* end with failure */
+                               break; /* lane == 1 already */
+                       }
+               }
+       }
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+               return rc;
+
+       /* stop transmitting the training pattern */
+       dp_ctrl_clear_training_pattern(ctrl);
+
+       /*
+        * Keep transmitting the idle pattern until video is ready,
+        * to keep the main link from losing sync.
+        */
+       if (rc == 0)  /* link trained successfully */
+               dp_ctrl_push_idle(dp_ctrl);
+
+       return rc;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+{
+       u32 rate = 0;
+       int ret = 0;
+       bool mainlink_ready = false;
+       struct dp_ctrl_private *ctrl;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       rate = ctrl->panel->link_info.rate;
+
+       ctrl->link->link_params.rate = rate;
+       ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+               ctrl->link->link_params.rate,
+               ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+       if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
+               ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+               if (ret) {
+                       DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
+                       goto end;
+               }
+       }
+
+       ret = dp_ctrl_enable_stream_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+               goto end;
+       }
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               dp_ctrl_send_phy_test_pattern(ctrl);
+               return 0;
+       }
+
+       /*
+        * Set up transfer unit values and set controller state to send
+        * video.
+        */
+       dp_ctrl_configure_source_params(ctrl);
+
+       dp_catalog_ctrl_config_msa(ctrl->catalog,
+               ctrl->link->link_params.rate,
+               ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
+
+       reinit_completion(&ctrl->video_comp);
+
+       dp_ctrl_setup_tr_unit(ctrl);
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+       ret = dp_ctrl_wait4video_ready(ctrl);
+       if (ret)
+               return ret;
+
+       mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+       DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+
+end:
+       return ret;
+}
+
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+       int ret = 0;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+       if (ret)
+               DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+       if (ret)
+               DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+
+       phy_power_off(phy);
+       phy_exit(phy);
+
+       DRM_DEBUG_DP("DP off done\n");
+       return ret;
+}
+
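+/*
+ * Controller interrupt handler: translates the "ready for video" and
+ * "idle pattern sent" interrupts into their matching completions so
+ * that waiters such as dp_ctrl_wait4video_ready() can make progress.
+ */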
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       u32 isr;
+
+       if (!dp_ctrl)
+               return;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+
+       if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
+               DRM_DEBUG_DP("dp_video_ready\n");
+               complete(&ctrl->video_comp);
+       }
+
+       if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
+               DRM_DEBUG_DP("idle_patterns_sent\n");
+               complete(&ctrl->idle_comp);
+       }
+}
+
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+                       struct dp_panel *panel, struct drm_dp_aux *aux,
+                       struct dp_power *power, struct dp_catalog *catalog,
+                       struct dp_parser *parser)
+{
+       struct dp_ctrl_private *ctrl;
+
+       if (!dev || !panel || !aux ||
+           !link || !catalog) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+       if (!ctrl)
+               return ERR_PTR(-ENOMEM);
+
+       init_completion(&ctrl->idle_comp);
+       init_completion(&ctrl->video_comp);
+
+       /* in parameters */
+       ctrl->parser   = parser;
+       ctrl->panel    = panel;
+       ctrl->power    = power;
+       ctrl->aux      = aux;
+       ctrl->link     = link;
+       ctrl->catalog  = catalog;
+       ctrl->dev      = dev;
+
+       return &ctrl->dp_ctrl;
+}
+
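+/*
+ * Nothing to tear down here: the ctrl instance is devm-allocated
+ * against @dev in dp_ctrl_get() and is freed together with it.
+ */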
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644 (file)
index 0000000..f60ba93
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+       bool orientation;
+       atomic_t aborted;
+       u32 pixel_rate;
+};
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+                       struct dp_panel *panel, struct drm_dp_aux *aux,
+                       struct dp_power *power, struct dp_catalog *catalog,
+                       struct dp_parser *parser);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
new file mode 100644 (file)
index 0000000..84670bc
--- /dev/null
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_file.h>
+
+#include "dp_parser.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_ctrl.h"
+#include "dp_debug.h"
+#include "dp_display.h"
+
+#define DEBUG_NAME "msm_dp"
+
+struct dp_debug_private {
+       struct dentry *root;
+
+       struct dp_usbpd *usbpd;
+       struct dp_link *link;
+       struct dp_panel *panel;
+       struct drm_connector **connector;
+       struct device *dev;
+       struct drm_device *drm_dev;
+
+       struct dp_debug dp_debug;
+};
+
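+/*
+ * Helper for the snprintf() accumulation in dp_debug_read_info(): @rc is
+ * the return value of the last snprintf(), @len the bytes written so far
+ * and @max_size the space left in the SZ_4K buffer.
+ */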
+static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
+{
+       if (rc >= *max_size) {
+               DRM_ERROR("buffer overflow\n");
+               return -EINVAL;
+       }
+       *len += rc;
+       *max_size = SZ_4K - *len;
+
+       return 0;
+}
+
+static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
+               size_t count, loff_t *ppos)
+{
+       struct dp_debug_private *debug = file->private_data;
+       char *buf;
+       int len = 0, rc = 0;
+       u64 lclk = 0;
+       int max_size = SZ_4K;
+       u32 link_params_rate;
+       struct drm_display_mode *drm_mode;
+
+       if (!debug)
+               return -ENODEV;
+
+       if (*ppos)
+               return 0;
+
+       buf = kzalloc(SZ_4K, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       drm_mode = &debug->panel->dp_mode.drm_mode;
+
+       rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
+                       debug->panel->max_pclk_khz);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdrm_dp_link\n\t\trate = %u\n",
+                       debug->panel->link_info.rate);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                        "\t\tnum_lanes = %u\n",
+                       debug->panel->link_info.num_lanes);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tcapabilities = %lu\n",
+                       debug->panel->link_info.capabilities);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+                       drm_mode->hdisplay,
+                       drm_mode->vdisplay);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tback_porch = %dx%d\n",
+                       drm_mode->htotal - drm_mode->hsync_end,
+                       drm_mode->vtotal - drm_mode->vsync_end);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tfront_porch = %dx%d\n",
+                       drm_mode->hsync_start - drm_mode->hdisplay,
+                       drm_mode->vsync_start - drm_mode->vdisplay);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tsync_width = %dx%d\n",
+                       drm_mode->hsync_end - drm_mode->hsync_start,
+                       drm_mode->vsync_end - drm_mode->vsync_start);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tactive_low = %dx%d\n",
+                       debug->panel->dp_mode.h_active_low,
+                       debug->panel->dp_mode.v_active_low);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\th_skew = %d\n",
+                       drm_mode->hskew);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\trefresh rate = %d\n",
+                       drm_mode_vrefresh(drm_mode));
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tpixel clock khz = %d\n",
+                       drm_mode->clock);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tbpp = %d\n",
+                       debug->panel->dp_mode.bpp);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       /* Link Information */
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_link:\n\t\ttest_requested = %d\n",
+                       debug->link->sink_request);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tnum_lanes = %d\n",
+                       debug->link->link_params.num_lanes);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       link_params_rate = debug->link->link_params.rate;
+       rc = snprintf(buf + len, max_size,
+                       "\t\tbw_code = %d\n",
+                       drm_dp_link_rate_to_bw_code(link_params_rate));
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       lclk = debug->link->link_params.rate * 1000;
+       rc = snprintf(buf + len, max_size,
+                       "\t\tlclk = %lld\n", lclk);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tv_level = %d\n",
+                       debug->link->phy_params.v_level);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tp_level = %d\n",
+                       debug->link->phy_params.p_level);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       len = min_t(size_t, count, len);
+       if (copy_to_user(user_buff, buf, len)) {
+               kfree(buf);
+               return -EFAULT;
+       }
+
+       *ppos += len;
+
+       kfree(buf);
+       return len;
+ error:
+       kfree(buf);
+       return -EINVAL;
+}
+
+static int dp_test_data_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev;
+       struct dp_debug_private *debug;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       u32 bpc;
+
+       debug = m->private;
+       dev = debug->drm_dev;
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       bpc = debug->link->test_video.test_bit_depth;
+                       seq_printf(m, "hdisplay: %d\n",
+                                       debug->link->test_video.test_h_width);
+                       seq_printf(m, "vdisplay: %d\n",
+                                       debug->link->test_video.test_v_height);
+                       seq_printf(m, "bpc: %u\n",
+                                       dp_link_bit_depth_to_bpc(bpc));
+               } else {
+                       seq_puts(m, "0");
+               }
+       }
+
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+
+static int dp_test_type_show(struct seq_file *m, void *data)
+{
+       struct dp_debug_private *debug = m->private;
+       struct drm_device *dev = debug->drm_dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected)
+                       seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+               else
+                       seq_puts(m, "0");
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+
+static ssize_t dp_test_active_write(struct file *file,
+               const char __user *ubuf,
+               size_t len, loff_t *offp)
+{
+       char *input_buffer;
+       int status = 0;
+       struct dp_debug_private *debug;
+       struct drm_device *dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       int val = 0;
+
+       debug = ((struct seq_file *)file->private_data)->private;
+       dev = debug->drm_dev;
+
+       if (len == 0)
+               return 0;
+
+       input_buffer = memdup_user_nul(ubuf, len);
+       if (IS_ERR(input_buffer))
+               return PTR_ERR(input_buffer);
+
+       DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       status = kstrtoint(input_buffer, 10, &val);
+                       if (status < 0)
+                               break;
+                       DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+                       /*
+                        * To prevent erroneous activation of the compliance
+                        * testing code, only accept an actual value of 1 here.
+                        */
+                       if (val == 1)
+                               debug->panel->video_test = true;
+                       else
+                               debug->panel->video_test = false;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+       kfree(input_buffer);
+       if (status < 0)
+               return status;
+
+       *offp += len;
+       return len;
+}
+
+static int dp_test_active_show(struct seq_file *m, void *data)
+{
+       struct dp_debug_private *debug = m->private;
+       struct drm_device *dev = debug->drm_dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       if (debug->panel->video_test)
+                               seq_puts(m, "1");
+                       else
+                               seq_puts(m, "0");
+               } else {
+                       seq_puts(m, "0");
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+
+static int dp_test_active_open(struct inode *inode,
+               struct file *file)
+{
+       return single_open(file, dp_test_active_show,
+                       inode->i_private);
+}
+
+static const struct file_operations dp_debug_fops = {
+       .open = simple_open,
+       .read = dp_debug_read_info,
+};
+
+static const struct file_operations test_active_fops = {
+       .owner = THIS_MODULE,
+       .open = dp_test_active_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = dp_test_active_write
+};
+
+static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+{
+       int rc = 0;
+       struct dp_debug_private *debug = container_of(dp_debug,
+                       struct dp_debug_private, dp_debug);
+       struct dentry *file;
+       struct dentry *test_active;
+       struct dentry *test_data, *test_type;
+
+       file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
+                       debug, &dp_debug_fops);
+       if (IS_ERR_OR_NULL(file)) {
+               rc = PTR_ERR(file);
+               DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_active = debugfs_create_file("msm_dp_test_active", 0444,
+                       minor->debugfs_root,
+                       debug, &test_active_fops);
+       if (IS_ERR_OR_NULL(test_active)) {
+               rc = PTR_ERR(test_active);
+               DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_data = debugfs_create_file("msm_dp_test_data", 0444,
+                       minor->debugfs_root,
+                       debug, &dp_test_data_fops);
+       if (IS_ERR_OR_NULL(test_data)) {
+               rc = PTR_ERR(test_data);
+               DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_type = debugfs_create_file("msm_dp_test_type", 0444,
+                       minor->debugfs_root,
+                       debug, &dp_test_type_fops);
+       if (IS_ERR_OR_NULL(test_type)) {
+               rc = PTR_ERR(test_type);
+               DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       debug->root = minor->debugfs_root;
+
+       return rc;
+}
+
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector, struct drm_minor *minor)
+{
+       int rc = 0;
+       struct dp_debug_private *debug;
+       struct dp_debug *dp_debug;
+
+       if (!dev || !panel || !usbpd || !link) {
+               DRM_ERROR("invalid input\n");
+               rc = -EINVAL;
+               goto error;
+       }
+
+       debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
+       if (!debug) {
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       debug->dp_debug.debug_en = false;
+       debug->usbpd = usbpd;
+       debug->link = link;
+       debug->panel = panel;
+       debug->dev = dev;
+       debug->drm_dev = minor->dev;
+       debug->connector = connector;
+
+       dp_debug = &debug->dp_debug;
+       dp_debug->vdisplay = 0;
+       dp_debug->hdisplay = 0;
+       dp_debug->vrefresh = 0;
+
+       rc = dp_debug_init(dp_debug, minor);
+       if (rc) {
+               devm_kfree(dev, debug);
+               goto error;
+       }
+
+       return dp_debug;
+ error:
+       return ERR_PTR(rc);
+}
+
+static int dp_debug_deinit(struct dp_debug *dp_debug)
+{
+       struct dp_debug_private *debug;
+
+       if (!dp_debug)
+               return -EINVAL;
+
+       debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+       debugfs_remove_recursive(debug->root);
+
+       return 0;
+}
+
+void dp_debug_put(struct dp_debug *dp_debug)
+{
+       struct dp_debug_private *debug;
+
+       if (!dp_debug)
+               return;
+
+       debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+       dp_debug_deinit(dp_debug);
+
+       devm_kfree(debug->dev, debug);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
new file mode 100644 (file)
index 0000000..7eaedfb
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode enabled
+ * @aspect_ratio: used to filter out aspect_ratio value
+ * @vdisplay: used to filter out vdisplay value
+ * @hdisplay: used to filter out hdisplay value
+ * @vrefresh: used to filter out vrefresh value
+ */
+struct dp_debug {
+       bool debug_en;
+       int aspect_ratio;
+       int vdisplay;
+       int hdisplay;
+       int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: double pointer to display connector
+ * @minor: pointer to the drm minor after device registration
+ * return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated with existing modules
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector,
+               struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector, struct drm_minor *minor)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644 (file)
index 0000000..e175aa3
--- /dev/null
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_hpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_reg.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+#include "dp_drm.h"
+#include "dp_audio.h"
+#include "dp_debug.h"
+
+static struct msm_dp *g_dp_display;
+#define HPD_STRING_SIZE 30
+
+enum {
+       ISR_DISCONNECTED,
+       ISR_CONNECT_PENDING,
+       ISR_CONNECTED,
+       ISR_HPD_REPLUG_COUNT,
+       ISR_IRQ_HPD_PULSE_COUNT,
+       ISR_HPD_LO_GLITCH_COUNT,
+};
+
+/* event thread connection state */
+enum {
+       ST_DISCONNECTED,
+       ST_CONNECT_PENDING,
+       ST_CONNECTED,
+       ST_DISCONNECT_PENDING,
+       ST_SUSPEND_PENDING,
+       ST_SUSPENDED,
+};
+
+enum {
+       EV_NO_EVENT,
+       /* hpd events */
+       EV_HPD_INIT_SETUP,
+       EV_HPD_PLUG_INT,
+       EV_IRQ_HPD_INT,
+       EV_HPD_REPLUG_INT,
+       EV_HPD_UNPLUG_INT,
+       EV_USER_NOTIFICATION,
+       EV_CONNECT_PENDING_TIMEOUT,
+       EV_DISCONNECT_PENDING_TIMEOUT,
+};
+
+#define EVENT_TIMEOUT  (HZ/10) /* 100ms */
+#define DP_EVENT_Q_MAX 8
+
+/* event delays are counted in EVENT_TIMEOUT (100ms) ticks, not jiffies */
+#define DP_TIMEOUT_5_SECOND    (5000 / 100)
+#define DP_TIMEOUT_NONE                0
+
+#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
+
+struct dp_event {
+       u32 event_id;
+       u32 data;
+       u32 delay;
+};
+
+struct dp_display_private {
+       char *name;
+       int irq;
+
+       /* state variables */
+       bool core_initialized;
+       bool hpd_irq_on;
+       bool audio_supported;
+
+       struct platform_device *pdev;
+       struct dentry *root;
+
+       struct dp_usbpd   *usbpd;
+       struct dp_parser  *parser;
+       struct dp_power   *power;
+       struct dp_catalog *catalog;
+       struct drm_dp_aux *aux;
+       struct dp_link    *link;
+       struct dp_panel   *panel;
+       struct dp_ctrl    *ctrl;
+       struct dp_debug   *debug;
+
+       struct dp_usbpd_cb usbpd_cb;
+       struct dp_display_mode dp_mode;
+       struct msm_dp dp_display;
+
+       /* wait for audio signaling */
+       struct completion audio_comp;
+
+       /* event related only access by event thread */
+       struct mutex event_mutex;
+       wait_queue_head_t event_q;
+       atomic_t hpd_state;
+       u32 event_pndx;
+       u32 event_gndx;
+       struct dp_event event_list[DP_EVENT_Q_MAX];
+       spinlock_t event_lock;
+
+       struct completion resume_comp;
+
+       struct dp_audio *audio;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+       {.compatible = "qcom,sc7180-dp"},
+       {}
+};
+
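+/*
+ * Producer side of the fixed-size event ring (event_pndx writes,
+ * event_gndx reads). Callers may run in interrupt context, hence the
+ * irqsave spinlock. @delay is a tick count consumed by the event
+ * thread, one tick per EVENT_TIMEOUT (~100ms).
+ */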
+static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+                                               u32 data, u32 delay)
+{
+       unsigned long flag;
+       struct dp_event *todo;
+       int pndx;
+
+       spin_lock_irqsave(&dp_priv->event_lock, flag);
+       pndx = dp_priv->event_pndx + 1;
+       pndx %= DP_EVENT_Q_MAX;
+       if (pndx == dp_priv->event_gndx) {
+               pr_err("event_q is full: pndx=%d gndx=%d\n",
+                       dp_priv->event_pndx, dp_priv->event_gndx);
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+               return -EPERM;
+       }
+       todo = &dp_priv->event_list[dp_priv->event_pndx++];
+       dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+       todo->event_id = event;
+       todo->data = data;
+       todo->delay = delay;
+       wake_up(&dp_priv->event_q);
+       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+       return 0;
+}
+
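+/*
+ * "Deleting" does not compact the ring: matching entries are rewritten
+ * as EV_NO_EVENT, which the event thread silently skips.
+ */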
+static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+{
+       unsigned long flag;
+       struct dp_event *todo;
+       u32     gndx;
+
+       spin_lock_irqsave(&dp_priv->event_lock, flag);
+       if (dp_priv->event_pndx == dp_priv->event_gndx) {
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+               return -ENOENT;
+       }
+
+       gndx = dp_priv->event_gndx;
+       while (dp_priv->event_pndx != gndx) {
+               todo = &dp_priv->event_list[gndx];
+               if (todo->event_id == event) {
+                       todo->event_id = EV_NO_EVENT;   /* deleted */
+                       todo->delay = 0;
+               }
+               gndx++;
+               gndx %= DP_EVENT_Q_MAX;
+       }
+       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+       return 0;
+}
+
+void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       complete_all(&dp->audio_comp);
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+                          void *data)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+       struct drm_device *drm;
+       struct msm_drm_private *priv;
+
+       drm = dev_get_drvdata(master);
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+               return -EINVAL;
+       }
+
+       dp->dp_display.drm_dev = drm;
+       priv = drm->dev_private;
+       priv->dp = &(dp->dp_display);
+
+       rc = dp->parser->parse(dp->parser);
+       if (rc) {
+               DRM_ERROR("device tree parsing failed\n");
+               goto end;
+       }
+
+       rc = dp_aux_register(dp->aux);
+       if (rc) {
+               DRM_ERROR("DRM DP AUX register failed\n");
+               goto end;
+       }
+
+       rc = dp_power_client_init(dp->power);
+       if (rc) {
+               DRM_ERROR("Power client create failed\n");
+               goto end;
+       }
+
+       rc = dp_register_audio_driver(dev, dp->audio);
+       if (rc)
+               DRM_ERROR("DP audio registration failed\n");
+
+end:
+       return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+                             void *data)
+{
+       struct dp_display_private *dp;
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct msm_drm_private *priv = drm->dev_private;
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("Invalid DP driver data\n");
+               return;
+       }
+
+       dp_power_client_deinit(dp->power);
+       dp_aux_unregister(dp->aux);
+       priv->dp = NULL;
+}
+
+static const struct component_ops dp_display_comp_ops = {
+       .bind = dp_display_bind,
+       .unbind = dp_display_unbind,
+};
+
+static bool dp_display_is_ds_bridge(struct dp_panel *panel)
+{
+       return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DWN_STRM_PORT_PRESENT);
+}
+
+static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
+{
+       return dp_display_is_ds_bridge(dp->panel) &&
+               (dp->link->sink_count == 0);
+}
+
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+       struct drm_connector *connector;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       connector = dp->dp_display.connector;
+       drm_helper_hpd_irq_event(connector->dev);
+}
+
+static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+                                           bool hpd)
+{
+       static bool encoder_mode_set;
+       struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
+       struct msm_kms *kms = priv->kms;
+
+       if ((hpd && dp->dp_display.is_connected) ||
+                       (!hpd && !dp->dp_display.is_connected)) {
+               DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
+               return 0;
+       }
+
+       /* reset video pattern flag on disconnect */
+       if (!hpd)
+               dp->panel->video_test = false;
+
+       dp->dp_display.is_connected = hpd;
+
+       if (dp->dp_display.is_connected && dp->dp_display.encoder
+                               && !encoder_mode_set
+                               && kms->funcs->set_encoder_mode) {
+               kms->funcs->set_encoder_mode(kms,
+                               dp->dp_display.encoder, false);
+               DRM_DEBUG_DP("set_encoder_mode() Completed\n");
+               encoder_mode_set = true;
+       }
+
+       dp_display_send_hpd_event(&dp->dp_display);
+
+       return 0;
+}
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+       int rc = 0;
+       struct edid *edid;
+
+       dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
+
+       rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
+       if (rc)
+               goto end;
+
+       dp_link_process_request(dp->link);
+
+       edid = dp->panel->edid;
+
+       dp->audio_supported = drm_detect_monitor_audio(edid);
+       dp_panel_handle_sink_request(dp->panel);
+
+       dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+       dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+
+       rc = dp_ctrl_on_link(dp->ctrl);
+       if (rc) {
+               DRM_ERROR("failed to complete DP link training\n");
+               goto end;
+       }
+
+       dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+
+end:
+       return rc;
+}
+
+static void dp_display_host_init(struct dp_display_private *dp)
+{
+       bool flip = false;
+
+       if (dp->core_initialized) {
+               DRM_DEBUG_DP("DP core already initialized\n");
+               return;
+       }
+
+       if (dp->usbpd->orientation == ORIENTATION_CC2)
+               flip = true;
+
+       dp_power_init(dp->power, flip);
+       dp_ctrl_host_init(dp->ctrl, flip);
+       dp_aux_init(dp->aux);
+       dp->core_initialized = true;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               rc = -EINVAL;
+               goto end;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               rc = -ENODEV;
+               goto end;
+       }
+
+       dp_display_host_init(dp);
+
+       /*
+        * set sink to normal operation mode -- D0
+        * before dpcd read
+        */
+       dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+       rc = dp_display_process_hpd_high(dp);
+end:
+       return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               rc = -EINVAL;
+               return rc;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               rc = -ENODEV;
+               return rc;
+       }
+
+       dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+
+       return rc;
+}
+
+static void dp_display_handle_video_request(struct dp_display_private *dp)
+{
+       if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+               dp->panel->video_test = true;
+               dp_link_send_test_response(dp->link);
+       }
+}
+
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+{
+       u32 sink_request;
+
+       sink_request = dp->link->sink_request;
+
+       if (sink_request & DS_PORT_STATUS_CHANGED) {
+               dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+               if (dp_display_is_sink_count_zero(dp)) {
+                       DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+                       return 0;
+               }
+
+               return dp_display_process_hpd_high(dp);
+       }
+
+       dp_ctrl_handle_sink_request(dp->ctrl);
+
+       if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+               dp_display_handle_video_request(dp);
+
+       return 0;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               return -EINVAL;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               return -ENODEV;
+       }
+
+       /* check for any test request issued by sink */
+       rc = dp_link_process_request(dp->link);
+       if (!rc)
+               dp_display_handle_irq_hpd(dp);
+
+       return rc;
+}
+
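+/*
+ * HPD plug handler, called from the event thread: moves the connection
+ * state machine to ST_CONNECT_PENDING, starts link training through the
+ * configure callback and arms EV_CONNECT_PENDING_TIMEOUT so the enable
+ * still happens if no uevent completes the connect in time.
+ */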
+static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+{
+       struct dp_usbpd *hpd = dp->usbpd;
+       u32 state;
+       u32 tout = DP_TIMEOUT_5_SECOND;
+       int ret;
+
+       if (!hpd)
+               return 0;
+
+       mutex_lock(&dp->event_mutex);
+
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_DISCONNECT_PENDING) {
+               /* wait until ST_DISCONNECTED */
+               dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_SUSPENDED)
+               tout = DP_TIMEOUT_NONE;
+
+       atomic_set(&dp->hpd_state, ST_CONNECT_PENDING);
+
+       hpd->hpd_high = 1;
+
+       ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+       if (ret) {      /* failed */
+               hpd->hpd_high = 0;
+               atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+       }
+
+       /* start sanity checking */
+       dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+
+       mutex_unlock(&dp->event_mutex);
+
+       /* uevent will complete connection part */
+       return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data);
+static int dp_display_disable(struct dp_display_private *dp, u32 data);
+
+static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_CONNECT_PENDING) {
+               dp_display_enable(dp, 0);
+               atomic_set(&dp->hpd_state, ST_CONNECTED);
+       }
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+               bool plugged)
+{
+       if (dp_display->plugged_cb && dp_display->codec_dev)
+               dp_display->plugged_cb(dp_display->codec_dev, plugged);
+}
+
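+/*
+ * HPD unplug handler, mirror of the plug path: HPD interrupts are
+ * masked while the disconnect is delivered, the audio codec is notified
+ * early, and EV_DISCONNECT_PENDING_TIMEOUT forces the disable if no
+ * uevent completes the disconnect.
+ */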
+static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+{
+       struct dp_usbpd *hpd = dp->usbpd;
+       u32 state;
+
+       if (!hpd)
+               return 0;
+
+       mutex_lock(&dp->event_mutex);
+
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_CONNECT_PENDING) {
+               /* wait until CONNECTED */
+               dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING);
+
+       /* disable HPD plug interrupt until disconnect is done */
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
+                               | DP_DP_IRQ_HPD_INT_MASK, false);
+
+       hpd->hpd_high = 0;
+
+       /*
+        * We don't need separate work for disconnect as
+        * connect/attention interrupts are disabled
+        */
+       dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
+
+       /* start sanity checking */
+       dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+
+       /* signal the disconnect event early to ensure proper teardown */
+       dp_display_handle_plugged_change(g_dp_display, false);
+       reinit_completion(&dp->audio_comp);
+
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+                                       DP_DP_IRQ_HPD_INT_MASK, true);
+
+       /* uevent will complete disconnection part */
+       mutex_unlock(&dp->event_mutex);
+       return 0;
+}
+
+static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_DISCONNECT_PENDING) {
+               dp_display_disable(dp, 0);
+               atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+       }
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       /* irq_hpd can happen at either connected or disconnected state */
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       dp_display_usbpd_attention_cb(&dp->pdev->dev);
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+       dp_debug_put(dp->debug);
+       dp_ctrl_put(dp->ctrl);
+       dp_panel_put(dp->panel);
+       dp_aux_put(dp->aux);
+       dp_audio_put(dp->audio);
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+       int rc = 0;
+       struct device *dev = &dp->pdev->dev;
+       struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+       struct dp_panel_in panel_in = {
+               .dev = dev,
+       };
+
+       /* Callback APIs used for cable status change event */
+       cb->configure  = dp_display_usbpd_configure_cb;
+       cb->disconnect = dp_display_usbpd_disconnect_cb;
+       cb->attention  = dp_display_usbpd_attention_cb;
+
+       dp->usbpd = dp_hpd_get(dev, cb);
+       if (IS_ERR(dp->usbpd)) {
+               rc = PTR_ERR(dp->usbpd);
+               DRM_ERROR("failed to initialize hpd, rc = %d\n", rc);
+               dp->usbpd = NULL;
+               goto error;
+       }
+
+       dp->parser = dp_parser_get(dp->pdev);
+       if (IS_ERR(dp->parser)) {
+               rc = PTR_ERR(dp->parser);
+               DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
+               dp->parser = NULL;
+               goto error;
+       }
+
+       dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+       if (IS_ERR(dp->catalog)) {
+               rc = PTR_ERR(dp->catalog);
+               DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
+               dp->catalog = NULL;
+               goto error;
+       }
+
+       dp->power = dp_power_get(dp->parser);
+       if (IS_ERR(dp->power)) {
+               rc = PTR_ERR(dp->power);
+               DRM_ERROR("failed to initialize power, rc = %d\n", rc);
+               dp->power = NULL;
+               goto error;
+       }
+
+       dp->aux = dp_aux_get(dev, dp->catalog);
+       if (IS_ERR(dp->aux)) {
+               rc = PTR_ERR(dp->aux);
+               DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
+               dp->aux = NULL;
+               goto error;
+       }
+
+       dp->link = dp_link_get(dev, dp->aux);
+       if (IS_ERR(dp->link)) {
+               rc = PTR_ERR(dp->link);
+               DRM_ERROR("failed to initialize link, rc = %d\n", rc);
+               dp->link = NULL;
+               goto error_link;
+       }
+
+       panel_in.aux = dp->aux;
+       panel_in.catalog = dp->catalog;
+       panel_in.link = dp->link;
+
+       dp->panel = dp_panel_get(&panel_in);
+       if (IS_ERR(dp->panel)) {
+               rc = PTR_ERR(dp->panel);
+               DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
+               dp->panel = NULL;
+               goto error_link;
+       }
+
+       dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+                              dp->power, dp->catalog, dp->parser);
+       if (IS_ERR(dp->ctrl)) {
+               rc = PTR_ERR(dp->ctrl);
+               DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
+               dp->ctrl = NULL;
+               goto error_ctrl;
+       }
+
+       dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
+       if (IS_ERR(dp->audio)) {
+               rc = PTR_ERR(dp->audio);
+               DRM_ERROR("failed to initialize audio, rc = %d\n", rc);
+               dp->audio = NULL;
+               goto error_audio;
+       }
+
+       return rc;
+
+error_audio:
+       dp_ctrl_put(dp->ctrl);
+error_ctrl:
+       dp_panel_put(dp->panel);
+error_link:
+       dp_aux_put(dp->aux);
+error:
+       return rc;
+}
+
+static int dp_display_set_mode(struct msm_dp *dp_display,
+                              struct dp_display_mode *mode)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp->panel->dp_mode.drm_mode = mode->drm_mode;
+       dp->panel->dp_mode.bpp = mode->bpp;
+       dp->panel->dp_mode.capabilities = mode->capabilities;
+       dp_panel_init_panel_info(dp->panel);
+       return 0;
+}
+
+static int dp_display_prepare(struct msm_dp *dp)
+{
+       return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data)
+{
+       int rc = 0;
+       struct msm_dp *dp_display;
+
+       dp_display = g_dp_display;
+
+       if (dp_display->power_on) {
+               DRM_DEBUG_DP("Link already setup, return\n");
+               return 0;
+       }
+
+       rc = dp_ctrl_on_stream(dp->ctrl);
+       if (!rc)
+               dp_display->power_on = true;
+
+       /* complete resume_comp regardless of whether it is armed */
+       complete(&dp->resume_comp);
+       return rc;
+}
+
+static int dp_display_post_enable(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+       u32 rate;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       rate = dp->link->link_params.rate;
+
+       if (dp->audio_supported) {
+               dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate);
+               dp->audio->lane_count = dp->link->link_params.num_lanes;
+       }
+
+       /* signal the connect event late to synchronize video and display */
+       dp_display_handle_plugged_change(dp_display, true);
+       return 0;
+}
+
+static int dp_display_disable(struct dp_display_private *dp, u32 data)
+{
+       struct msm_dp *dp_display;
+
+       dp_display = g_dp_display;
+
+       if (!dp_display->power_on)
+               return -EINVAL;
+
+       /* wait only if audio was enabled */
+       if (dp_display->audio_enabled) {
+               if (!wait_for_completion_timeout(&dp->audio_comp,
+                               HZ * 5))
+                       DRM_ERROR("audio comp timeout\n");
+       }
+
+       dp_display->audio_enabled = false;
+
+       dp_ctrl_off(dp->ctrl);
+
+       dp->core_initialized = false;
+
+       dp_display->power_on = false;
+
+       return 0;
+}
+
+static int dp_display_unprepare(struct msm_dp *dp)
+{
+       return 0;
+}
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+               hdmi_codec_plugged_cb fn, struct device *codec_dev)
+{
+       bool plugged;
+
+       dp_display->plugged_cb = fn;
+       dp_display->codec_dev = codec_dev;
+       plugged = dp_display->is_connected;
+       dp_display_handle_plugged_change(dp_display, plugged);
+
+       return 0;
+}
+
+int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz)
+{
+       const u32 num_components = 3, default_bpp = 24;
+       struct dp_display_private *dp_display;
+       struct dp_link_info *link_info;
+       u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+
+       if (!dp || !mode_pclk_khz || !dp->connector) {
+               DRM_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+       link_info = &dp_display->panel->link_info;
+
+       mode_bpp = dp->connector->display_info.bpc * num_components;
+       if (!mode_bpp)
+               mode_bpp = default_bpp;
+
+       mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+                       mode_bpp, mode_pclk_khz);
+
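+       /*
+        * link_info->rate is the per-lane symbol rate in kHz and each
+        * 8b/10b symbol carries one data byte, so the usable link
+        * bandwidth in kbit/s is lanes * rate * 8 (e.g. 2 lanes at RBR:
+        * 2 * 162000 * 8 = 2592000 kbit/s), while the mode needs
+        * pclk * bpp.
+        */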
+       mode_rate_khz = mode_pclk_khz * mode_bpp;
+       supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+       if (mode_rate_khz > supported_rate_khz)
+               return MODE_BAD;
+
+       return MODE_OK;
+}
+
+int dp_display_get_modes(struct msm_dp *dp,
+                               struct dp_display_mode *dp_mode)
+{
+       struct dp_display_private *dp_display;
+       int ret = 0;
+
+       if (!dp) {
+               DRM_ERROR("invalid params\n");
+               return 0;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       ret = dp_panel_get_modes(dp_display->panel,
+               dp->connector, dp_mode);
+       if (dp_mode->drm_mode.clock)
+               dp->max_pclk_khz = dp_mode->drm_mode.clock;
+       return ret;
+}
+
+bool dp_display_check_video_test(struct msm_dp *dp)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       return dp_display->panel->video_test;
+}
+
+int dp_display_get_test_bpp(struct msm_dp *dp)
+{
+       struct dp_display_private *dp_display;
+
+       if (!dp) {
+               DRM_ERROR("invalid params\n");
+               return 0;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       return dp_link_bit_depth_to_bpp(
+               dp_display->link->test_video.test_bit_depth);
+}
+
+static void dp_display_config_hpd(struct dp_display_private *dp)
+{
+       dp_display_host_init(dp);
+       dp_catalog_ctrl_hpd_config(dp->catalog);
+
+       /*
+        * Enable the interrupt for the first time only; dp clocks are
+        * left on during disconnect and the interrupt is never disabled
+        * afterwards.
+        */
+       enable_irq(dp->irq);
+}
+
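+/*
+ * Consumer side of the event ring. A non-zero @delay re-queues the
+ * event with delay - 1 and switches the thread to a polled wait of
+ * EVENT_TIMEOUT per pass, so delays count in roughly 100ms ticks.
+ */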
+static int hpd_event_thread(void *data)
+{
+       struct dp_display_private *dp_priv;
+       unsigned long flag;
+       struct dp_event *todo;
+       int timeout_mode = 0;
+
+       dp_priv = (struct dp_display_private *)data;
+
+       while (1) {
+               if (timeout_mode) {
+                       wait_event_timeout(dp_priv->event_q,
+                               (dp_priv->event_pndx == dp_priv->event_gndx),
+                                               EVENT_TIMEOUT);
+               } else {
+                       wait_event_interruptible(dp_priv->event_q,
+                               (dp_priv->event_pndx != dp_priv->event_gndx));
+               }
+               spin_lock_irqsave(&dp_priv->event_lock, flag);
+               todo = &dp_priv->event_list[dp_priv->event_gndx];
+               if (todo->delay) {
+                       struct dp_event *todo_next;
+
+                       dp_priv->event_gndx++;
+                       dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+
+                       /* re-enter the delayed event into the queue */
+                       todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
+                       dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+                       todo_next->event_id = todo->event_id;
+                       todo_next->data = todo->data;
+                       todo_next->delay = todo->delay - 1;
+
+                       /* clean up older event */
+                       todo->event_id = EV_NO_EVENT;
+                       todo->delay = 0;
+
+                       /* switch to timeout mode */
+                       timeout_mode = 1;
+                       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+                       continue;
+               }
+
+               /* timeout with no events in q */
+               if (dp_priv->event_pndx == dp_priv->event_gndx) {
+                       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+                       continue;
+               }
+
+               dp_priv->event_gndx++;
+               dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+               timeout_mode = 0;
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+               switch (todo->event_id) {
+               case EV_HPD_INIT_SETUP:
+                       dp_display_config_hpd(dp_priv);
+                       break;
+               case EV_HPD_PLUG_INT:
+                       dp_hpd_plug_handle(dp_priv, todo->data);
+                       break;
+               case EV_HPD_UNPLUG_INT:
+                       dp_hpd_unplug_handle(dp_priv, todo->data);
+                       break;
+               case EV_IRQ_HPD_INT:
+                       dp_irq_hpd_handle(dp_priv, todo->data);
+                       break;
+               case EV_HPD_REPLUG_INT:
+                       /* do nothing */
+                       break;
+               case EV_USER_NOTIFICATION:
+                       dp_display_send_hpd_notification(dp_priv,
+                                               todo->data);
+                       break;
+               case EV_CONNECT_PENDING_TIMEOUT:
+                       dp_connect_pending_timeout(dp_priv,
+                                               todo->data);
+                       break;
+               case EV_DISCONNECT_PENDING_TIMEOUT:
+                       dp_disconnect_pending_timeout(dp_priv,
+                                               todo->data);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+{
+       init_waitqueue_head(&dp_priv->event_q);
+       spin_lock_init(&dp_priv->event_lock);
+
+       kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+}
+
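+/*
+ * Top half: decode the HPD status bits, queue the matching events for
+ * the event thread, then chain into the controller and AUX interrupt
+ * handlers.
+ */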
+static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+{
+       struct dp_display_private *dp = dev_id;
+       irqreturn_t ret = IRQ_HANDLED;
+       u32 hpd_isr_status;
+
+       if (!dp) {
+               DRM_ERROR("invalid data\n");
+               return IRQ_NONE;
+       }
+
+       hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+
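+       /* the low four bits carry the plug/irq_hpd/replug/unplug status */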
+       if (hpd_isr_status & 0x0F) {
+               /* hpd related interrupts */
+               if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
+                       hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+                       dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+               }
+
+               if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
+                       /* delete connect pending event first */
+                       dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
+                       dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+               }
+
+               if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
+                       dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+
+               if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
+                       dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+       }
+
+       /* DP controller isr */
+       dp_ctrl_isr(dp->ctrl);
+
+       /* DP aux isr */
+       dp_aux_isr(dp->aux);
+
+       return ret;
+}
+
+int dp_display_request_irq(struct msm_dp *dp_display)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+       if (!dp->irq) {
+               /* irq_of_parse_and_map() returns 0 on failure */
+               DRM_ERROR("failed to get irq\n");
+               return -EINVAL;
+       }
+
+       rc = devm_request_irq(&dp->pdev->dev, dp->irq,
+                       dp_display_irq_handler,
+                       IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+       if (rc < 0) {
+               DRM_ERROR("failed to request IRQ%u: %d\n",
+                               dp->irq, rc);
+               return rc;
+       }
+       disable_irq(dp->irq);
+
+       return 0;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!pdev || !pdev->dev.of_node) {
+               DRM_ERROR("pdev not found\n");
+               return -ENODEV;
+       }
+
+       dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+       if (!dp)
+               return -ENOMEM;
+
+       dp->pdev = pdev;
+       dp->name = "drm_dp";
+
+       rc = dp_init_sub_modules(dp);
+       if (rc) {
+               DRM_ERROR("init sub module failed\n");
+               return -EPROBE_DEFER;
+       }
+
+       mutex_init(&dp->event_mutex);
+
+       init_completion(&dp->resume_comp);
+
+       g_dp_display = &dp->dp_display;
+
+       /* Store DP audio handle inside DP display */
+       g_dp_display->dp_audio = dp->audio;
+
+       init_completion(&dp->audio_comp);
+
+       platform_set_drvdata(pdev, g_dp_display);
+
+       rc = component_add(&pdev->dev, &dp_display_comp_ops);
+       if (rc) {
+               DRM_ERROR("component add failed, rc=%d\n", rc);
+               dp_display_deinit_sub_modules(dp);
+       }
+
+       return rc;
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+
+       dp_display_deinit_sub_modules(dp);
+
+       component_del(&pdev->dev, &dp_display_comp_ops);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static int dp_pm_resume(struct device *dev)
+{
+       return 0;
+}
+
+static int dp_pm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dp_display_private *dp = platform_get_drvdata(pdev);
+
+       if (!dp) {
+               DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+               return -EINVAL;
+       }
+
+       atomic_set(&dp->hpd_state, ST_SUSPENDED);
+
+       return 0;
+}
+
+static int dp_pm_prepare(struct device *dev)
+{
+       return 0;
+}
+
+static void dp_pm_complete(struct device *dev)
+{
+
+}
+
+static const struct dev_pm_ops dp_pm_ops = {
+       .suspend = dp_pm_suspend,
+       .resume =  dp_pm_resume,
+       .prepare = dp_pm_prepare,
+       .complete = dp_pm_complete,
+};
+
+static struct platform_driver dp_display_driver = {
+       .probe  = dp_display_probe,
+       .remove = dp_display_remove,
+       .driver = {
+               .name = "msm-dp-display",
+               .of_match_table = dp_dt_match,
+               .suppress_bind_attrs = true,
+               .pm = &dp_pm_ops,
+       },
+};
+
+int __init msm_dp_register(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&dp_display_driver);
+       if (ret)
+               DRM_ERROR("DP display driver register failed\n");
+
+       return ret;
+}
+
+void __exit msm_dp_unregister(void)
+{
+       platform_driver_unregister(&dp_display_driver);
+}
+
+void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+
+       if (!dp_display)
+               return;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp_hpd_event_setup(dp);
+
+       dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+}
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+{
+       struct dp_display_private *dp;
+       struct device *dev;
+       int rc;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+       dev = &dp->pdev->dev;
+
+       dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
+                                       dp->link, &dp->dp_display.connector,
+                                       minor);
+       if (IS_ERR(dp->debug)) {
+               rc = PTR_ERR(dp->debug);
+               DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
+               dp->debug = NULL;
+       }
+}
+
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+                       struct drm_encoder *encoder)
+{
+       struct msm_drm_private *priv;
+       int ret;
+
+       if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
+               return -EINVAL;
+
+       priv = dev->dev_private;
+       dp_display->drm_dev = dev;
+
+       ret = dp_display_request_irq(dp_display);
+       if (ret) {
+               DRM_ERROR("request_irq failed, ret=%d\n", ret);
+               return ret;
+       }
+
+       dp_display->encoder = encoder;
+
+       dp_display->connector = dp_drm_connector_init(dp_display);
+       if (IS_ERR(dp_display->connector)) {
+               ret = PTR_ERR(dp_display->connector);
+               DRM_DEV_ERROR(dev->dev,
+                       "failed to create dp connector: %d\n", ret);
+               dp_display->connector = NULL;
+               return ret;
+       }
+
+       priv->connectors[priv->num_connectors++] = dp_display->connector;
+       return 0;
+}
+
+static int dp_display_wait4resume_done(struct dp_display_private *dp)
+{
+       int ret = 0;
+
+       reinit_completion(&dp->resume_comp);
+       if (!wait_for_completion_timeout(&dp->resume_comp,
+                               WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) {
+               DRM_ERROR("wait4resume_done timed out\n");
+               ret = -ETIMEDOUT;
+       }
+       return ret;
+}
+
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       int rc = 0;
+       struct dp_display_private *dp_display;
+       u32 state;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+       if (!dp_display->dp_mode.drm_mode.clock) {
+               DRM_ERROR("invalid mode: pixel clock is zero\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dp_display->event_mutex);
+
+       rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+       if (rc) {
+               DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
+               mutex_unlock(&dp_display->event_mutex);
+               return rc;
+       }
+
+       rc = dp_display_prepare(dp);
+       if (rc) {
+               DRM_ERROR("DP display prepare failed, rc=%d\n", rc);
+               mutex_unlock(&dp_display->event_mutex);
+               return rc;
+       }
+
+       state = atomic_read(&dp_display->hpd_state);
+       if (state == ST_SUSPENDED) {
+               /* start link training */
+               dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0);
+               mutex_unlock(&dp_display->event_mutex);
+
+               /* wait until dp interface is up */
+               goto resume_done;
+       }
+
+       dp_display_enable(dp_display, 0);
+
+       rc = dp_display_post_enable(dp);
+       if (rc) {
+               DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
+               dp_display_disable(dp_display, 0);
+               dp_display_unprepare(dp);
+       }
+
+       dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
+
+       if (state == ST_SUSPEND_PENDING)
+               dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
+
+       /* completed connection */
+       atomic_set(&dp_display->hpd_state, ST_CONNECTED);
+
+       mutex_unlock(&dp_display->event_mutex);
+
+       return rc;
+
+resume_done:
+       dp_display_wait4resume_done(dp_display);
+       return rc;
+}
+
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       dp_ctrl_push_idle(dp_display->ctrl);
+
+       return 0;
+}
+
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       int rc = 0;
+       u32 state;
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       mutex_lock(&dp_display->event_mutex);
+
+       dp_display_disable(dp_display, 0);
+
+       rc = dp_display_unprepare(dp);
+       if (rc)
+               DRM_ERROR("DP display unprepare failed, rc=%d\n", rc);
+
+       dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
+
+       state = atomic_read(&dp_display->hpd_state);
+       if (state == ST_DISCONNECT_PENDING) {
+               /* completed disconnection */
+               atomic_set(&dp_display->hpd_state, ST_DISCONNECTED);
+       } else {
+               atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING);
+       }
+
+       mutex_unlock(&dp_display->event_mutex);
+       return rc;
+}
+
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+
+       if (dp_display_check_video_test(dp))
+               dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+       else /* Default num_components per pixel = 3 */
+               dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+
+       if (!dp_display->dp_mode.bpp)
+               dp_display->dp_mode.bpp = 24; /* Default bpp */
+
+       drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+
+       dp_display->dp_mode.v_active_low =
+               !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+
+       dp_display->dp_mode.h_active_low =
+               !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+}
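+
+/*
+ * Worked example for the bpp selection above (illustrative): a sink that
+ * reports 8 bits per component yields bpp = 8 * 3 = 24; if the reported bpc
+ * is 0 (unknown), the fallback of 24 bpp is used instead.
+ */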
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644 (file)
index 0000000..6092ba1
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include "dp_panel.h"
+#include <sound/hdmi-codec.h>
+
+struct msm_dp {
+       struct drm_device *drm_dev;
+       struct device *codec_dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       bool is_connected;
+       bool audio_enabled;
+       bool power_on;
+
+       hdmi_codec_plugged_cb plugged_cb;
+
+       u32 max_pclk_khz;
+
+       u32 max_dp_lanes;
+       struct dp_audio *dp_audio;
+};
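+
+/*
+ * struct msm_dp is the public handle embedded in the driver-private
+ * struct dp_display_private; implementation code recovers the private
+ * data with container_of(), e.g. (illustrative):
+ *
+ *	struct dp_display_private *dp =
+ *		container_of(dp_display, struct dp_display_private, dp_display);
+ */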
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+               hdmi_codec_plugged_cb fn, struct device *codec_dev);
+int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz);
+int dp_display_get_modes(struct msm_dp *dp_display,
+               struct dp_display_mode *dp_mode);
+int dp_display_request_irq(struct msm_dp *dp_display);
+bool dp_display_check_video_test(struct msm_dp *dp_display);
+int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644 (file)
index 0000000..764f4b8
--- /dev/null
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_drm.h"
+
+struct dp_connector {
+       struct drm_connector base;
+       struct msm_dp *dp_display;
+};
+#define to_dp_connector(x) container_of(x, struct dp_connector, base)
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * Returns: Connector 'is connected' status
+ */
+static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+               bool force)
+{
+       struct msm_dp *dp;
+
+       dp = to_dp_connector(conn)->dp_display;
+
+       DRM_DEBUG_DP("is_connected = %s\n",
+               (dp->is_connected) ? "true" : "false");
+
+       return (dp->is_connected) ? connector_status_connected :
+                                       connector_status_disconnected;
+}
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * Returns: Number of modes added
+ */
+static int dp_connector_get_modes(struct drm_connector *connector)
+{
+       int rc = 0;
+       struct msm_dp *dp;
+       struct dp_display_mode *dp_mode = NULL;
+       struct drm_display_mode *m, drm_mode;
+
+       if (!connector)
+               return 0;
+
+       dp = to_dp_connector(connector)->dp_display;
+
+       dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
+       if (!dp_mode)
+               return 0;
+
+       /* the pluggable case assumes the EDID is read when HPD goes high */
+       if (dp->is_connected) {
+               /*
+                * The get_modes() function might return one mode that is stored
+                * in dp_mode when a compliance test is in progress. Otherwise,
+                * the return value equals the total number of modes supported
+                * by the sink.
+                */
+               rc = dp_display_get_modes(dp, dp_mode);
+               if (rc <= 0) {
+                       DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
+                       kfree(dp_mode);
+                       return rc;
+               }
+               if (dp_mode->drm_mode.clock) { /* valid DP mode */
+                       memset(&drm_mode, 0x0, sizeof(drm_mode));
+                       drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
+                       m = drm_mode_duplicate(connector->dev, &drm_mode);
+                       if (!m) {
+                               DRM_ERROR("failed to add mode %ux%u\n",
+                                      drm_mode.hdisplay,
+                                      drm_mode.vdisplay);
+                               kfree(dp_mode);
+                               return 0;
+                       }
+                       drm_mode_probed_add(connector, m);
+               }
+       } else {
+               DRM_DEBUG_DP("No sink connected\n");
+       }
+       kfree(dp_mode);
+       return rc;
+}
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+static enum drm_mode_status dp_connector_mode_valid(
+               struct drm_connector *connector,
+               struct drm_display_mode *mode)
+{
+       struct msm_dp *dp_disp;
+
+       dp_disp = to_dp_connector(connector)->dp_display;
+
+       if (!dp_disp->max_pclk_khz ||
+                       (dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
+                       (mode->clock > dp_disp->max_pclk_khz))
+               return MODE_BAD;
+
+       return dp_display_validate_mode(dp_disp, mode->clock);
+}
+
+static const struct drm_connector_funcs dp_connector_funcs = {
+       .detect = dp_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
+       .get_modes = dp_connector_get_modes,
+       .mode_valid = dp_connector_mode_valid,
+};
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+{
+       struct drm_connector *connector = NULL;
+       struct dp_connector *dp_connector;
+       int ret;
+
+       dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
+                                       sizeof(*dp_connector),
+                                       GFP_KERNEL);
+       if (!dp_connector)
+               return ERR_PTR(-ENOMEM);
+
+       dp_connector->dp_display = dp_display;
+
+       connector = &dp_connector->base;
+
+       ret = drm_connector_init(dp_display->drm_dev, connector,
+                       &dp_connector_funcs,
+                       DRM_MODE_CONNECTOR_DisplayPort);
+       if (ret)
+               return ERR_PTR(ret);
+
+       drm_connector_helper_add(connector, &dp_connector_helper_funcs);
+
+       /*
+        * Mark the connector as HPD-capable so that hotplug events are
+        * handled when the cable is connected or disconnected.
+        */
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+       drm_connector_attach_encoder(connector, dp_display->encoder);
+
+       return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644 (file)
index 0000000..c27bfce
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644 (file)
index 0000000..5b8fe32
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS    0x10
+#define DP_USBPD_VDM_CONFIGURE 0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION            0x0
+#define USB_C_DP_SID           0xFF01
+
+struct dp_hpd_private {
+       struct device *dev;
+       struct dp_usbpd_cb *dp_cb;
+       struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+       int rc = 0;
+       struct dp_hpd_private *hpd_priv;
+
+       hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+                                       dp_usbpd);
+
+       dp_usbpd->hpd_high = hpd;
+
+       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+                               || !hpd_priv->dp_cb->disconnect) {
+               pr_err("hpd dp_cb not initialized\n");
+               return -EINVAL;
+       }
+       if (hpd)
+               hpd_priv->dp_cb->configure(hpd_priv->dev);
+       else
+               hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+       return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+       struct dp_hpd_private *dp_hpd;
+
+       if (!cb) {
+               pr_err("invalid cb data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+       if (!dp_hpd)
+               return ERR_PTR(-ENOMEM);
+
+       dp_hpd->dev = dev;
+       dp_hpd->dp_cb = cb;
+
+       dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+       return &dp_hpd->dp_usbpd;
+}
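+
+/*
+ * Usage sketch (illustrative; pd_configure/pd_disconnect are hypothetical
+ * callbacks supplied by the caller):
+ *
+ *	static struct dp_usbpd_cb cb = {
+ *		.configure = pd_configure,
+ *		.disconnect = pd_disconnect,
+ *	};
+ *	struct dp_usbpd *usbpd = dp_hpd_get(dev, &cb);
+ *
+ *	if (!IS_ERR(usbpd))
+ *		usbpd->connect(usbpd, true);	// simulate a plug event
+ */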
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644 (file)
index 0000000..5bc5bb6
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+       ORIENTATION_NONE,
+       ORIENTATION_CC1,
+       ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high.
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ * @debug_en: bool to specify debug mode
+ * @connect: simulate disconnect or connect for debug mode
+ */
+struct dp_usbpd {
+       enum plug_orientation orientation;
+       bool low_pow_st;
+       bool adaptor_dp_en;
+       bool multi_func;
+       bool usb_config_req;
+       bool exit_dp_mode;
+       bool hpd_high;
+       bool hpd_irq;
+       bool alt_mode_cfg_done;
+       bool debug_en;
+
+       int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+};
+
+/**
+ * struct dp_usbpd_cb - callback functions provided by the client
+ *
+ * @configure: called by the usbpd module when PD communication has
+ * completed and the usb peripheral has been configured in DP mode.
+ * @disconnect: notify the cable disconnect issued by usb.
+ * @attention: notify any attention message issued by usb.
+ */
+struct dp_usbpd_cb {
+       int (*configure)(struct device *dev);
+       int (*disconnect)(struct device *dev);
+       int (*attention)(struct device *dev);
+};
+
+/**
+ * dp_hpd_get() - setup hpd module
+ *
+ * @dev: device instance of the caller
+ * @cb: struct containing callback function pointers.
+ *
+ * This function allows the client to initialize the usbpd
+ * module, which in turn communicates with the HPD module.
+ */
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
+
+int dp_hpd_register(struct dp_usbpd *dp_usbpd);
+void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
+
+#endif /* _DP_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644 (file)
index 0000000..c811da5
--- /dev/null
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <drm/drm_print.h>
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_TEST_REQUEST_MASK           0x7F
+
+enum audio_sample_rate {
+       AUDIO_SAMPLE_RATE_32_KHZ        = 0x00,
+       AUDIO_SAMPLE_RATE_44_1_KHZ      = 0x01,
+       AUDIO_SAMPLE_RATE_48_KHZ        = 0x02,
+       AUDIO_SAMPLE_RATE_88_2_KHZ      = 0x03,
+       AUDIO_SAMPLE_RATE_96_KHZ        = 0x04,
+       AUDIO_SAMPLE_RATE_176_4_KHZ     = 0x05,
+       AUDIO_SAMPLE_RATE_192_KHZ       = 0x06,
+};
+
+enum audio_pattern_type {
+       AUDIO_TEST_PATTERN_OPERATOR_DEFINED     = 0x00,
+       AUDIO_TEST_PATTERN_SAWTOOTH             = 0x01,
+};
+
+struct dp_link_request {
+       u32 test_requested;
+       u32 test_link_rate;
+       u32 test_lane_count;
+};
+
+struct dp_link_private {
+       u32 prev_sink_count;
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_link dp_link;
+
+       struct dp_link_request request;
+       struct mutex psm_mutex;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+};
+
+static int dp_aux_link_power_up(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 value;
+       int err;
+
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D0;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       usleep_range(1000, 2000);
+
+       return 0;
+}
+
+static int dp_aux_link_power_down(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 value;
+       int err;
+
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D3;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+       int ret = 0;
+       u8 data;
+       u32 const max_audio_period = 0xA;
+
+       /* TEST_AUDIO_PERIOD_CH_XX */
+       if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) {
+               DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /* Period - Bits 3:0 */
+       data = data & 0xF;
+       if (data > max_audio_period) {
+               DRM_ERROR("invalid test audio period = 0x%x\n", data);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = data;
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+       int ret = 0;
+       struct dp_link_test_audio *req = &link->dp_link.test_audio;
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_1 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_2 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret);
+
+       /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_3 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_4 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_5 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_6 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_7 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_8 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+       return ret;
+}
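+
+/*
+ * Design note: the eight reads above differ only in DPCD address and
+ * destination field; an equivalent table-driven form would be (sketch,
+ * with hypothetical ch_addr[] / ch_period[] lookup tables):
+ *
+ *	for (i = 0; i < ARRAY_SIZE(ch_addr); i++) {
+ *		ret = dp_link_get_period(link, ch_addr[i]);
+ *		if (ret == -EINVAL)
+ *			return ret;
+ *		*ch_period[i] = ret;
+ *	}
+ */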
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+       int const max_audio_pattern_type = 0x1;
+
+       rlen = drm_dp_dpcd_readb(link->aux,
+                               DP_TEST_AUDIO_PATTERN_TYPE, &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read test audio pattern type. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       /* Audio Pattern Type - Bits 7:0 */
+       if ((int)data > max_audio_pattern_type) {
+               DRM_ERROR("invalid audio pattern type = 0x%x\n", data);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       link->dp_link.test_audio.test_audio_pattern_type = data;
+       DRM_DEBUG_DP("audio pattern type = 0x%x\n", data);
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+       int const max_audio_sampling_rate = 0x6;
+       int const max_audio_channel_count = 0x8;
+       int sampling_rate = 0x0;
+       int channel_count = 0x0;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read test audio mode. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       /* Sampling Rate - Bits 3:0 */
+       sampling_rate = data & 0xF;
+       if (sampling_rate > max_audio_sampling_rate) {
+               DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n",
+                               sampling_rate, max_audio_sampling_rate);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /* Channel Count - Bits 7:4 */
+       channel_count = ((data & 0xF0) >> 4) + 1;
+       if (channel_count > max_audio_channel_count) {
+               DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n",
+                               channel_count, max_audio_channel_count);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+       link->dp_link.test_audio.test_audio_channel_count = channel_count;
+       DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n",
+                                       sampling_rate, channel_count);
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+       int ret = 0;
+
+       ret = dp_link_parse_audio_mode(link);
+       if (ret)
+               goto exit;
+
+       ret = dp_link_parse_audio_pattern_type(link);
+       if (ret)
+               goto exit;
+
+       ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+       return ret;
+}
+
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+       switch (pattern) {
+       case DP_NO_TEST_PATTERN:
+       case DP_COLOR_RAMP:
+       case DP_BLACK_AND_WHITE_VERTICAL_LINES:
+       case DP_COLOR_SQUARE:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+       /* any depth not listed below, e.g. DP_TEST_BIT_DEPTH_UNKNOWN, is invalid */
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+       case DP_TEST_BIT_DEPTH_8:
+       case DP_TEST_BIT_DEPTH_10:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+                                       int addr, int len, u32 *val)
+{
+       u8 bp[2];
+       int rlen;
+
+       if (len != 2)
+               return -EINVAL;
+
+       /* Read two bytes of the requested timing parameter. */
+       rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+       if (rlen < len) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+
+       *val = bp[1] | (bp[0] << 8);
+
+       return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+                                       int addr, int len,
+                                       u32 *val1, u32 *val2)
+{
+       u8 bp[2];
+       int rlen;
+
+       if (len != 2)
+               return -EINVAL;
+
+       /* Read two bytes of the requested timing parameter. */
+       rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+       if (rlen < len) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+
+       *val1 = (bp[0] & BIT(7)) >> 7;
+       *val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+       return 0;
+}
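+
+/*
+ * Worked example for the two decoders above: with bp[0] = 0x81 and
+ * bp[1] = 0x02, dp_link_parse_timing_params1() yields
+ * val = 0x02 | (0x81 << 8) = 0x8102, while dp_link_parse_timing_params2()
+ * splits the same bytes into val1 = bit 7 of bp[0] = 1 (sync polarity) and
+ * val2 = 0x02 | ((0x81 & 0x7F) << 8) = 0x0102 (15-bit width).
+ */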
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+                                       int addr, u32 *val)
+{
+       u8 bp;
+       u32 len = 1;
+       int rlen;
+
+       rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
+       if (rlen < 1) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+       *val = bp;
+
+       return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video test pattern and the test
+ * bit depth requested by the sink, and if the parsed values are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+       int ret = 0;
+       ssize_t rlen;
+       u8 bp;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read test video pattern. rlen=%zd\n",
+                       rlen);
+               return rlen;
+       }
+
+       if (!dp_link_is_video_pattern_valid(bp)) {
+               DRM_ERROR("invalid test video pattern = 0x%x\n", bp);
+               ret = -EINVAL;
+               return ret;
+       }
+
+       link->dp_link.test_video.test_video_pattern = bp;
+
+       /* Read the requested color bit depth and dynamic range (Byte 0x232) */
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       /* Dynamic Range */
+       link->dp_link.test_video.test_dyn_range =
+                       (bp & DP_TEST_DYNAMIC_RANGE_CEA);
+
+       /* Color bit depth */
+       bp &= DP_TEST_BIT_DEPTH_MASK;
+       if (!dp_link_is_bit_depth_valid(bp)) {
+               DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
+               ret = -EINVAL;
+               return ret;
+       }
+
+       link->dp_link.test_video.test_bit_depth = bp;
+
+       /* resolution timing params */
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+                       &link->dp_link.test_video.test_h_total);
+       if (ret) {
+               DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+                       &link->dp_link.test_video.test_v_total);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+                       &link->dp_link.test_video.test_h_start);
+       if (ret) {
+               DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+                       &link->dp_link.test_video.test_v_start);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+                       &link->dp_link.test_video.test_hsync_pol,
+                       &link->dp_link.test_video.test_hsync_width);
+       if (ret) {
+               DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+                       &link->dp_link.test_video.test_vsync_pol,
+                       &link->dp_link.test_video.test_vsync_width);
+       if (ret) {
+               DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+                       &link->dp_link.test_video.test_h_width);
+       if (ret) {
+               DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+                       &link->dp_link.test_video.test_v_height);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_height\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+               &link->dp_link.test_video.test_rr_d);
+       link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+       if (ret) {
+               DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+               &link->dp_link.test_video.test_rr_n);
+       if (ret) {
+               DRM_ERROR("failed to parse test_rr_n\n");
+               return ret;
+       }
+
+       DRM_DEBUG_DP("link video pattern = 0x%x\n"
+               "link dynamic range = 0x%x\n"
+               "link bit depth = 0x%x\n"
+               "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
+               "TEST_H_START = %d, TEST_V_START = %d\n"
+               "TEST_HSYNC_POL = %d\n"
+               "TEST_HSYNC_WIDTH = %d\n"
+               "TEST_VSYNC_POL = %d\n"
+               "TEST_VSYNC_WIDTH = %d\n"
+               "TEST_H_WIDTH = %d\n"
+               "TEST_V_HEIGHT = %d\n"
+               "TEST_REFRESH_DENOMINATOR = %d\n"
+               "TEST_REFRESH_NUMERATOR = %d\n",
+               link->dp_link.test_video.test_video_pattern,
+               link->dp_link.test_video.test_dyn_range,
+               link->dp_link.test_video.test_bit_depth,
+               link->dp_link.test_video.test_h_total,
+               link->dp_link.test_video.test_v_total,
+               link->dp_link.test_video.test_h_start,
+               link->dp_link.test_video.test_v_start,
+               link->dp_link.test_video.test_hsync_pol,
+               link->dp_link.test_video.test_hsync_width,
+               link->dp_link.test_video.test_vsync_pol,
+               link->dp_link.test_video.test_vsync_width,
+               link->dp_link.test_video.test_h_width,
+               link->dp_link.test_video.test_v_height,
+               link->dp_link.test_video.test_rr_d,
+               link->dp_link.test_video.test_rr_n);
+
+       return ret;
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the test link rate (Byte 0x219) and
+ * lane count (Byte 0x220), and if the parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+       u8 bp;
+       ssize_t rlen;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE,  &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       if (!is_link_rate_valid(bp)) {
+               DRM_ERROR("invalid link rate = 0x%x\n", bp);
+               return -EINVAL;
+       }
+
+       link->request.test_link_rate = bp;
+       DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate);
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
+               return rlen;
+       }
+       bp &= DP_MAX_LANE_COUNT_MASK;
+
+       if (!is_lane_count_valid(bp)) {
+               DRM_ERROR("invalid lane count = 0x%x\n", bp);
+               return -EINVAL;
+       }
+
+       link->request.test_lane_count = bp;
+       DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count);
+       return 0;
+}
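+
+/*
+ * Example: DP_TEST_LINK_RATE is encoded in units of 270 Mb/s per lane, so a
+ * readback of 0x0a requests 2.7 Gb/s (HBR) and 0x14 requests 5.4 Gb/s (HBR2);
+ * is_link_rate_valid() rejects values outside the supported set.
+ */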
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+       u8 data;
+       ssize_t rlen;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
+                                       &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read phy test pattern. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+
+       DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data);
+
+       switch (data) {
+       case DP_PHY_TEST_PATTERN_SEL_MASK:
+       case DP_PHY_TEST_PATTERN_NONE:
+       case DP_PHY_TEST_PATTERN_D10_2:
+       case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+       case DP_PHY_TEST_PATTERN_PRBS7:
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+       case DP_PHY_TEST_PATTERN_CP2520:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test requested by the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+       u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
+                               DP_TEST_LINK_AUDIO_PATTERN |
+                               DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+
+       return ((link & video_audio_test) &&
+               !(link & ~video_audio_test));
+}
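+
+/*
+ * The mask logic above accepts a request only if at least one of the three
+ * audio/video bits is set and no other bit is set. For example,
+ * DP_TEST_LINK_VIDEO_PATTERN | DP_TEST_LINK_AUDIO_PATTERN passes, while
+ * DP_TEST_LINK_VIDEO_PATTERN | DP_TEST_LINK_TRAINING fails the
+ * !(link & ~video_audio_test) check.
+ */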
+
+/**
+ * dp_link_parse_request() - parses link request parameters from sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+
+       /*
+        * Read the device service IRQ vector (Byte 0x201) to determine
+        * whether an automated test has been requested by the sink.
+        */
+       rlen = drm_dp_dpcd_readb(link->aux,
+                               DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
+       if (rlen < 0) {
+               DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       DRM_DEBUG_DP("device service irq vector = 0x%x\n", data);
+
+       if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
+               DRM_DEBUG_DP("no test requested\n");
+               return 0;
+       }
+
+       /*
+        * Read the test request byte (Byte 0x218) to determine what type
+        * of automated test has been requested by the sink.
+        */
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
+       if (rlen < 0) {
+               DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
+               DRM_DEBUG_DP("test 0x%x not supported\n", data);
+               goto end;
+       }
+
+       DRM_DEBUG_DP("Test:(0x%x) requested\n", data);
+       link->request.test_requested = data;
+       if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
+               ret = dp_link_parse_phy_test_params(link);
+               if (ret)
+                       goto end;
+               ret = dp_link_parse_link_training_params(link);
+               if (ret)
+                       goto end;
+       }
+
+       if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
+               ret = dp_link_parse_link_training_params(link);
+               if (ret)
+                       goto end;
+       }
+
+       if (dp_link_is_video_audio_test_requested(
+                       link->request.test_requested)) {
+               ret = dp_link_parse_video_pattern_params(link);
+               if (ret)
+                       goto end;
+
+               ret = dp_link_parse_audio_pattern_params(link);
+       }
+end:
+       /*
+        * Send a DP_TEST_ACK if all test parameters are valid, otherwise send
+        * a DP_TEST_NAK.
+        */
+       if (ret) {
+               link->dp_link.test_response = DP_TEST_NAK;
+       } else {
+               if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
+                       link->dp_link.test_response = DP_TEST_ACK;
+               else
+                       link->dp_link.test_response =
+                               DP_TEST_EDID_CHECKSUM_WRITE;
+       }
+
+       return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+       ssize_t rlen;
+       bool cp_ready;
+
+       struct dp_link_private *link = container_of(dp_link,
+                       struct dp_link_private, dp_link);
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+                                &link->dp_link.sink_count);
+       if (rlen < 0) {
+               DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
+       link->dp_link.sink_count =
+               DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+       DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n",
+               link->dp_link.sink_count, cp_ready);
+       return 0;
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+       int len = 0;
+
+       link->prev_sink_count = link->dp_link.sink_count;
+       dp_link_parse_sink_count(&link->dp_link);
+
+       len = drm_dp_dpcd_read_link_status(link->aux,
+               link->link_status);
+       if (len < DP_LINK_STATUS_SIZE)
+               DRM_ERROR("DP link status read failed\n");
+       dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+       if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+               return -EINVAL;
+
+       DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+                       DP_TEST_LINK_TRAINING,
+                       link->request.test_link_rate,
+                       link->request.test_lane_count);
+
+       link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+       link->dp_link.link_params.rate = link->request.test_link_rate;
+
+       return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return false;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+                       dp_link->test_response);
+
+       return ret == 1;
+}
+
+int dp_link_psm_config(struct dp_link *dp_link,
+                             struct dp_link_info *link_info, bool enable)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       mutex_lock(&link->psm_mutex);
+       if (enable)
+               ret = dp_aux_link_power_down(link->aux, link_info);
+       else
+               ret = dp_aux_link_power_up(link->aux, link_info);
+
+       if (ret)
+               DRM_ERROR("Failed to %s low power mode\n", enable ?
+                                                       "enter" : "exit");
+       else
+               dp_link->psm_enabled = enable;
+
+       mutex_unlock(&link->psm_mutex);
+       return ret;
+}
+
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return false;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
+                                               checksum);
+       return ret == 1;
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+       int ret = 0;
+
+       DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+               drm_dp_get_adjust_request_voltage(link->link_status, 0),
+               drm_dp_get_adjust_request_voltage(link->link_status, 1),
+               drm_dp_get_adjust_request_voltage(link->link_status, 2),
+               drm_dp_get_adjust_request_voltage(link->link_status, 3));
+
+       DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
+
+       /*
+        * Update the voltage and pre-emphasis levels as per DPCD request
+        * vector.
+        */
+       DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n",
+                       link->dp_link.phy_params.v_level,
+                       link->dp_link.phy_params.p_level);
+       link->dp_link.phy_params.v_level =
+               drm_dp_get_adjust_request_voltage(link->link_status, 0);
+       link->dp_link.phy_params.p_level =
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
+       DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
+                       link->dp_link.phy_params.v_level,
+                       link->dp_link.phy_params.p_level);
+
+       return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy link pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy link pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+               struct dp_link_private *link)
+{
+       int ret = 0;
+
+       if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
+               DRM_DEBUG_DP("no phy test\n");
+               return -EINVAL;
+       }
+
+       if (!is_link_rate_valid(link->request.test_link_rate) ||
+               !is_lane_count_valid(link->request.test_lane_count)) {
+               DRM_ERROR("Invalid: link rate = 0x%x, lane count = 0x%x\n",
+                               link->request.test_link_rate,
+                               link->request.test_lane_count);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n",
+                       link->dp_link.link_params.rate,
+                       link->dp_link.link_params.num_lanes);
+
+       DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n",
+                       link->request.test_link_rate,
+                       link->request.test_lane_count);
+
+       link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+       link->dp_link.link_params.rate = link->request.test_link_rate;
+
+       ret = dp_link_parse_vx_px(link);
+
+       if (ret)
+               DRM_ERROR("parse_vx_px failed. ret=%d\n", ret);
+
+       return ret;
+}
+
+static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+       return link_status[r - DP_LANE0_1_STATUS];
+}
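+
+/*
+ * Example: get_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED) indexes
+ * the cached DPCD status block relative to DP_LANE0_1_STATUS (0x202), so the
+ * lane-align byte at DPCD address 0x204 lands at link_status[2].
+ */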
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+       if (!(get_link_status(link->link_status,
+                               DP_LANE_ALIGN_STATUS_UPDATED) &
+                               DP_LINK_STATUS_UPDATED) ||
+                       (drm_dp_clock_recovery_ok(link->link_status,
+                                       link->dp_link.link_params.num_lanes) &&
+                       drm_dp_channel_eq_ok(link->link_status,
+                                       link->dp_link.link_params.num_lanes)))
+               return -EINVAL;
+
+       DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
+                       drm_dp_channel_eq_ok(link->link_status,
+                       link->dp_link.link_params.num_lanes),
+                       drm_dp_clock_recovery_ok(link->link_status,
+                       link->dp_link.link_params.num_lanes));
+
+       return 0;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+       if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
+                                       DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+               goto reset;
+
+       if (link->prev_sink_count == link->dp_link.sink_count)
+               return -EINVAL;
+
+reset:
+       /* reset prev_sink_count */
+       link->prev_sink_count = link->dp_link.sink_count;
+
+       return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+       return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+               && !(link->request.test_requested &
+               DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+       return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+       link->request = (const struct dp_link_request){ 0 };
+       link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
+       link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+       link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
+       link->dp_link.phy_params.phy_test_pattern_sel = 0;
+       link->dp_link.sink_request = 0;
+       link->dp_link.test_response = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+int dp_link_process_request(struct dp_link *dp_link)
+{
+       int ret = 0;
+       struct dp_link_private *link;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       dp_link_reset_data(link);
+
+       dp_link_parse_sink_status_field(link);
+
+       if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
+               dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+               return ret;
+       }
+
+       ret = dp_link_process_ds_port_status_change(link);
+       if (!ret) {
+               dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+               return ret;
+       }
+
+       ret = dp_link_process_link_training_request(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+               return ret;
+       }
+
+       ret = dp_link_process_phy_test_pattern_request(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+               return ret;
+       }
+
+       ret = dp_link_process_link_status_update(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+               return ret;
+       }
+
+       if (dp_link_is_video_pattern_requested(link)) {
+               ret = 0;
+               dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+       }
+
+       if (dp_link_is_audio_pattern_requested(link)) {
+               dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+       u32 cc;
+       struct dp_link_private *link;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       /*
+        * Unless a video pattern CTS test is ongoing, use RGB_VESA
+        * Only RGB_VESA and RGB_CEA supported for now
+        */
+       if (dp_link_is_video_pattern_requested(link))
+               cc = link->dp_link.test_video.test_dyn_range;
+       else
+               cc = DP_TEST_DYNAMIC_RANGE_VESA;
+
+       return cc;
+}
+
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+{
+       int i;
+       int v_max = 0, p_max = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       /* use the max level across lanes */
+       for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+               u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
+               u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
+                                                                        i);
+               DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
+                               i, data_v, data_p);
+               if (v_max < data_v)
+                       v_max = data_v;
+               if (p_max < data_p)
+                       p_max = data_p;
+       }
+
+       dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+       dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+       /**
+        * Adjust the voltage swing and pre-emphasis level combination to within
+        * the allowable range.
+        */
+       if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+               DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n",
+                       dp_link->phy_params.v_level,
+                       DP_TRAIN_VOLTAGE_SWING_MAX);
+               dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+       }
+
+       if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+               DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+                       dp_link->phy_params.p_level,
+                       DP_TRAIN_PRE_EMPHASIS_MAX);
+               dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+       }
+
+       if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+               && (dp_link->phy_params.v_level ==
+                       DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+               DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+                       dp_link->phy_params.p_level,
+                       DP_TRAIN_PRE_EMPHASIS_LVL_1);
+               dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+       }
+
+       DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n",
+               dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+
+       return 0;
+}
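+
+/*
+ * Worked example (illustrative, not part of the original patch): if one
+ * lane requests voltage swing level 2 and another requests pre-emphasis
+ * level 2, the max across lanes yields v_level=2 and p_level=2; the
+ * final check then clamps p_level to 1, since swing level 2 only allows
+ * pre-emphasis up to level 1.
+ */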
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+       u32 tbd;
+
+       /*
+        * A few simplistic rules and assumptions are made here:
+        *    1. Test bit depth is bit depth per color component
+        *    2. Assume 3 color components
+        */
+       switch (bpp) {
+       case 18:
+               tbd = DP_TEST_BIT_DEPTH_6;
+               break;
+       case 24:
+               tbd = DP_TEST_BIT_DEPTH_8;
+               break;
+       case 30:
+               tbd = DP_TEST_BIT_DEPTH_10;
+               break;
+       default:
+               tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+               break;
+       }
+
+       if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+               tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+       return tbd;
+}
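+
+/*
+ * Usage sketch (illustrative): dp_link_get_test_bits_depth(link, 24)
+ * yields DP_TEST_BIT_DEPTH_8 with DP_TEST_BIT_DEPTH_SHIFT removed,
+ * i.e. the raw field value for 8 bpc; an unsupported bpp such as 16
+ * returns DP_TEST_BIT_DEPTH_UNKNOWN unshifted.
+ */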
+
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+{
+       struct dp_link_private *link;
+       struct dp_link *dp_link;
+
+       if (!dev || !aux) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+       if (!link)
+               return ERR_PTR(-ENOMEM);
+
+       link->dev   = dev;
+       link->aux   = aux;
+
+       mutex_init(&link->psm_mutex);
+       dp_link = &link->dp_link;
+
+       return dp_link;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644 (file)
index 0000000..49811b6
--- /dev/null
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+       unsigned char revision;
+       unsigned int rate;
+       unsigned int num_lanes;
+       unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+       DP_TRAIN_VOLTAGE_SWING_LVL_0    = 0,
+       DP_TRAIN_VOLTAGE_SWING_LVL_1    = 1,
+       DP_TRAIN_VOLTAGE_SWING_LVL_2    = 2,
+       DP_TRAIN_VOLTAGE_SWING_MAX      = DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemphasis_level {
+       DP_TRAIN_PRE_EMPHASIS_LVL_0     = 0,
+       DP_TRAIN_PRE_EMPHASIS_LVL_1     = 1,
+       DP_TRAIN_PRE_EMPHASIS_LVL_2     = 2,
+       DP_TRAIN_PRE_EMPHASIS_MAX       = DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+       u32 test_video_pattern;
+       u32 test_bit_depth;
+       u32 test_dyn_range;
+       u32 test_h_total;
+       u32 test_v_total;
+       u32 test_h_start;
+       u32 test_v_start;
+       u32 test_hsync_pol;
+       u32 test_hsync_width;
+       u32 test_vsync_pol;
+       u32 test_vsync_width;
+       u32 test_h_width;
+       u32 test_v_height;
+       u32 test_rr_d;
+       u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+       u32 test_audio_sampling_rate;
+       u32 test_audio_channel_count;
+       u32 test_audio_pattern_type;
+       u32 test_audio_period_ch_1;
+       u32 test_audio_period_ch_2;
+       u32 test_audio_period_ch_3;
+       u32 test_audio_period_ch_4;
+       u32 test_audio_period_ch_5;
+       u32 test_audio_period_ch_6;
+       u32 test_audio_period_ch_7;
+       u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+       u32 phy_test_pattern_sel;
+       u8 v_level;
+       u8 p_level;
+};
+
+struct dp_link {
+       u32 sink_request;
+       u32 test_response;
+       bool psm_enabled;
+
+       u8 sink_count;
+       struct dp_link_test_video test_video;
+       struct dp_link_test_audio test_audio;
+       struct dp_link_phy_params phy_params;
+       struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+       /*
+        * A few simplistic rules and assumptions are made here:
+        *    1. Bit depth is per color component
+        *    2. If bit depth is unknown return 0
+        *    3. Assume 3 color components
+        */
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+               return 18;
+       case DP_TEST_BIT_DEPTH_8:
+               return 24;
+       case DP_TEST_BIT_DEPTH_10:
+               return 30;
+       case DP_TEST_BIT_DEPTH_UNKNOWN:
+       default:
+               return 0;
+       }
+}
+
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+               return 6;
+       case DP_TEST_BIT_DEPTH_8:
+               return 8;
+       case DP_TEST_BIT_DEPTH_10:
+               return 10;
+       case DP_TEST_BIT_DEPTH_UNKNOWN:
+       default:
+               return 0;
+       }
+}
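+
+/*
+ * Round-trip sketch (illustrative): DP_TEST_BIT_DEPTH_8 maps to 24 bpp
+ * via dp_link_bit_depth_to_bpp() (8 bits x 3 components) and to 8 bpc
+ * via dp_link_bit_depth_to_bpc(); an unknown bit depth yields 0 from
+ * both helpers.
+ */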
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+               struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: DP AUX channel used for DPCD transactions
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644 (file)
index 0000000..18cec4f
--- /dev/null
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+struct dp_panel_private {
+       struct device *dev;
+       struct dp_panel dp_panel;
+       struct drm_dp_aux *aux;
+       struct dp_link *link;
+       struct dp_catalog *catalog;
+       bool panel_on;
+       bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       size_t len;
+       ssize_t rlen;
+       struct dp_panel_private *panel;
+       struct dp_link_info *link_info;
+       u8 *dpcd, major = 0, minor = 0, temp;
+       u32 offset = DP_DPCD_REV;
+
+       dpcd = dp_panel->dpcd;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       link_info = &dp_panel->link_info;
+
+       rlen = drm_dp_dpcd_read(panel->aux, offset,
+                       dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+       if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+               DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+               if (rlen == -ETIMEDOUT)
+                       rc = rlen;
+               else
+                       rc = -EINVAL;
+
+               goto end;
+       }
+
+       temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+       /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+       if (temp & BIT(7)) {
+               DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+               offset = DPRX_EXTENDED_DPCD_FIELD;
+       }
+
+       rlen = drm_dp_dpcd_read(panel->aux, offset,
+               dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+       if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+               DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+               if (rlen == -ETIMEDOUT)
+                       rc = rlen;
+               else
+                       rc = -EINVAL;
+
+               goto end;
+       }
+
+       link_info->revision = dpcd[DP_DPCD_REV];
+       major = (link_info->revision >> 4) & 0x0f;
+       minor = link_info->revision & 0x0f;
+
+       link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+       link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+       if (link_info->num_lanes > dp_panel->max_dp_lanes)
+               link_info->num_lanes = dp_panel->max_dp_lanes;
+
+       /* Limit support up to HBR2 until HBR3 support is added */
+       if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+               link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+       DRM_DEBUG_DP("version: %d.%d\n", major, minor);
+       DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
+       DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
+
+       if (drm_dp_enhanced_frame_cap(dpcd))
+               link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+       dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
+       dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
+
+       if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
+               dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
+               dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
+               len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
+
+               rlen = drm_dp_dpcd_read(panel->aux,
+                       DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
+               if (rlen < len) {
+                       DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
+                       rc = -EINVAL;
+                       goto end;
+               }
+       }
+
+end:
+       return rc;
+}
+
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+               u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+       struct dp_link_info *link_info;
+       const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+       u32 bpp = 0, data_rate_khz = 0;
+
+       bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+       link_info = &dp_panel->link_info;
+       data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+       while (bpp > min_supported_bpp) {
+               if (mode_pclk_khz * bpp <= data_rate_khz)
+                       break;
+               bpp -= 6;
+       }
+
+       return bpp;
+}
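+
+/*
+ * Worked example (illustrative): with 2 lanes at HBR (270000 kHz),
+ * data_rate_khz = 2 * 270000 * 8 = 4320000. A 148500 kHz pixel clock
+ * at 30 bpp needs 4455000, which does not fit, so bpp is stepped down
+ * to 24 (3564000), which does.
+ */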
+
+static int dp_panel_update_modes(struct drm_connector *connector,
+       struct edid *edid)
+{
+       int rc = 0;
+
+       if (edid) {
+               rc = drm_connector_update_edid_property(connector, edid);
+               if (rc) {
+                       DRM_ERROR("failed to update edid property %d\n", rc);
+                       return rc;
+               }
+               rc = drm_add_edid_modes(connector, edid);
+               DRM_DEBUG_DP("%s -", __func__);
+               return rc;
+       }
+
+       rc = drm_connector_update_edid_property(connector, NULL);
+       if (rc)
+               DRM_ERROR("failed to update edid property %d\n", rc);
+
+       return rc;
+}
+
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+       struct drm_connector *connector)
+{
+       int rc = 0, bw_code;
+       int rlen;
+       u8 count; /* DP_SINK_COUNT is a single byte */
+       struct dp_panel_private *panel;
+
+       if (!dp_panel || !connector) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       rc = dp_panel_read_dpcd(dp_panel);
+       bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+       if (rc || !is_link_rate_valid(bw_code) ||
+                       !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
+                       (bw_code > dp_panel->max_bw_code)) {
+               DRM_ERROR("read dpcd failed %d\n", rc);
+               return rc ? rc : -EINVAL;
+       }
+
+       if (dp_panel->dfp_present) {
+               rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
+                               &count, 1);
+               if (rlen == 1) {
+                       count = DP_GET_SINK_COUNT(count);
+                       if (!count) {
+                               DRM_ERROR("no downstream ports connected\n");
+                               panel->link->sink_count = 0;
+                               rc = -ENOTCONN;
+                               goto end;
+                       }
+               }
+       }
+
+       kfree(dp_panel->edid);
+       dp_panel->edid = NULL;
+
+       dp_panel->edid = drm_get_edid(connector, &panel->aux->ddc);
+       if (!dp_panel->edid) {
+               DRM_ERROR("panel edid read failed\n");
+
+               /* fail safe edid */
+               mutex_lock(&connector->dev->mode_config.mutex);
+               if (drm_add_modes_noedid(connector, 640, 480))
+                       drm_set_preferred_mode(connector, 640, 480);
+               mutex_unlock(&connector->dev->mode_config.mutex);
+       }
+
+       if (panel->aux_cfg_update_done) {
+               DRM_DEBUG_DP("read DPCD with updated AUX config\n");
+               rc = dp_panel_read_dpcd(dp_panel);
+               bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+               if (rc || !is_link_rate_valid(bw_code) ||
+                       !is_lane_count_valid(dp_panel->link_info.num_lanes)
+                       || (bw_code > dp_panel->max_bw_code)) {
+                       DRM_ERROR("read dpcd failed %d\n", rc);
+                       return rc ? rc : -EINVAL;
+               }
+               panel->aux_cfg_update_done = false;
+       }
+end:
+       return rc;
+}
+
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+               u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+       struct dp_panel_private *panel;
+       u32 bpp = mode_edid_bpp;
+
+       if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+               DRM_ERROR("invalid input\n");
+               return 0;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       if (dp_panel->video_test)
+               bpp = dp_link_bit_depth_to_bpp(
+                               panel->link->test_video.test_bit_depth);
+       else
+               bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+                               mode_pclk_khz);
+
+       return bpp;
+}
+
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+       struct drm_connector *connector, struct dp_display_mode *mode)
+{
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       if (dp_panel->edid)
+               return dp_panel_update_modes(connector, dp_panel->edid);
+
+       return 0;
+}
+
+static u8 dp_panel_get_edid_checksum(struct edid *edid)
+{
+       struct edid *last_block;
+       u8 *raw_edid;
+       bool is_edid_corrupt;
+
+       if (!edid) {
+               DRM_ERROR("invalid edid input\n");
+               return 0;
+       }
+
+       raw_edid = (u8 *)edid;
+       raw_edid += (edid->extensions * EDID_LENGTH);
+       last_block = (struct edid *)raw_edid;
+
+       /* block type extension */
+       drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+       if (!is_edid_corrupt)
+               return last_block->checksum;
+
+       DRM_ERROR("Invalid block, no checksum\n");
+       return 0;
+}
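+
+/*
+ * Layout sketch (illustrative): for an EDID with one extension block,
+ * raw_edid advances by EDID_LENGTH (128 bytes), so last_block points at
+ * the extension block, whose final byte is the checksum returned here.
+ */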
+
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+       struct dp_panel_private *panel;
+
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+               u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+
+               dp_link_send_edid_checksum(panel->link, checksum);
+               dp_link_send_test_response(panel->link);
+       }
+}
+
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+
+       if (!panel->panel_on) {
+               DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n");
+               return;
+       }
+
+       if (!enable) {
+               dp_catalog_panel_tpg_disable(catalog);
+               return;
+       }
+
+       DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__);
+       dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+}
+
+void dp_panel_dump_regs(struct dp_panel *dp_panel)
+{
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+
+       dp_catalog_dump_regs(catalog);
+}
+
+int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       u32 data, total_ver, total_hor;
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+       struct drm_display_mode *drm_mode;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+       drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+
+       DRM_DEBUG_DP("width=%d hporch= %d %d %d\n",
+               drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
+               drm_mode->hsync_start - drm_mode->hdisplay,
+               drm_mode->hsync_end - drm_mode->hsync_start);
+
+       DRM_DEBUG_DP("height=%d vporch= %d %d %d\n",
+               drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
+               drm_mode->vsync_start - drm_mode->vdisplay,
+               drm_mode->vsync_end - drm_mode->vsync_start);
+
+       total_hor = drm_mode->htotal;
+       total_ver = drm_mode->vtotal;
+
+       data = total_ver;
+       data <<= 16;
+       data |= total_hor;
+
+       catalog->total = data;
+
+       data = (drm_mode->vtotal - drm_mode->vsync_start);
+       data <<= 16;
+       data |= (drm_mode->htotal - drm_mode->hsync_start);
+
+       catalog->sync_start = data;
+
+       data = drm_mode->vsync_end - drm_mode->vsync_start;
+       data <<= 16;
+       data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+       data |= drm_mode->hsync_end - drm_mode->hsync_start;
+       data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+
+       catalog->width_blanking = data;
+
+       data = drm_mode->vdisplay;
+       data <<= 16;
+       data |= drm_mode->hdisplay;
+
+       catalog->dp_active = data;
+
+       dp_catalog_panel_timing_cfg(catalog);
+       panel->panel_on = true;
+
+       return rc;
+}
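+
+/*
+ * Register packing sketch (illustrative): for a 1920x1080 mode with
+ * htotal=2200 and vtotal=1125, catalog->total holds (1125 << 16) | 2200
+ * and catalog->dp_active holds (1080 << 16) | 1920; sync_start and
+ * width_blanking pack their fields the same way, with the v/h
+ * active-low polarity bits OR'ed into bits 31 and 15 of width_blanking.
+ */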
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       struct drm_display_mode *drm_mode;
+
+       drm_mode = &dp_panel->dp_mode.drm_mode;
+
+       /*
+        * print resolution info as this is a result
+        * of user initiated action of cable connection
+        */
+       DRM_DEBUG_DP("SET NEW RESOLUTION:\n");
+       DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay,
+               drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+       DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n",
+                       drm_mode->htotal - drm_mode->hsync_end,
+                       drm_mode->hsync_start - drm_mode->hdisplay,
+                       drm_mode->hsync_end - drm_mode->hsync_start);
+       DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n",
+                       drm_mode->vtotal - drm_mode->vsync_end,
+                       drm_mode->vsync_start - drm_mode->vdisplay,
+                       drm_mode->vsync_end - drm_mode->vsync_start);
+       DRM_DEBUG_DP("pixel clock (kHz)=(%d)\n", drm_mode->clock);
+       DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp);
+
+       dp_panel->dp_mode.bpp = max_t(u32, 18,
+                                       min_t(u32, dp_panel->dp_mode.bpp, 30));
+       DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
+
+       return rc;
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+{
+       struct dp_panel_private *panel;
+       struct dp_panel *dp_panel;
+
+       if (!in->dev || !in->catalog || !in->aux || !in->link) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+       if (!panel)
+               return ERR_PTR(-ENOMEM);
+
+       panel->dev = in->dev;
+       panel->aux = in->aux;
+       panel->catalog = in->catalog;
+       panel->link = in->link;
+
+       dp_panel = &panel->dp_panel;
+       dp_panel->max_bw_code = DP_LINK_BW_8_1;
+       panel->aux_cfg_update_done = false;
+
+       return dp_panel;
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+       if (!dp_panel)
+               return;
+
+       kfree(dp_panel->edid);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644 (file)
index 0000000..9023e5b
--- /dev/null
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include <drm/msm_drm.h>
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD       0x2200
+
+#define DP_DOWNSTREAM_PORTS            4
+#define DP_DOWNSTREAM_CAP_SIZE         4
+
+struct dp_display_mode {
+       struct drm_display_mode drm_mode;
+       u32 capabilities;
+       u32 bpp;
+       u32 h_active_low;
+       u32 v_active_low;
+};
+
+struct dp_panel_in {
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_link *link;
+       struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+       /* dpcd raw data */
+       u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+       u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+       u32 ds_port_cnt;
+       u32 dfp_present;
+
+       struct dp_link_info link_info;
+       struct drm_dp_desc desc;
+       struct edid *edid;
+       struct drm_connector *connector;
+       struct dp_display_mode dp_mode;
+       bool video_test;
+
+       u32 vic;
+       u32 max_pclk_khz;
+       u32 max_dp_lanes;
+
+       u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+               struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+                       u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+               struct drm_connector *connector, struct dp_display_mode *mode);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link rate bandwidth code requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+       return (bw_code == DP_LINK_BW_1_62 ||
+               bw_code == DP_LINK_BW_2_7 ||
+               bw_code == DP_LINK_BW_5_4 ||
+               bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+       return (lane_count == 1 ||
+               lane_count == 2 ||
+               lane_count == 4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644 (file)
index 0000000..0519dd3
--- /dev/null
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drm_print.h>
+
+#include "dp_parser.h"
+#include "dp_reg.h"
+
+static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
+       .num = 2,
+       .regs = {
+               {"vdda-1p2", 21800, 4 },        /* 1.2 V */
+               {"vdda-0p9", 36000, 32 },       /* 0.9 V */
+       },
+};
+
+static int msm_dss_ioremap(struct platform_device *pdev,
+                               struct dss_io_data *io_data)
+{
+       struct resource *res = NULL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               DRM_ERROR("%pS->%s: msm_dss_get_res failed\n",
+                       __builtin_return_address(0), __func__);
+               return -ENODEV;
+       }
+
+       io_data->len = (u32)resource_size(res);
+       io_data->base = ioremap(res->start, io_data->len);
+       if (!io_data->base) {
+               DRM_ERROR("%pS->%s: ioremap failed\n",
+                       __builtin_return_address(0), __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+       if (io_data->base) {
+               iounmap(io_data->base);
+               io_data->base = NULL;
+       }
+       io_data->len = 0;
+}
+
+static void dp_parser_unmap_io_resources(struct dp_parser *parser)
+{
+       struct dp_io *io = &parser->io;
+
+       msm_dss_iounmap(&io->dp_controller);
+}
+
+static int dp_parser_ctrl_res(struct dp_parser *parser)
+{
+       int rc = 0;
+       struct platform_device *pdev = parser->pdev;
+       struct dp_io *io = &parser->io;
+
+       rc = msm_dss_ioremap(pdev, &io->dp_controller);
+       if (rc) {
+               DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
+               goto err;
+       }
+
+       io->phy = devm_phy_get(&pdev->dev, "dp");
+       if (IS_ERR(io->phy)) {
+               rc = PTR_ERR(io->phy);
+               goto err;
+       }
+
+       return 0;
+err:
+       dp_parser_unmap_io_resources(parser);
+       return rc;
+}
+
+static int dp_parser_misc(struct dp_parser *parser)
+{
+       struct device_node *of_node = parser->pdev->dev.of_node;
+       int len = 0;
+       const char *data_lane_property = "data-lanes";
+
+       len = of_property_count_elems_of_size(of_node,
+                        data_lane_property, sizeof(u32));
+       if (len < 0) {
+               DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
+                               data_lane_property, DP_MAX_NUM_DP_LANES);
+               len = DP_MAX_NUM_DP_LANES;
+       }
+
+       parser->max_dp_lanes = len;
+       return 0;
+}
+
+static inline bool dp_parser_check_prefix(const char *clk_prefix,
+                                               const char *clk_name)
+{
+       return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
+}
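+
+/*
+ * Example (illustrative): dp_parser_check_prefix("ctrl", "ctrl_link")
+ * is true, so "ctrl_link" counts toward the CTRL clocks, while a name
+ * such as "core_iface" only matches the "core" prefix.
+ */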
+
+static int dp_parser_init_clk_data(struct dp_parser *parser)
+{
+       int num_clk, i, rc;
+       int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+       const char *clk_name;
+       struct device *dev = &parser->pdev->dev;
+       struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+       struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+       struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+       num_clk = of_property_count_strings(dev->of_node, "clock-names");
+       if (num_clk <= 0) {
+               DRM_ERROR("no clocks are defined\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_clk; i++) {
+               rc = of_property_read_string_index(dev->of_node,
+                               "clock-names", i, &clk_name);
+               if (rc < 0)
+                       return rc;
+
+               if (dp_parser_check_prefix("core", clk_name))
+                       core_clk_count++;
+
+               if (dp_parser_check_prefix("ctrl", clk_name))
+                       ctrl_clk_count++;
+
+               if (dp_parser_check_prefix("stream", clk_name))
+                       stream_clk_count++;
+       }
+
+       /* Initialize the CORE power module */
+       if (core_clk_count == 0) {
+               DRM_ERROR("no core clocks are defined\n");
+               return -EINVAL;
+       }
+
+       core_power->num_clk = core_clk_count;
+       core_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * core_power->num_clk,
+                       GFP_KERNEL);
+       if (!core_power->clk_config)
+               return -ENOMEM;
+
+       /* Initialize the CTRL power module */
+       if (ctrl_clk_count == 0) {
+               DRM_ERROR("no ctrl clocks are defined\n");
+               return -EINVAL;
+       }
+
+       ctrl_power->num_clk = ctrl_clk_count;
+       ctrl_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * ctrl_power->num_clk,
+                       GFP_KERNEL);
+       if (!ctrl_power->clk_config) {
+               ctrl_power->num_clk = 0;
+               return -ENOMEM;
+       }
+
+       /* Initialize the STREAM power module */
+       if (stream_clk_count == 0) {
+               DRM_ERROR("no stream (pixel) clocks are defined\n");
+               return -EINVAL;
+       }
+
+       stream_power->num_clk = stream_clk_count;
+       stream_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * stream_power->num_clk,
+                       GFP_KERNEL);
+       if (!stream_power->clk_config) {
+               stream_power->num_clk = 0;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+       int rc = 0, i = 0;
+       int num_clk = 0;
+       int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
+       int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+       const char *clk_name;
+       struct device *dev = &parser->pdev->dev;
+       struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+       struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+       struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+       rc = dp_parser_init_clk_data(parser);
+       if (rc) {
+               DRM_ERROR("failed to initialize clock data %d\n", rc);
+               return rc;
+       }
+
+       core_clk_count = core_power->num_clk;
+       ctrl_clk_count = ctrl_power->num_clk;
+       stream_clk_count = stream_power->num_clk;
+
+       num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
+
+       for (i = 0; i < num_clk; i++) {
+               rc = of_property_read_string_index(dev->of_node, "clock-names",
+                               i, &clk_name);
+               if (rc) {
+                       DRM_ERROR("error reading clock-names %d\n", rc);
+                       return rc;
+               }
+               if (dp_parser_check_prefix("core", clk_name) &&
+                               core_clk_index < core_clk_count) {
+                       struct dss_clk *clk =
+                               &core_power->clk_config[core_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       clk->type = DSS_CLK_AHB;
+                       core_clk_index++;
+               } else if (dp_parser_check_prefix("stream", clk_name) &&
+                               stream_clk_index < stream_clk_count) {
+                       struct dss_clk *clk =
+                               &stream_power->clk_config[stream_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       clk->type = DSS_CLK_PCLK;
+                       stream_clk_index++;
+               } else if (dp_parser_check_prefix("ctrl", clk_name) &&
+                          ctrl_clk_index < ctrl_clk_count) {
+                       struct dss_clk *clk =
+                               &ctrl_power->clk_config[ctrl_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       ctrl_clk_index++;
+                       if (dp_parser_check_prefix("ctrl_link", clk_name) ||
+                           dp_parser_check_prefix("stream_pixel", clk_name))
+                               clk->type = DSS_CLK_PCLK;
+                       else
+                               clk->type = DSS_CLK_AHB;
+               }
+       }
+
+       DRM_DEBUG_DP("clock parsing successful\n");
+
+       return 0;
+}
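+
+/*
+ * Device tree sketch (illustrative names, assumed here rather than
+ * taken from a binding; only the "core"/"ctrl"/"stream" prefixes
+ * matter to the parser):
+ *
+ *     clock-names = "core_iface", "core_aux",
+ *                   "ctrl_link", "ctrl_link_iface",
+ *                   "stream_pixel";
+ */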
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+       int rc = 0;
+
+       if (!parser) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       rc = dp_parser_ctrl_res(parser);
+       if (rc)
+               return rc;
+
+       rc = dp_parser_misc(parser);
+       if (rc)
+               return rc;
+
+       rc = dp_parser_clock(parser);
+       if (rc)
+               return rc;
+
+       /*
+        * Map the corresponding regulator information according to
+        * version. Currently, since we only have one supported platform,
+        * map the regulator directly.
+        */
+       parser->regulator_cfg = &sdm845_dp_reg_cfg;
+
+       return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+       struct dp_parser *parser;
+
+       parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+       if (!parser)
+               return ERR_PTR(-ENOMEM);
+
+       parser->parse = dp_parser_parse;
+       parser->pdev = pdev;
+
+       return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644 (file)
index 0000000..34b4962
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+
+#include "dpu_io_util.h"
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ   675000
+#define DP_MAX_NUM_DP_LANES    4
+
+enum dp_pm_type {
+       DP_CORE_PM,
+       DP_CTRL_PM,
+       DP_STREAM_PM,
+       DP_PHY_PM,
+       DP_MAX_PM
+};
+
+struct dss_io_data {
+       u32 len;
+       void __iomem *base;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+       switch (module) {
+       case DP_CORE_PM:        return "DP_CORE_PM";
+       case DP_CTRL_PM:        return "DP_CTRL_PM";
+       case DP_STREAM_PM:      return "DP_STREAM_PM";
+       case DP_PHY_PM:         return "DP_PHY_PM";
+       default:                return "???";
+       }
+}
+
+/**
+ * struct dp_display_data  - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node:  reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+       struct device_node *ctrl_node;
+       struct device_node *phy_node;
+       bool is_active;
+       const char *name;
+       const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: handle to the DP PHY
+ * @phy_opts: PHY configuration options
+ */
+struct dp_io {
+       struct dss_io_data dp_controller;
+       struct phy *phy;
+       union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+       struct pinctrl *pin;
+       struct pinctrl_state *state_active;
+       struct pinctrl_state *state_hpd_active;
+       struct pinctrl_state *state_suspend;
+};
+
+#define DP_DEV_REGULATOR_MAX   4
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+       char name[32];
+       int enable_load;
+       int disable_load;
+};
+
+struct dp_regulator_cfg {
+       int num;
+       struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @io: controller's mapped memory and PHY handle
+ * @disp_data: controller's display related data
+ * @regulator_cfg: regulator configuration for the platform
+ * @max_dp_lanes: maximum number of DP lanes supported
+ * @parse: function to be called by client to parse device tree.
+ */
+struct dp_parser {
+       struct platform_device *pdev;
+       struct dss_module_power mp[DP_MAX_PM];
+       struct dp_pinctrl pinctrl;
+       struct dp_io io;
+       struct dp_display_data disp_data;
+       const struct dp_regulator_cfg *regulator_cfg;
+       u32 max_dp_lanes;
+
+       int (*parse)(struct dp_parser *parser);
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ *
+ * This function provides the client the capability to parse the
+ * device tree and populate the data structures. The data related
+ * to clocks, regulators, pin-control and more can be parsed using
+ * this module.
+ */
+struct dp_parser *dp_parser_get(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
new file mode 100644 (file)
index 0000000..17c1fc6
--- /dev/null
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include "dp_power.h"
+#include "msm_drv.h"
+
+struct dp_power_private {
+       struct dp_parser *parser;
+       struct platform_device *pdev;
+       struct clk *link_clk_src;
+       struct clk *pixel_provider;
+       struct clk *link_provider;
+       struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX];
+
+       struct dp_power dp_power;
+};
+
+static void dp_power_regulator_disable(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       int num = power->parser->regulator_cfg->num;
+       int i;
+
+       DBG("");
+       for (i = num - 1; i >= 0; i--)
+               if (regs[i].disable_load >= 0)
+                       regulator_set_load(s[i].consumer,
+                                          regs[i].disable_load);
+
+       regulator_bulk_disable(num, s);
+}
+
+static int dp_power_regulator_enable(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       int num = power->parser->regulator_cfg->num;
+       int ret, i;
+
+       DBG("");
+       for (i = 0; i < num; i++) {
+               if (regs[i].enable_load >= 0) {
+                       ret = regulator_set_load(s[i].consumer,
+                                                regs[i].enable_load);
+                       if (ret < 0) {
+                               pr_err("regulator %d set op mode failed, %d\n",
+                                       i, ret);
+                               goto fail;
+                       }
+               }
+       }
+
+       ret = regulator_bulk_enable(num, s);
+       if (ret < 0) {
+               pr_err("regulator enable failed, %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       for (i--; i >= 0; i--)
+               regulator_set_load(s[i].consumer, regs[i].disable_load);
+       return ret;
+}
+
+static int dp_power_regulator_init(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       struct platform_device *pdev = power->pdev;
+       int num = power->parser->regulator_cfg->num;
+       int i, ret;
+
+       for (i = 0; i < num; i++)
+               s[i].supply = regs[i].name;
+
+       ret = devm_regulator_bulk_get(&pdev->dev, num, s);
+       if (ret < 0) {
+               pr_err("%s: failed to init regulator, ret=%d\n",
+                                               __func__, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int dp_power_clk_init(struct dp_power_private *power)
+{
+       int rc = 0;
+       struct dss_module_power *core, *ctrl, *stream;
+       struct device *dev = &power->pdev->dev;
+
+       core = &power->parser->mp[DP_CORE_PM];
+       ctrl = &power->parser->mp[DP_CTRL_PM];
+       stream = &power->parser->mp[DP_STREAM_PM];
+
+       rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_CORE_PM), rc);
+               return rc;
+       }
+
+       rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_CTRL_PM), rc);
+               msm_dss_put_clk(core->clk_config, core->num_clk);
+               return -ENODEV;
+       }
+
+       rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_STREAM_PM), rc);
+               msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+               msm_dss_put_clk(core->clk_config, core->num_clk);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int dp_power_clk_deinit(struct dp_power_private *power)
+{
+       struct dss_module_power *core, *ctrl, *stream;
+
+       core = &power->parser->mp[DP_CORE_PM];
+       ctrl = &power->parser->mp[DP_CTRL_PM];
+       stream = &power->parser->mp[DP_STREAM_PM];
+
+       if (!core || !ctrl || !stream) {
+               DRM_ERROR("invalid power_data\n");
+               return -EINVAL;
+       }
+
+       msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+       msm_dss_put_clk(core->clk_config, core->num_clk);
+       msm_dss_put_clk(stream->clk_config, stream->num_clk);
+       return 0;
+}
+
+static int dp_power_clk_set_rate(struct dp_power_private *power,
+               enum dp_pm_type module, bool enable)
+{
+       int rc = 0;
+       struct dss_module_power *mp = &power->parser->mp[module];
+
+       if (enable) {
+               rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+               if (rc) {
+                       DRM_ERROR("failed to set clks rate.\n");
+                       return rc;
+               }
+       }
+
+       rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+       if (rc) {
+               DRM_ERROR("failed to %s clks, err: %d\n",
+                               enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+       if (pm_type == DP_CORE_PM)
+               return dp_power->core_clks_on;
+
+       if (pm_type == DP_CTRL_PM)
+               return dp_power->link_clks_on;
+
+       if (pm_type == DP_STREAM_PM)
+               return dp_power->stream_clks_on;
+
+       return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+               enum dp_pm_type pm_type, bool enable)
+{
+       int rc = 0;
+       struct dp_power_private *power;
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+                       pm_type != DP_STREAM_PM) {
+               DRM_ERROR("unsupported power module: %s\n",
+                               dp_parser_pm_name(pm_type));
+               return -EINVAL;
+       }
+
+       if (enable) {
+               if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+                       DRM_DEBUG_DP("core clks already enabled\n");
+                       return 0;
+               }
+
+               if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+                       DRM_DEBUG_DP("links clks already enabled\n");
+                       return 0;
+               }
+
+               if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+                       DRM_DEBUG_DP("pixel clks already enabled\n");
+                       return 0;
+               }
+
+               if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+                       DRM_DEBUG_DP("Enable core clks before link clks\n");
+
+                       rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+                       if (rc) {
+                               DRM_ERROR("fail to enable clks: %s. err=%d\n",
+                                       dp_parser_pm_name(DP_CORE_PM), rc);
+                               return rc;
+                       }
+                       dp_power->core_clks_on = true;
+               }
+       }
+
+       rc = dp_power_clk_set_rate(power, pm_type, enable);
+       if (rc) {
+               DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
+                       enable ? "enable" : "disable",
+                       dp_parser_pm_name(pm_type), rc);
+               return rc;
+       }
+
+       if (pm_type == DP_CORE_PM)
+               dp_power->core_clks_on = enable;
+       else if (pm_type == DP_STREAM_PM)
+               dp_power->stream_clks_on = enable;
+       else
+               dp_power->link_clks_on = enable;
+
+       DRM_DEBUG_DP("%s clocks for %s\n",
+                       enable ? "enable" : "disable",
+                       dp_parser_pm_name(pm_type));
+       DRM_DEBUG_DP("stream_clks:%s link_clks:%s core_clks:%s\n",
+               dp_power->stream_clks_on ? "on" : "off",
+               dp_power->link_clks_on ? "on" : "off",
+               dp_power->core_clks_on ? "on" : "off");
+
+       return 0;
+}
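+
+/*
+ * Call-order sketch (illustrative): enabling DP_CTRL_PM while the core
+ * clocks are still off implicitly enables DP_CORE_PM first, so
+ *
+ *     dp_power_clk_enable(dp_power, DP_CTRL_PM, true);
+ *
+ * turns on both the core and link clocks, matching the check above.
+ */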
+
+int dp_power_client_init(struct dp_power *dp_power)
+{
+       int rc = 0;
+       struct dp_power_private *power;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return -EINVAL;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       pm_runtime_enable(&power->pdev->dev);
+
+       rc = dp_power_regulator_init(power);
+       if (rc) {
+               DRM_ERROR("failed to init regulators %d\n", rc);
+               goto error;
+       }
+
+       rc = dp_power_clk_init(power);
+       if (rc) {
+               DRM_ERROR("failed to init clocks %d\n", rc);
+               goto error;
+       }
+       return 0;
+
+error:
+       pm_runtime_disable(&power->pdev->dev);
+       return rc;
+}
+
+void dp_power_client_deinit(struct dp_power *dp_power)
+{
+       struct dp_power_private *power;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       dp_power_clk_deinit(power);
+       pm_runtime_disable(&power->pdev->dev);
+}
+
+int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+       int rc = 0;
+       struct dp_power_private *power = NULL;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return -EINVAL;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       pm_runtime_get_sync(&power->pdev->dev);
+       rc = dp_power_regulator_enable(power);
+       if (rc) {
+               DRM_ERROR("failed to enable regulators, %d\n", rc);
+               goto exit;
+       }
+
+       rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+       if (rc) {
+               DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
+               goto err_clk;
+       }
+
+       return 0;
+
+err_clk:
+       dp_power_regulator_disable(power);
+exit:
+       pm_runtime_put_sync(&power->pdev->dev);
+       return rc;
+}
+
+int dp_power_deinit(struct dp_power *dp_power)
+{
+       struct dp_power_private *power;
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+       dp_power_regulator_disable(power);
+       pm_runtime_put_sync(&power->pdev->dev);
+       return 0;
+}
+
+struct dp_power *dp_power_get(struct dp_parser *parser)
+{
+       struct dp_power_private *power;
+       struct dp_power *dp_power;
+
+       if (!parser) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+       if (!power)
+               return ERR_PTR(-ENOMEM);
+
+       power->parser = parser;
+       power->pdev = parser->pdev;
+
+       dp_power = &power->dp_power;
+
+       return dp_power;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644 (file)
index 0000000..76743d7
--- /dev/null
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: whether the DP core clocks are enabled
+ * @link_clks_on: whether the DP link (ctrl) clocks are enabled
+ * @stream_clks_on: whether the DP stream (pixel) clocks are enabled
+ */
+struct dp_power {
+       bool core_clks_on;
+       bool link_clks_on;
+       bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API will turn on the regulators and configure the
+ * aux/hpd GPIOs.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm: core/ctrl/stream
+ * return: status of the requested power clocks
+ *
+ * This API returns the status of the DP clocks
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm: core/ctrl/stream
+ * @enable: enable or disable the clocks
+ * return: 0 for success, error for failure.
+ *
+ * This API will call set_rate and enable for the DP clocks
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+                               bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provide
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644 (file)
index 0000000..43042ff
--- /dev/null
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_REG_H_
+#define _DP_REG_H_
+
+/* DP_TX Registers */
+#define REG_DP_HW_VERSION                      (0x00000000)
+
+#define REG_DP_SW_RESET                                (0x00000010)
+#define DP_SW_RESET                            (0x00000001)
+
+#define REG_DP_PHY_CTRL                                (0x00000014)
+#define DP_PHY_CTRL_SW_RESET_PLL               (0x00000001)
+#define DP_PHY_CTRL_SW_RESET                   (0x00000004)
+
+#define REG_DP_CLK_CTRL                                (0x00000018)
+#define REG_DP_CLK_ACTIVE                      (0x0000001C)
+#define REG_DP_INTR_STATUS                     (0x00000020)
+#define REG_DP_INTR_STATUS2                    (0x00000024)
+#define REG_DP_INTR_STATUS3                    (0x00000028)
+
+#define REG_DP_DP_HPD_CTRL                     (0x00000000)
+#define DP_DP_HPD_CTRL_HPD_EN                  (0x00000001)
+
+#define REG_DP_DP_HPD_INT_STATUS               (0x00000004)
+
+#define REG_DP_DP_HPD_INT_ACK                  (0x00000008)
+#define DP_DP_HPD_PLUG_INT_ACK                 (0x00000001)
+#define DP_DP_IRQ_HPD_INT_ACK                  (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_ACK               (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_ACK               (0x00000008)
+
+#define REG_DP_DP_HPD_INT_MASK                 (0x0000000C)
+#define DP_DP_HPD_PLUG_INT_MASK                        (0x00000001)
+#define DP_DP_IRQ_HPD_INT_MASK                 (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_MASK              (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_MASK              (0x00000008)
+#define DP_DP_HPD_INT_MASK                     (DP_DP_HPD_PLUG_INT_MASK | \
+                                               DP_DP_IRQ_HPD_INT_MASK | \
+                                               DP_DP_HPD_REPLUG_INT_MASK | \
+                                               DP_DP_HPD_UNPLUG_INT_MASK)
+#define DP_DP_HPD_STATE_STATUS_CONNECTED       (0x40000000)
+#define DP_DP_HPD_STATE_STATUS_PENDING         (0x20000000)
+#define DP_DP_HPD_STATE_STATUS_DISCONNECTED    (0x00000000)
+#define DP_DP_HPD_STATE_STATUS_MASK            (0xE0000000)
+
+#define REG_DP_DP_HPD_REFTIMER                 (0x00000018)
+#define DP_DP_HPD_REFTIMER_ENABLE              (1 << 16)
+
+#define REG_DP_DP_HPD_EVENT_TIME_0             (0x0000001C)
+#define REG_DP_DP_HPD_EVENT_TIME_1             (0x00000020)
+#define DP_DP_HPD_EVENT_TIME_0_VAL             (0x3E800FA)
+#define DP_DP_HPD_EVENT_TIME_1_VAL             (0x1F407D0)
+
+#define REG_DP_AUX_CTRL                                (0x00000030)
+#define DP_AUX_CTRL_ENABLE                     (0x00000001)
+#define DP_AUX_CTRL_RESET                      (0x00000002)
+
+#define REG_DP_AUX_DATA                                (0x00000034)
+#define DP_AUX_DATA_READ                       (0x00000001)
+#define DP_AUX_DATA_WRITE                      (0x00000000)
+#define DP_AUX_DATA_OFFSET                     (0x00000008)
+#define DP_AUX_DATA_INDEX_OFFSET               (0x00000010)
+#define DP_AUX_DATA_MASK                       (0x0000ff00)
+#define DP_AUX_DATA_INDEX_WRITE                        (0x80000000)
+
+#define REG_DP_AUX_TRANS_CTRL                  (0x00000038)
+#define DP_AUX_TRANS_CTRL_I2C                  (0x00000100)
+#define DP_AUX_TRANS_CTRL_GO                   (0x00000200)
+#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR         (0x00000400)
+#define DP_AUX_TRANS_CTRL_NO_SEND_STOP         (0x00000800)
+
+#define REG_DP_TIMEOUT_COUNT                   (0x0000003C)
+#define REG_DP_AUX_LIMITS                      (0x00000040)
+#define REG_DP_AUX_STATUS                      (0x00000044)
+
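+/* Note: these two are DPCD (sink-side) addresses, not TX MMIO offsets */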
+#define DP_DPCD_CP_IRQ                         (0x201)
+#define DP_DPCD_RXSTATUS                       (0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM                 (0x000000A0)
+
+#define REG_DP_MAINLINK_CTRL                   (0x00000000)
+#define DP_MAINLINK_CTRL_ENABLE                        (0x00000001)
+#define DP_MAINLINK_CTRL_RESET                 (0x00000002)
+#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER   (0x00000010)
+#define DP_MAINLINK_FB_BOUNDARY_SEL            (0x02000000)
+
+#define REG_DP_STATE_CTRL                      (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1   (0x00000001)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2   (0x00000002)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3   (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4   (0x00000008)
+#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE  (0x00000010)
+#define DP_STATE_CTRL_LINK_PRBS7               (0x00000020)
+#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040)
+#define DP_STATE_CTRL_SEND_VIDEO               (0x00000080)
+#define DP_STATE_CTRL_PUSH_IDLE                        (0x00000100)
+
+#define REG_DP_CONFIGURATION_CTRL              (0x00000008)
+#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK   (0x00000001)
+#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002)
+#define DP_CONFIGURATION_CTRL_P_INTERLACED     (0x00000004)
+#define DP_CONFIGURATION_CTRL_INTERLACED_BTF   (0x00000008)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES     (0x00000010)
+#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040)
+#define DP_CONFIGURATION_CTRL_SEND_VSC         (0x00000080)
+#define DP_CONFIGURATION_CTRL_BPC              (0x00000100)
+#define DP_CONFIGURATION_CTRL_ASSR             (0x00000400)
+#define DP_CONFIGURATION_CTRL_RGB_YUV          (0x00000800)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV                (0x00002000)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT       (0x04)
+#define DP_CONFIGURATION_CTRL_BPC_SHIFT                (0x08)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT  (0x0D)
+
+#define REG_DP_SOFTWARE_MVID                   (0x00000010)
+#define REG_DP_SOFTWARE_NVID                   (0x00000018)
+#define REG_DP_TOTAL_HOR_VER                   (0x0000001C)
+#define REG_DP_START_HOR_VER_FROM_SYNC         (0x00000020)
+#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY      (0x00000024)
+#define REG_DP_ACTIVE_HOR_VER                  (0x00000028)
+
+#define REG_DP_MISC1_MISC0                     (0x0000002C)
+#define DP_MISC0_SYNCHRONOUS_CLK               (0x00000001)
+#define DP_MISC0_COLORIMETRY_CFG_SHIFT         (0x00000001)
+#define DP_MISC0_TEST_BITS_DEPTH_SHIFT         (0x00000005)
+
+#define REG_DP_VALID_BOUNDARY                  (0x00000030)
+#define REG_DP_VALID_BOUNDARY_2                        (0x00000034)
+
+#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING   (0x00000038)
+#define LANE0_MAPPING_SHIFT                    (0x00000000)
+#define LANE1_MAPPING_SHIFT                    (0x00000002)
+#define LANE2_MAPPING_SHIFT                    (0x00000004)
+#define LANE3_MAPPING_SHIFT                    (0x00000006)
+
+#define REG_DP_MAINLINK_READY                  (0x00000040)
+#define DP_MAINLINK_READY_FOR_VIDEO            (0x00000001)
+#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT  (0x00000003)
+
+#define REG_DP_MAINLINK_LEVELS                 (0x00000044)
+#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2       (0x00000002)
+
+#define REG_DP_TU                              (0x0000004C)
+
+#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
+#define DP_HBR2_ERM_PATTERN                    (0x00010000)
+
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0  (0x000000C0)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1  (0x000000C4)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2  (0x000000C8)
+
+#define MMSS_DP_MISC1_MISC0                    (0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN               (0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32            (0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32            (0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44            (0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44            (0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48            (0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48            (0x00000098)
+
+#define MMSS_DP_PSR_CRC_RG                     (0x00000154)
+#define MMSS_DP_PSR_CRC_B                      (0x00000158)
+
+#define REG_DP_COMPRESSION_MODE_CTRL           (0x00000180)
+
+#define MMSS_DP_AUDIO_CFG                      (0x00000200)
+#define MMSS_DP_AUDIO_STATUS                   (0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL                 (0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2                        (0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL                 (0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET               (0x00000214)
+
+#define MMSS_DP_SDP_CFG                                (0x00000228)
+#define MMSS_DP_SDP_CFG2                       (0x0000022C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0              (0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1              (0x00000234)
+
+#define MMSS_DP_AUDIO_STREAM_0                 (0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1                 (0x00000244)
+
+#define MMSS_DP_EXTENSION_0                    (0x00000250)
+#define MMSS_DP_EXTENSION_1                    (0x00000254)
+#define MMSS_DP_EXTENSION_2                    (0x00000258)
+#define MMSS_DP_EXTENSION_3                    (0x0000025C)
+#define MMSS_DP_EXTENSION_4                    (0x00000260)
+#define MMSS_DP_EXTENSION_5                    (0x00000264)
+#define MMSS_DP_EXTENSION_6                    (0x00000268)
+#define MMSS_DP_EXTENSION_7                    (0x0000026C)
+#define MMSS_DP_EXTENSION_8                    (0x00000270)
+#define MMSS_DP_EXTENSION_9                    (0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0         (0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1         (0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2         (0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3         (0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4         (0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5         (0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0                   (0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1                   (0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2                   (0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3                   (0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4                   (0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5                   (0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0              (0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1              (0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2              (0x000002B0)
+
+#define MMSS_DP_GENERIC0_0                     (0x00000300)
+#define MMSS_DP_GENERIC0_1                     (0x00000304)
+#define MMSS_DP_GENERIC0_2                     (0x00000308)
+#define MMSS_DP_GENERIC0_3                     (0x0000030C)
+#define MMSS_DP_GENERIC0_4                     (0x00000310)
+#define MMSS_DP_GENERIC0_5                     (0x00000314)
+#define MMSS_DP_GENERIC0_6                     (0x00000318)
+#define MMSS_DP_GENERIC0_7                     (0x0000031C)
+#define MMSS_DP_GENERIC0_8                     (0x00000320)
+#define MMSS_DP_GENERIC0_9                     (0x00000324)
+#define MMSS_DP_GENERIC1_0                     (0x00000328)
+#define MMSS_DP_GENERIC1_1                     (0x0000032C)
+#define MMSS_DP_GENERIC1_2                     (0x00000330)
+#define MMSS_DP_GENERIC1_3                     (0x00000334)
+#define MMSS_DP_GENERIC1_4                     (0x00000338)
+#define MMSS_DP_GENERIC1_5                     (0x0000033C)
+#define MMSS_DP_GENERIC1_6                     (0x00000340)
+#define MMSS_DP_GENERIC1_7                     (0x00000344)
+#define MMSS_DP_GENERIC1_8                     (0x00000348)
+#define MMSS_DP_GENERIC1_9                     (0x0000034C)
+
+#define MMSS_DP_VSCEXT_0                       (0x000002D0)
+#define MMSS_DP_VSCEXT_1                       (0x000002D4)
+#define MMSS_DP_VSCEXT_2                       (0x000002D8)
+#define MMSS_DP_VSCEXT_3                       (0x000002DC)
+#define MMSS_DP_VSCEXT_4                       (0x000002E0)
+#define MMSS_DP_VSCEXT_5                       (0x000002E4)
+#define MMSS_DP_VSCEXT_6                       (0x000002E8)
+#define MMSS_DP_VSCEXT_7                       (0x000002EC)
+#define MMSS_DP_VSCEXT_8                       (0x000002F0)
+#define MMSS_DP_VSCEXT_9                       (0x000002F4)
+
+#define MMSS_DP_BIST_ENABLE                    (0x00000000)
+#define DP_BIST_ENABLE_DPBIST_EN               (0x00000001)
+
+#define MMSS_DP_TIMING_ENGINE_EN               (0x00000010)
+#define DP_TIMING_ENGINE_EN_EN                 (0x00000001)
+
+#define MMSS_DP_INTF_CONFIG                    (0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL                 (0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0           (0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1           (0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0      (0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1      (0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0           (0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1           (0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0          (0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1          (0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0         (0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1         (0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0           (0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1           (0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL              (0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL               (0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL              (0x00000058)
+
+#define MMSS_DP_TPG_MAIN_CONTROL               (0x00000060)
+#define MMSS_DP_DSC_DTO                                (0x0000007C)
+#define DP_TPG_CHECKERED_RECT_PATTERN          (0x00000100)
+
+#define MMSS_DP_TPG_VIDEO_CONFIG               (0x00000064)
+#define DP_TPG_VIDEO_CONFIG_BPP_8BIT           (0x00000001)
+#define DP_TPG_VIDEO_CONFIG_RGB                        (0x00000004)
+
+#define MMSS_DP_ASYNC_FIFO_CONFIG              (0x00000088)
+
+#define REG_DP_PHY_AUX_INTERRUPT_CLEAR          (0x0000004C)
+#define REG_DP_PHY_AUX_BIST_CFG                        (0x00000050)
+#define REG_DP_PHY_AUX_INTERRUPT_STATUS         (0x000000BC)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL                                   (0x0A0)
+#define DP_HDCP_STATUS                                 (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x098)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x350)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x35C)
+#define DP_HDCP_SHA_STATUS                             (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3                          (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x0A8)
+#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
+
+#endif /* _DP_REG_H_ */
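The HPD state lives in the top bits of REG_DP_DP_HPD_INT_STATUS, so a reader decodes it by masking with DP_DP_HPD_STATE_STATUS_MASK and comparing against the CONNECTED/PENDING/DISCONNECTED values rather than testing individual bits. A minimal sketch of that decode (the aux_base pointer and helper shape are illustrative, not taken from this patch):

	static bool dp_hpd_plugged(void __iomem *aux_base)
	{
		u32 state;

		state = readl(aux_base + REG_DP_DP_HPD_INT_STATUS);
		state &= DP_DP_HPD_STATE_STATUS_MASK;

		return state == DP_DP_HPD_STATE_STATUS_CONNECTED;
	}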
index 4de771d..78ef5d4 100644 (file)
@@ -30,6 +30,8 @@ enum msm_dsi_phy_type {
        MSM_DSI_PHY_28NM_8960,
        MSM_DSI_PHY_14NM,
        MSM_DSI_PHY_10NM,
+       MSM_DSI_PHY_7NM,
+       MSM_DSI_PHY_7NM_V4_1,
        MSM_DSI_PHY_MAX
 };
 
index 8e536e0..50eb4d1 100644 (file)
@@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000
 
 #define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE                 0x000001a0
 
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID0                       0x00000000
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID1                       0x00000004
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID2                       0x00000008
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID3                       0x0000000c
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG0                           0x00000010
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG1                           0x00000014
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL                          0x00000018
+
+#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL                          0x0000001c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0                                0x00000020
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_0                             0x00000024
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_1                             0x00000028
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_2                             0x0000002c
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_3                             0x00000030
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG0                          0x00000034
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG1                          0x00000038
+
+#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL                          0x0000003c
+
+#define REG_DSI_7nm_PHY_CMN_DPHY_SOT                           0x00000040
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0                         0x000000a0
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1                         0x000000a4
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2                         0x000000a8
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3                         0x000000ac
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4                         0x000000b0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0                      0x000000b4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1                      0x000000b8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2                      0x000000bc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3                      0x000000c0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4                      0x000000c4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5                      0x000000c8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6                      0x000000cc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7                      0x000000d0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8                      0x000000d4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9                      0x000000d8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10                     0x000000dc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11                     0x000000e0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12                     0x000000e4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13                     0x000000e8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0               0x000000ec
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1               0x000000f0
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL       0x000000f4
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL       0x000000f8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL       0x000000fc
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL                 0x00000100
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0                  0x00000104
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1                  0x00000108
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL          0x0000010c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1                                0x00000110
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_4                             0x00000114
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4                 0x00000128
+
+#define REG_DSI_7nm_PHY_CMN_PHY_STATUS                         0x00000140
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0                       0x00000148
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1                       0x0000014c
+
+static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE                        0x00000000
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO                        0x00000004
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS                  0x00000008
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO              0x0000000c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE              0x00000010
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR               0x00000014
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE               0x00000018
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS                  0x0000001c
+
+#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER                                0x00000020
+
+#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER                   0x00000024
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES                       0x00000028
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES      0x0000002c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE                              0x00000030
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CTRL                           0x00000034
+
+#define REG_DSI_7nm_PHY_PLL_RSM_CTRL                           0x00000038
+
+#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP                       0x0000003c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL                          0x00000040
+
+#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS               0x00000044
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW             0x00000048
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH            0x0000004c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS              0x00000050
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN                       0x00000054
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX                       0x00000058
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT                     0x0000005c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT                     0x00000060
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO          0x00000064
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE                0x00000068
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR         0x0000006c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH                        0x00000070
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW                 0x00000074
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE           0x00000078
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH                 0x0000007c
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH               0x00000080
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW                        0x00000084
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH               0x00000088
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW                        0x0000008c
+
+#define REG_DSI_7nm_PHY_PLL_PFILT                              0x00000090
+
+#define REG_DSI_7nm_PHY_PLL_IFILT                              0x00000094
+
+#define REG_DSI_7nm_PHY_PLL_PLL_GAIN                           0x00000098
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_LOW                          0x0000009c
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH                         0x000000a0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKDET                            0x000000a4
+
+#define REG_DSI_7nm_PHY_PLL_OUTDIV                             0x000000a8
+
+#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL                   0x000000ac
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE              0x000000b0
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO              0x000000b4
+
+#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE                      0x000000b8
+
+#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE                        0x000000bc
+
+#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE                                0x000000c0
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS                 0x000000c4
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO             0x000000c8
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START                  0x000000cc
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW                 0x000000d0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID                 0x000000d4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH                        0x000000d8
+
+#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES                     0x000000dc
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1                        0x000000e0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1               0x000000e4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1               0x000000e8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1              0x000000ec
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2                        0x000000f0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2               0x000000f4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2               0x000000f8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2              0x000000fc
+
+#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL                       0x00000100
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW                   0x00000104
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH                  0x00000108
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW                    0x0000010c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH                   0x00000110
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW                     0x00000114
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH                    0x00000118
+
+#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL                    0x0000011c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1                 0x00000120
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1                        0x00000124
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1                  0x00000128
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1                 0x0000012c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1                   0x00000130
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1                  0x00000134
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2                 0x00000138
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2                        0x0000013c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2                  0x00000140
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2                 0x00000144
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2                   0x00000148
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2                  0x0000014c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL                                0x00000150
+
+#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE                    0x00000154
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1                 0x00000158
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2                 0x0000015c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1               0x00000160
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2               0x00000164
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1                        0x00000168
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2                        0x0000016c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1          0x00000170
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2          0x00000174
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1       0x00000178
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2       0x0000017c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND               0x00000180
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID           0x00000184
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH          0x00000188
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX           0x0000018c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE                  0x00000190
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY                     0x00000194
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY                 0x00000198
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS                    0x0000019c
+
+#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES            0x000001a0
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1                     0x000001a4
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2                     0x000001a8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1               0x000001ac
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE                  0x000001b0
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO                  0x000001b4
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL                       0x000001b8
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW             0x000001bc
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH            0x000001c0
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW                         0x000001c4
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH                                0x000001c8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1             0x000001cc
+
+#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG                    0x000001d0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG                         0x000001d4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME                  0x000001d8
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE0                          0x000001dc
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE1                          0x000001e0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0                          0x000001e4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1                          0x000001e8
+
+#define REG_DSI_7nm_PHY_PLL_SW_RESET                           0x000001ec
+
+#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP                         0x000001f0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME0                          0x000001f4
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME1                          0x000001f8
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL                      0x000001fc
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0                         0x00000200
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1                         0x00000204
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2                         0x00000208
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3                         0x0000020c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES       0x00000210
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG                         0x00000214
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS         0x00000218
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS         0x0000021c
+
+#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS                    0x00000220
+
+#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET                         0x00000224
+
+#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS               0x00000228
+
+#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS               0x0000022c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS                   0x00000230
+
+#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS          0x00000234
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS                   0x00000238
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2                     0x0000023c
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1                       0x00000240
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2                       0x00000244
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1                  0x00000248
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2                  0x0000024c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_1                            0x00000250
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_2                            0x00000254
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1             0x00000258
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2             0x0000025c
+
+#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE                      0x00000260
 
 #endif /* DSI_XML */
index f892f2c..b2ff68a 100644 (file)
@@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
                &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
                &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
+               &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
+               &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
                &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
-
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
index efd469d..ade9b60 100644 (file)
@@ -21,6 +21,8 @@
 #define MSM_DSI_6G_VER_MINOR_V2_1_0    0x20010000
 #define MSM_DSI_6G_VER_MINOR_V2_2_0    0x20000000
 #define MSM_DSI_6G_VER_MINOR_V2_2_1    0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_3_0    0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_4_0    0x20040000
 #define MSM_DSI_6G_VER_MINOR_V2_4_1    0x20040001
 
 #define MSM_DSI_V2_VER_MINOR_8064      0x0
index 009f5b8..e8c1a72 100644 (file)
@@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
        return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+       struct msm_dsi_phy_clk_request *clk_req)
+{
+       const unsigned long bit_rate = clk_req->bitclk_rate;
+       const unsigned long esc_rate = clk_req->escclk_rate;
+       s32 ui, ui_x8;
+       s32 tmax, tmin;
+       s32 pcnt_clk_prep = 50;
+       s32 pcnt_clk_zero = 2;
+       s32 pcnt_clk_trail = 30;
+       s32 pcnt_hs_prep = 50;
+       s32 pcnt_hs_zero = 10;
+       s32 pcnt_hs_trail = 30;
+       s32 pcnt_hs_exit = 10;
+       s32 coeff = 1000; /* Precision, should avoid overflow */
+       s32 hb_en;
+       s32 temp;
+
+       if (!bit_rate || !esc_rate)
+               return -EINVAL;
+
+       hb_en = 0;
+
+       ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+       ui_x8 = ui << 3;
+
+       /* TODO: verify these calculations against the latest downstream
+        * driver. Everything except clk_post/clk_pre uses the v3
+        * calculations, since the downstream driver has the same
+        * calculations for v3 and v4.
+        */
+
+       temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (95 * coeff) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
+
+       temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = (tmin > 255) ? 511 : 255;
+       timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp + 3 * ui) / ui_x8;
+       timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
+
+       temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (85 * coeff + 6 * ui) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
+
+       temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp / ui_x8) - 1;
+       timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
+
+       temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+       timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+       tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
+
+       /* recommended min
+        * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
+        */
+       temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
+       tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+       tmax = 255;
+       timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
+
+       /* recommended min
+        * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
+        * val2 = (16 * bit_clk_ns)
+        * final = roundup(val1/val2, 0) - 1
+        */
+       temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
+       tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+       tmax = 255;
+       timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+
+       DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+               timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+               timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+               timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
+
+       return 0;
+}
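+
+/*
+ * Worked example of the fixed-point convention above (rates assumed for
+ * illustration): at a 1.5 Gbit/s bit clock, bit_rate / 1000 = 1500000, so
+ * ui = mult_frac(NSEC_PER_MSEC, 1000, 1500000) = 666, i.e. one unit
+ * interval is 0.666 ns scaled by coeff, and ui_x8 = 5328 is one byte-clock
+ * period in the same units. The tmin/tmax bounds above are therefore
+ * integer counts of byte-clock periods.
+ */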
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask)
 {
@@ -508,6 +604,12 @@ static const struct of_device_id dsi_phy_dt_match[] = {
        { .compatible = "qcom,dsi-phy-10nm-8998",
          .data = &dsi_phy_10nm_8998_cfgs },
 #endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+       { .compatible = "qcom,dsi-phy-7nm",
+         .data = &dsi_phy_7nm_cfgs },
+       { .compatible = "qcom,dsi-phy-7nm-8150",
+         .data = &dsi_phy_7nm_8150_cfgs },
+#endif
        {}
 };
 
index ef8672d..d2bd74b 100644 (file)
@@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
 
 struct msm_dsi_dphy_timing {
-       u32 clk_pre;
-       u32 clk_post;
        u32 clk_zero;
        u32 clk_trail;
        u32 clk_prepare;
@@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+                               struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644 (file)
index 0000000..255b5f5
--- /dev/null
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+       void __iomem *base = phy->base;
+       u32 data = 0;
+
+       data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+       mb(); /* make sure read happened */
+
+       return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+       void __iomem *lane_base = phy->lane_base;
+       int phy_lane_0 = 0;     /* TODO: Support all lane swap configs */
+
+       /*
+        * LPRX and CDRX need to be enabled only for the physical data lane
+        * corresponding to the logical data lane 0
+        */
+       if (enable)
+               dsi_phy_write(lane_base +
+                             REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+       else
+               dsi_phy_write(lane_base +
+                             REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+       int i;
+       const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+       const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+       const u8 *tx_dctrl = tx_dctrl_0;
+       void __iomem *lane_base = phy->lane_base;
+
+       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+               tx_dctrl = tx_dctrl_1;
+
+       /* Strength ctrl settings */
+       for (i = 0; i < 5; i++) {
+               /*
+                * Disable LPRX and CDRX for all lanes. They will later be
+                * enabled only for the physical data lane corresponding to
+                * the logical data lane 0
+                */
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+       }
+
+       dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+       /* other settings */
+       for (i = 0; i < 5; i++) {
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+       }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+                             struct msm_dsi_phy_clk_request *clk_req)
+{
+       int ret;
+       u32 status;
+       u32 const delay_us = 5;
+       u32 const timeout_us = 1000;
+       struct msm_dsi_dphy_timing *timing = &phy->timing;
+       void __iomem *base = phy->base;
+       bool less_than_1500_mhz;
+       u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+       u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+       u32 data;
+
+       DBG("");
+
+       if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+               DRM_DEV_ERROR(&phy->pdev->dev,
+                       "%s: D-PHY timing calculation failed\n", __func__);
+               return -EINVAL;
+       }
+
+       if (dsi_phy_hw_v4_0_is_pll_on(phy))
+               pr_warn("PLL turned on before configuring PHY\n");
+
+       /* wait for REFGEN READY */
+       ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+                                       status, (status & BIT(0)),
+                                       delay_us, timeout_us);
+       if (ret) {
+               pr_err("Ref gen not ready. Aborting\n");
+               return -EINVAL;
+       }
+
+       /* TODO: CPHY enable path (this is for DPHY only) */
+
+       /* Alter PHY configurations if the data rate is less than 1.5 GHz */
+       less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+               vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+               glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d :  0x00;
+               glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 :  0x3c;
+               glbl_str_swi_cal_sel_ctrl = 0x00;
+               glbl_hstx_str_ctrl_0 = 0x88;
+       } else {
+               vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+               glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+               glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+               glbl_rescode_top_ctrl = 0x03;
+               glbl_rescode_bot_ctrl = 0x3c;
+       }
+
+       /* de-assert digital and pll power down */
+       data = BIT(6) | BIT(5);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+       /* Assert PLL core reset */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+       /* turn off resync FIFO */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+       /* program CMN_CTRL_4 for minor_ver 2 chipsets */
+       data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+       data = data & (0xf0);
+       if (data == 0x20)
+               dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+       /* Configure PHY lane swap (TODO: we need to calculate this) */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+       /* Enable LDO */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+                     glbl_str_swi_cal_sel_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+                     glbl_hstx_str_ctrl_0);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+                     glbl_rescode_top_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+                     glbl_rescode_bot_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+       /* Remove power down from all blocks */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+
+       /* Select full-rate mode */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+       ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+       if (ret) {
+               DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       /* DSI PHY timings */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+                     timing->shared_timings.clk_pre);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+                     timing->shared_timings.clk_post);
+
+       /* DSI lane settings */
+       dsi_phy_hw_v4_0_lane_settings(phy);
+
+       DBG("DSI%d PHY enabled", phy->id);
+
+       return 0;
+}
+
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+       /* TODO */
+}
+
+static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+
+       phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+                                    "DSI_PHY_LANE");
+       if (IS_ERR(phy->lane_base)) {
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+       .type = MSM_DSI_PHY_7NM_V4_1,
+       .src_pll_truthtable = { {false, false}, {true, false} },
+       .reg_cfg = {
+               .num = 1,
+               .regs = {
+                       {"vdds", 36000, 32},
+               },
+       },
+       .ops = {
+               .enable = dsi_7nm_phy_enable,
+               .disable = dsi_7nm_phy_disable,
+               .init = dsi_7nm_phy_init,
+       },
+       .io_start = { 0xae94400, 0xae96400 },
+       .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+       .type = MSM_DSI_PHY_7NM,
+       .src_pll_truthtable = { {false, false}, {true, false} },
+       .reg_cfg = {
+               .num = 1,
+               .regs = {
+                       {"vdds", 36000, 32},
+               },
+       },
+       .ops = {
+               .enable = dsi_7nm_phy_enable,
+               .disable = dsi_7nm_phy_disable,
+               .init = dsi_7nm_phy_init,
+       },
+       .io_start = { 0xae94400, 0xae96400 },
+       .num_dsi_phy = 2,
+};
index 4a4aa3c..a45fe95 100644 (file)
@@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
        case MSM_DSI_PHY_10NM:
                pll = msm_dsi_pll_10nm_init(pdev, id);
                break;
+       case MSM_DSI_PHY_7NM:
+       case MSM_DSI_PHY_7NM_V4_1:
+               pll = msm_dsi_pll_7nm_init(pdev, id);
+               break;
        default:
                pll = ERR_PTR(-ENXIO);
                break;
index c6a3623..3405982 100644 (file)
@@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
        return ERR_PTR(-ENODEV);
 }
 #endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+       return ERR_PTR(-ENODEV);
+}
+#endif
+
 #endif /* __DSI_PLL_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
new file mode 100644 (file)
index 0000000..de0dfb8
--- /dev/null
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 7nm - clock diagram (e.g. DSI0); TODO: update the diagram for CPHY
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK               0
+#define DSI_PIXEL_PLL_CLK              1
+#define NUM_PROVIDED_CLKS              2
+
+#define VCO_REF_CLK_RATE               19200000
+
+struct dsi_pll_regs {
+       u32 pll_prop_gain_rate;
+       u32 pll_lockdet_rate;
+       u32 decimal_div_start;
+       u32 frac_div_start_low;
+       u32 frac_div_start_mid;
+       u32 frac_div_start_high;
+       u32 pll_clock_inverters;
+       u32 ssc_stepsize_low;
+       u32 ssc_stepsize_high;
+       u32 ssc_div_per_low;
+       u32 ssc_div_per_high;
+       u32 ssc_adjper_low;
+       u32 ssc_adjper_high;
+       u32 ssc_control;
+};
+
+struct dsi_pll_config {
+       u32 ref_freq;
+       bool div_override;
+       u32 output_div;
+       bool ignore_frac;
+       bool disable_prescaler;
+       bool enable_ssc;
+       bool ssc_center;
+       u32 dec_bits;
+       u32 frac_bits;
+       u32 lock_timer;
+       u32 ssc_freq;
+       u32 ssc_offset;
+       u32 ssc_adj_per;
+       u32 thresh_cycles;
+       u32 refclk_cycles;
+};
+
+struct pll_7nm_cached_state {
+       unsigned long vco_rate;
+       u8 bit_clk_div;
+       u8 pix_clk_div;
+       u8 pll_out_div;
+       u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+       struct msm_dsi_pll base;
+
+       int id;
+       struct platform_device *pdev;
+
+       void __iomem *phy_cmn_mmio;
+       void __iomem *mmio;
+
+       u64 vco_ref_clk_rate;
+       u64 vco_current_rate;
+
+       /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+       spinlock_t postdiv_lock;
+
+       int vco_delay;
+       struct dsi_pll_config pll_configuration;
+       struct dsi_pll_regs reg_setup;
+
+       /* private clocks: */
+       struct clk_hw *out_div_clk_hw;
+       struct clk_hw *bit_clk_hw;
+       struct clk_hw *byte_clk_hw;
+       struct clk_hw *by_2_bit_clk_hw;
+       struct clk_hw *post_out_div_clk_hw;
+       struct clk_hw *pclk_mux_hw;
+       struct clk_hw *out_dsiclk_hw;
+
+       /* clock-provider: */
+       struct clk_hw_onecell_data *hw_data;
+
+       struct pll_7nm_cached_state cached_state;
+
+       enum msm_dsi_phy_usecase uc;
+       struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x)  container_of(x, struct dsi_pll_7nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+
+       config->ref_freq = pll->vco_ref_clk_rate;
+       config->output_div = 1;
+       config->dec_bits = 8;
+       config->frac_bits = 18;
+       config->lock_timer = 64;
+       config->ssc_freq = 31500;
+       config->ssc_offset = 4800;
+       config->ssc_adj_per = 2;
+       config->thresh_cycles = 32;
+       config->refclk_cycles = 256;
+
+       config->div_override = false;
+       config->ignore_frac = false;
+       config->disable_prescaler = false;
+
+       /* TODO: ssc enable */
+       config->enable_ssc = false;
+       config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+       u64 fref = pll->vco_ref_clk_rate;
+       u64 pll_freq;
+       u64 divider;
+       u64 dec, dec_multiple;
+       u32 frac;
+       u64 multiplier;
+
+       pll_freq = pll->vco_current_rate;
+
+       if (config->disable_prescaler)
+               divider = fref;
+       else
+               divider = fref * 2;
+
+       multiplier = 1 << config->frac_bits;
+       dec_multiple = div_u64(pll_freq * multiplier, divider);
+       div_u64_rem(dec_multiple, multiplier, &frac);
+
+       dec = div_u64(dec_multiple, multiplier);
+
+       if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+               regs->pll_clock_inverters = 0x28;
+       else if (pll_freq <= 1000000000ULL)
+               regs->pll_clock_inverters = 0xa0;
+       else if (pll_freq <= 2500000000ULL)
+               regs->pll_clock_inverters = 0x20;
+       else if (pll_freq <= 3020000000ULL)
+               regs->pll_clock_inverters = 0x00;
+       else
+               regs->pll_clock_inverters = 0x40;
+
+       regs->pll_lockdet_rate = config->lock_timer;
+       regs->decimal_div_start = dec;
+       regs->frac_div_start_low = (frac & 0xff);
+       regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+       regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
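+
+/*
+ * Worked example (rates assumed for illustration): with a 19.2 MHz fref,
+ * prescaler enabled (divider = 38400000) and a 1.5 GHz target VCO rate,
+ * dec_multiple = 1500000000 * 2^18 / 38400000 = 10240000, so
+ * dec = 10240000 / 2^18 = 39 and frac = 10240000 % 2^18 = 16384,
+ * i.e. VCO = (dec + frac / 2^18) * 2 * fref.
+ */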
+
+#define SSC_CENTER             BIT(0)
+#define SSC_EN                 BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+       u32 ssc_per;
+       u32 ssc_mod;
+       u64 ssc_step_size;
+       u64 frac;
+
+       if (!config->enable_ssc) {
+               DBG("SSC not enabled\n");
+               return;
+       }
+
+       ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+       ssc_per -= ssc_mod;
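+       /*
+        * Hypothetical walk-through with the defaults set above: a
+        * 19.2 MHz reference and ssc_freq = 31500 give
+        * ssc_per = round(19200000 / 31500) / 2 - 1 = 304, then
+        * ssc_mod = 305 % 3 = 2, so ssc_per is trimmed to 302 to make
+        * (ssc_per + 1) a multiple of (ssc_adj_per + 1).
+        */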
+
+       frac = regs->frac_div_start_low |
+                       (regs->frac_div_start_mid << 8) |
+                       (regs->frac_div_start_high << 16);
+       ssc_step_size = regs->decimal_div_start;
+       ssc_step_size *= (1 << config->frac_bits);
+       ssc_step_size += frac;
+       ssc_step_size *= config->ssc_offset;
+       ssc_step_size *= (config->ssc_adj_per + 1);
+       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
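+       /*
+        * I.e. ssc_step_size ~= (dec * 2^frac_bits + frac) * ssc_offset *
+        * (ssc_adj_per + 1) / ((ssc_per + 1) * 10^6); the 10^6 divisor
+        * suggests ssc_offset is a ppm value, though that is an assumption
+        * rather than something spelled out here.
+        */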
+
+       regs->ssc_div_per_low = ssc_per & 0xFF;
+       regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+       regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+       regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+       regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+       regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+       regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+       pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+                regs->decimal_div_start, frac, config->frac_bits);
+       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+
+       if (pll->pll_configuration.enable_ssc) {
+               pr_debug("SSC is enabled\n");
+
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+                         regs->ssc_stepsize_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+                         regs->ssc_stepsize_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+                         regs->ssc_div_per_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+                         regs->ssc_div_per_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+                         regs->ssc_adjper_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+                         regs->ssc_adjper_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+                         SSC_EN | regs->ssc_control);
+       }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+               if (pll->vco_current_rate >= 3100000000ULL)
+                       analog_controls_five_1 = 0x03;
+
+               if (pll->vco_current_rate < 1520000000ULL)
+                       vco_config_1 = 0x08;
+               else if (pll->vco_current_rate < 2990000000ULL)
+                       vco_config_1 = 0x01;
+       }
+
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+                 analog_controls_five_1);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+                 pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
+
+       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+               pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+               if (pll->slave)
+                       pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+       }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       struct dsi_pll_regs *reg = &pll->reg_setup;
+
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
+           parent_rate);
+
+       pll_7nm->vco_current_rate = rate;
+       pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+       dsi_pll_setup_config(pll_7nm);
+
+       dsi_pll_calc_dec_frac(pll_7nm);
+
+       dsi_pll_calc_ssc(pll_7nm);
+
+       dsi_pll_commit(pll_7nm);
+
+       dsi_pll_config_hzindep_reg(pll_7nm);
+
+       dsi_pll_ssc_commit(pll_7nm);
+
+       /* flush, ensure all register writes are done */
+       wmb();
+
+       return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+       int rc;
+       u32 status = 0;
+       u32 const delay_us = 100;
+       u32 const timeout_us = 5000;
+
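+       /*
+        * Poll the PLL lock bit (BIT(0) of COMMON_STATUS_ONE) every 100 us,
+        * for up to 5 ms.
+        */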
+       rc = readl_poll_timeout_atomic(pll->mmio +
+                                      REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+                                      status,
+                                      ((status & BIT(0)) > 0),
+                                      delay_us,
+                                      timeout_us);
+       if (rc)
+               pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+                      pll->id, status);
+
+       return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+       ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+       ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                 data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+       /*
+        * Reset the PHY digital domain. This would be needed when
+        * coming out of a CX or analog rail power collapse while
+        * ensuring that the pads maintain LP00 or LP11 state
+        */
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+       wmb(); /* Ensure that the reset is asserted */
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+       wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       int rc;
+
+       dsi_pll_enable_pll_bias(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+       /* Start PLL */
+       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+       /*
+        * ensure all PLL configurations are written prior to checking
+        * for PLL lock.
+        */
+       wmb();
+
+       /* Check for PLL lock */
+       rc = dsi_pll_7nm_lock_status(pll_7nm);
+       if (rc) {
+               pr_err("PLL(%d) lock failed\n", pll_7nm->id);
+               goto error;
+       }
+
+       pll->pll_on = true;
+
+       /*
+        * assert power on reset for PHY digital in case the PLL is
+        * enabled after a CX or analog domain power collapse. This needs
+        * to be done before enabling the global clk.
+        */
+       dsi_pll_phy_dig_reset(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+       dsi_pll_enable_global_clk(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+       return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+       dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+       /*
+        * To avoid any stray glitches while abruptly powering down the PLL,
+        * make sure to gate the clock using the clock enable bit before
+        * powering down the PLL.
+        */
+       dsi_pll_disable_global_clk(pll_7nm);
+       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+       dsi_pll_disable_sub(pll_7nm);
+       if (pll_7nm->slave) {
+               dsi_pll_disable_global_clk(pll_7nm->slave);
+               dsi_pll_disable_sub(pll_7nm->slave);
+       }
+       /* flush, ensure all register writes are done */
+       wmb();
+       pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       void __iomem *base = pll_7nm->mmio;
+       u64 ref_clk = pll_7nm->vco_ref_clk_rate;
+       u64 vco_rate = 0x0;
+       u64 multiplier;
+       u32 frac;
+       u32 dec;
+       u64 pll_freq, tmp64;
+
+       dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+       dec &= 0xff;
+
+       frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+                 0xff) << 8);
+       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+                 0x3) << 16);
+
+       /*
+        * TODO:
+        *      1. Assumes prescaler is disabled
+        *      2. Multiplier is 2^18; it should be 2^(num_of_frac_bits)
+        */
+       multiplier = 1 << 18;
+       pll_freq = dec * (ref_clk * 2);
+       tmp64 = (ref_clk * 2 * frac);
+       pll_freq += div_u64(tmp64, multiplier);
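+       /*
+        * This inverts the set_rate math; e.g. (hypothetically) dec = 39,
+        * frac = 16384 and a 19.2 MHz reference give
+        * 39 * 38.4 MHz + 38.4 MHz * 16384 / 2^18 = 1497.6 + 2.4 MHz
+        * = 1.5 GHz, matching the rate programmed earlier.
+        */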
+
+       vco_rate = pll_freq;
+
+       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+           pll_7nm->id, (unsigned long)vco_rate, dec, frac);
+
+       return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+       .round_rate = msm_dsi_pll_helper_clk_round_rate,
+       .set_rate = dsi_pll_7nm_vco_set_rate,
+       .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+       .prepare = dsi_pll_7nm_vco_prepare,
+       .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+       u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+       cached->pll_out_div = pll_read(pll_7nm->mmio +
+                                      REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       cached->pll_out_div &= 0x3;
+
+       cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+       cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+           pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
+           cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+       u32 val;
+
+       val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       val &= ~0x3;
+       val |= cached->pll_out_div;
+       pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                 cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+       val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       val &= ~0x3;
+       val |= cached->pll_mux;
+       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       return 0;
+}
+
+static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
+                                   enum msm_dsi_phy_usecase uc)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       void __iomem *base = pll_7nm->phy_cmn_mmio;
+       u32 data = 0x0; /* internal PLL */
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       switch (uc) {
+       case MSM_DSI_PHY_STANDALONE:
+               break;
+       case MSM_DSI_PHY_MASTER:
+               pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
+               break;
+       case MSM_DSI_PHY_SLAVE:
+               data = 0x1; /* external PLL */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set PLL src */
+       pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+       pll_7nm->uc = uc;
+
+       return 0;
+}
+
+static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
+                                    struct clk **byte_clk_provider,
+                                    struct clk **pixel_clk_provider)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       if (byte_clk_provider)
+               *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+       if (pixel_clk_provider)
+               *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+       return 0;
+}
+
+static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct device *dev = &pll_7nm->pdev->dev;
+
+       DBG("DSI PLL%d", pll_7nm->id);
+       of_clk_del_provider(dev->of_node);
+
+       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+       clk_hw_unregister(&pll_7nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
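+
+/*
+ * Resulting clock tree, as a sketch derived from the registrations below
+ * (N is the DSI instance id):
+ *
+ *   dsiNvco_clk -> dsiN_pll_out_div_clk -> dsiN_pll_bit_clk
+ *   dsiN_pll_bit_clk / 8 -> dsiN_phy_pll_out_byteclk  (byte clk provider)
+ *   dsiN_pll_bit_clk / 2 -> dsiN_pll_by_2_bit_clk
+ *   dsiN_pll_out_div_clk / 4 -> dsiN_pll_post_out_div_clk
+ *   dsiN_pclk_mux selects among dsiN_pll_bit_clk, dsiN_pll_by_2_bit_clk,
+ *   dsiN_pll_out_div_clk and dsiN_pll_post_out_div_clk, and feeds the
+ *   DIV_CTRL_7_4 divider to produce dsiN_phy_pll_out_dsiclk (pixel clock).
+ */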
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
+{
+       char clk_name[32], parent[32], vco_name[32];
+       char parent2[32], parent3[32], parent4[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "bi_tcxo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_7nm_vco,
+       };
+       struct device *dev = &pll_7nm->pdev->dev;
+       struct clk_hw_onecell_data *hw_data;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("DSI%d", pll_7nm->id);
+
+       hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+                              NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+                              GFP_KERNEL);
+       if (!hw_data)
+               return -ENOMEM;
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
+       pll_7nm->base.clk_hw.init = &vco_init;
+
+       ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
+
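+       /* 2-bit power-of-two divider: OUT_DIV can divide by 1, 2, 4 or 8 */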
+       hw = clk_hw_register_divider(dev, clk_name,
+                                    parent, CLK_SET_RATE_PARENT,
+                                    pll_7nm->mmio +
+                                    REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_base_clk_hw;
+       }
+
+       pll_7nm->out_div_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+       /* BIT CLK: DIV_CTRL_3_0 */
+       hw = clk_hw_register_divider(dev, clk_name, parent,
+                                    CLK_SET_RATE_PARENT,
+                                    pll_7nm->phy_cmn_mmio +
+                                    REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    0, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_out_div_clk_hw;
+       }
+
+       pll_7nm->bit_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         CLK_SET_RATE_PARENT, 1, 8);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_bit_clk_hw;
+       }
+
+       pll_7nm->byte_clk_hw = hw;
+       hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 2);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_byte_clk_hw;
+       }
+
+       pll_7nm->by_2_bit_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 4);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_by_2_bit_clk_hw;
+       }
+
+       pll_7nm->post_out_div_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+
+       hw = clk_hw_register_mux(dev, clk_name,
+                                ((const char *[]){
+                                parent, parent2, parent3, parent4
+                                }), 4, 0, pll_7nm->phy_cmn_mmio +
+                                REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                                0, 2, 0, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_post_out_div_clk_hw;
+       }
+
+       pll_7nm->pclk_mux_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
+
+       /* PIX CLK DIV: DIV_CTRL_7_4 */
+       hw = clk_hw_register_divider(dev, clk_name, parent,
+                                    0, pll_7nm->phy_cmn_mmio +
+                                       REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    4, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_pclk_mux_hw;
+       }
+
+       pll_7nm->out_dsiclk_hw = hw;
+       hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+       hw_data->num = NUM_PROVIDED_CLKS;
+       pll_7nm->hw_data = hw_data;
+
+       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+                                    pll_7nm->hw_data);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+               goto err_dsiclk_hw;
+       }
+
+       return 0;
+
+err_dsiclk_hw:
+       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+err_bit_clk_hw:
+       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+err_out_div_clk_hw:
+       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+err_base_clk_hw:
+       clk_hw_unregister(&pll_7nm->base.clk_hw);
+
+       return ret;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+       struct dsi_pll_7nm *pll_7nm;
+       struct msm_dsi_pll *pll;
+       int ret;
+
+       pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+       if (!pll_7nm)
+               return ERR_PTR(-ENOMEM);
+
+       DBG("DSI PLL%d", id);
+
+       pll_7nm->pdev = pdev;
+       pll_7nm->id = id;
+       pll_7nm_list[id] = pll_7nm;
+
+       pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+       if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+       if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       spin_lock_init(&pll_7nm->postdiv_lock);
+
+       pll = &pll_7nm->base;
+       pll->min_rate = 1000000000UL;
+       pll->max_rate = 3500000000UL;
+       if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+               pll->min_rate = 600000000UL;
+               pll->max_rate = (unsigned long)5000000000ULL;
+               /* workaround for max rate overflowing on 32-bit builds: */
+               pll->max_rate = max(pll->max_rate, 0xffffffffUL);
+       }
+       pll->get_provider = dsi_pll_7nm_get_provider;
+       pll->destroy = dsi_pll_7nm_destroy;
+       pll->save_state = dsi_pll_7nm_save_state;
+       pll->restore_state = dsi_pll_7nm_restore_state;
+       pll->set_usecase = dsi_pll_7nm_set_usecase;
+
+       pll_7nm->vco_delay = 1;
+
+       ret = pll_7nm_register(pll_7nm);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       /* TODO: Remove this when we have proper display handover support */
+       msm_dsi_pll_save_state(pll);
+
+       return pll;
+}
index 7933384..4968557 100644
@@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        if (ret)
                goto err_msm_uninit;
 
-       if (!dev->dma_parms) {
-               dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-                                             GFP_KERNEL);
-               if (!dev->dma_parms) {
-                       ret = -ENOMEM;
-                       goto err_msm_uninit;
-               }
-       }
-       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(dev, UINT_MAX);
 
        msm_gem_shrinker_init(ddev);
 
@@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
        if (!ctx)
                return -ENOMEM;
 
+       kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
 
-       ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
+       ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
 
        return 0;
@@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
 static void context_close(struct msm_file_private *ctx)
 {
        msm_submitqueue_close(ctx);
-       kfree(ctx);
+       msm_file_private_put(ctx);
 }
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
 }
 
 static int msm_ioctl_gem_info_iova(struct drm_device *dev,
-               struct drm_gem_object *obj, uint64_t *iova)
+               struct drm_file *file, struct drm_gem_object *obj,
+               uint64_t *iova)
 {
-       struct msm_drm_private *priv = dev->dev_private;
+       struct msm_file_private *ctx = file->driver_priv;
 
-       if (!priv->gpu)
+       if (!ctx->aspace)
                return -EINVAL;
 
        /*
         * Don't pin the memory here - just get an address so that userspace can
         * be productive
         */
-       return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
+       return msm_gem_get_iova(obj, ctx->aspace, iova);
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                args->value = msm_gem_mmap_offset(obj);
                break;
        case MSM_INFO_GET_IOVA:
-               ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+               ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
                break;
        case MSM_INFO_SET_NAME:
                /* length check should leave room for terminating null: */
@@ -1358,6 +1352,7 @@ static int __init msm_drm_register(void)
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
+       msm_dp_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
 }
@@ -1366,6 +1361,7 @@ static void __exit msm_drm_unregister(void)
 {
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
+       msm_dp_unregister();
        msm_hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
index af259b0..b9dd8f8 100644
@@ -57,6 +57,7 @@ struct msm_file_private {
        struct list_head submitqueues;
        int queueid;
        struct msm_gem_address_space *aspace;
+       struct kref ref;
 };
 
 enum msm_mdp_plane_property {
@@ -159,6 +160,8 @@ struct msm_drm_private {
        /* DSI is shared by mdp4 and mdp5 */
        struct msm_dsi *dsi[2];
 
+       struct msm_dp *dp;
+
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
        struct msm_file_private *lastctx;
@@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma);
 
+
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace);
+
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
 struct msm_gem_address_space *
@@ -302,9 +309,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive);
-void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
-void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
+void msm_gem_active_put(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
@@ -378,6 +384,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 }
 #endif
 
+#ifdef CONFIG_DRM_MSM_DP
+int __init msm_dp_register(void);
+void __exit msm_dp_unregister(void);
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+                        struct drm_encoder *encoder);
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode);
+void msm_dp_irq_postinstall(struct msm_dp *dp_display);
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
+
+#else
+static inline int __init msm_dp_register(void)
+{
+       return -EINVAL;
+}
+static inline void __exit msm_dp_unregister(void)
+{
+}
+static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
+                                      struct drm_device *dev,
+                                      struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline int msm_dp_display_enable(struct msm_dp *dp,
+                                       struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline int msm_dp_display_disable(struct msm_dp *dp,
+                                       struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline void msm_dp_display_mode_set(struct msm_dp *dp,
+                               struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+}
+
+static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+}
+
+static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
+               struct drm_minor *minor)
+{
+}
+
+#endif
+
 void __init msm_mdp_register(void);
 void __exit msm_mdp_unregister(void);
 void __init msm_dpu_register(void);
@@ -398,8 +461,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
 __printf(3, 4)
-static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
-               const char *fmt, ...) {}
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
+                       struct msm_gem_submit *submit,
+                       const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
 static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
 #endif
@@ -419,7 +483,8 @@ struct msm_gpu_submitqueue;
 int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
 struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
                u32 id);
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm,
+               struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id);
 int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
                struct drm_msm_submitqueue_query *args);
@@ -428,6 +493,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
 
 void msm_submitqueue_destroy(struct kref *kref);
 
+static inline void __msm_file_private_destroy(struct kref *kref)
+{
+       struct msm_file_private *ctx = container_of(kref,
+               struct msm_file_private, ref);
+
+       msm_gem_address_space_put(ctx->aspace);
+       kfree(ctx);
+}
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+       kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+       struct msm_file_private *ctx)
+{
+       kref_get(&ctx->ref);
+       return ctx;
+}
 
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
index b4553ca..ec60211 100644
@@ -52,26 +52,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 {
        struct device *dev = msm_obj->base.dev->dev;
 
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-       } else {
-               dma_map_sg(dev, msm_obj->sgt->sgl,
-                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-       }
+       dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
        struct device *dev = msm_obj->base.dev->dev;
 
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-       } else {
-               dma_unmap_sg(dev, msm_obj->sgt->sgl,
-                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-       }
+       dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
@@ -753,31 +741,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        return 0;
 }
 
-void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
-       msm_obj->gpu = gpu;
-       if (exclusive)
-               dma_resv_add_excl_fence(obj->resv, fence);
-       else
-               dma_resv_add_shared_fence(obj->resv, fence);
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+
+       if (!atomic_fetch_inc(&msm_obj->active_count)) {
+               msm_obj->gpu = gpu;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+       }
 }
 
-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+void msm_gem_active_put(struct drm_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_drm_private *priv = obj->dev->dev_private;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-       msm_obj->gpu = NULL;
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       if (!atomic_dec_return(&msm_obj->active_count)) {
+               msm_obj->gpu = NULL;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       }
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -852,11 +840,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
                seq_puts(m, "      vmas:");
 
-               list_for_each_entry(vma, &msm_obj->vmas, list)
-                       seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
-                               vma->aspace != NULL ? vma->aspace->name : NULL,
-                               vma->iova, vma->mapped ? "mapped" : "unmapped",
+               list_for_each_entry(vma, &msm_obj->vmas, list) {
+                       const char *name, *comm;
+                       if (vma->aspace) {
+                               struct msm_gem_address_space *aspace = vma->aspace;
+                               struct task_struct *task =
+                                       get_pid_task(aspace->pid, PIDTYPE_PID);
+                               if (task) {
+                                       comm = kstrdup(task->comm, GFP_KERNEL);
+                               } else {
+                                       comm = NULL;
+                               }
+                               name = aspace->name;
+                       } else {
+                               name = comm = NULL;
+                       }
+                       seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+                               name, comm ? ":" : "", comm ? comm : "",
+                               vma->aspace, vma->iova,
+                               vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
+                       kfree(comm);
+               }
 
                seq_puts(m, "\n");
        }
index 972490b..a1bf741 100644
@@ -24,6 +24,11 @@ struct msm_gem_address_space {
        spinlock_t lock; /* Protects drm_mm node allocation/removal */
        struct msm_mmu *mmu;
        struct kref kref;
+
+       /* For address spaces associated with a specific process, this
+        * will be non-NULL:
+        */
+       struct pid *pid;
 };
 
 struct msm_gem_vma {
@@ -83,12 +88,14 @@ struct msm_gem_object {
        struct mutex lock; /* Protects resources associated with bo */
 
        char name[32]; /* Identifier to print for the debugfs files */
+
+       atomic_t active_count;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
-       return msm_obj->gpu != NULL;
+       return atomic_read(&msm_obj->active_count);
 }
 
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -142,6 +149,7 @@ struct msm_gem_submit {
        bool valid;         /* true if no cmdstream patching needed */
        bool in_rb;         /* "sudo" mode, copy cmds into RB */
        struct msm_ringbuffer *ring;
+       struct msm_file_private *ctx;
        unsigned int nr_cmds;
        unsigned int nr_bos;
        u32 ident;         /* A "identifier" for the submit for logging */
index 722d616..482576d 100644
@@ -6,6 +6,7 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
@@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                mutex_unlock(&dev->struct_mutex);
 
        if (freed > 0)
-               pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+               trace_msm_gem_purge(freed << PAGE_SHIFT);
 
        return freed;
 }
@@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
        *(unsigned long *)ptr += unmapped;
 
        if (unmapped > 0)
-               pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+               trace_msm_gem_purge_vmaps(unmapped);
 
        return NOTIFY_DONE;
 }
index 8cb9aa1..aa5c60a 100644
@@ -27,7 +27,7 @@
 #define BO_PINNED   0x2000
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-               struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
+               struct msm_gpu *gpu,
                struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
                uint32_t nr_cmds)
 {
@@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
                return NULL;
 
        submit->dev = dev;
-       submit->aspace = aspace;
+       submit->aspace = queue->ctx->aspace;
        submit->gpu = gpu;
        submit->fence = NULL;
        submit->cmd = (void *)&submit->bos[nr_bos];
@@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                }
        }
 
-       submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
+       submit = submit_create(dev, gpu, queue, args->nr_bos,
                args->nr_cmds);
        if (!submit) {
                ret = -ENOMEM;
@@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                }
        }
 
-       msm_gpu_submit(gpu, submit, ctx);
+       msm_gpu_submit(gpu, submit);
 
        args->fence = submit->fence->seqno;
 
index 5f6a112..f914ddb 100644
@@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref)
        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
+       put_pid(aspace->pid);
        kfree(aspace);
 }
 
@@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+{
+       if (!IS_ERR_OR_NULL(aspace))
+               kref_get(&aspace->kref);
+
+       return aspace;
+}
+
 /* Actually unmap memory for the vma */
 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
@@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
                                size, prot);
 
-       if (ret)
+       if (ret) {
                vma->mapped = false;
+               vma->inuse--;
+       }
 
        return ret;
 }
index 57ddc94..55d1648 100644
@@ -24,7 +24,7 @@
 static int msm_devfreq_target(struct device *dev, unsigned long *freq,
                u32 flags)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;
 
        opp = devfreq_recommended_opp(dev, freq, flags);
@@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
        if (IS_ERR(opp))
                return PTR_ERR(opp);
 
+       trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
        if (gpu->funcs->gpu_set_freq)
                gpu->funcs->gpu_set_freq(gpu, opp);
        else
@@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 static int msm_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *status)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
        ktime_t time;
 
        if (gpu->funcs->gpu_get_freq)
@@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev,
 
 static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        if (gpu->funcs->gpu_get_freq)
                *freq = gpu->funcs->gpu_get_freq(gpu);
@@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
        int ret;
 
        DBG("%s", gpu->name);
+       trace_msm_gpu_resume(0);
 
        ret = enable_pwrrail(gpu);
        if (ret)
@@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
        int ret;
 
        DBG("%s", gpu->name);
+       trace_msm_gpu_suspend(0);
 
        devfreq_suspend_device(gpu->devfreq.devfreq);
 
@@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work)
                        struct msm_ringbuffer *ring = gpu->rb[i];
 
                        list_for_each_entry(submit, &ring->submits, node)
-                               gpu->funcs->submit(gpu, submit, NULL);
+                               gpu->funcs->submit(gpu, submit);
                }
        }
 
@@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
-               /* move to inactive: */
-               msm_gem_move_to_inactive(&msm_obj->base);
+
+               msm_gem_active_put(&msm_obj->base);
                msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
                drm_gem_object_put_locked(&msm_obj->base);
        }
@@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
@@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
+               struct drm_gem_object *drm_obj = &msm_obj->base;
                uint64_t iova;
 
                /* can't happen yet.. but when we add 2d support we'll have
@@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
-                       msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+                       dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
                else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-                       msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+                       dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
+
+               msm_gem_active_get(drm_obj, gpu);
        }
 
-       gpu->funcs->submit(gpu, submit, ctx);
-       priv->lastctx = ctx;
+       gpu->funcs->submit(gpu, submit);
+       priv->lastctx = submit->queue->ctx;
 
        hangcheck_timer_reset(gpu);
 }
@@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
        return 0;
 }
 
+/* Return a new address space for a msm_drm_private instance */
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+{
+       struct msm_gem_address_space *aspace = NULL;
+       if (!gpu)
+               return NULL;
+
+       /*
+        * If the target doesn't support private address spaces then return
+        * the global one
+        */
+       if (gpu->funcs->create_private_address_space) {
+               aspace = gpu->funcs->create_private_address_space(gpu);
+               if (!IS_ERR(aspace))
+                       aspace->pid = get_pid(task_pid(task));
+       }
+
+       if (IS_ERR_OR_NULL(aspace))
+               aspace = msm_gem_address_space_get(gpu->aspace);
+
+       return aspace;
+}
+
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
@@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                gpu->gpu_cx = NULL;
 
        gpu->pdev = pdev;
-       platform_set_drvdata(pdev, gpu);
+       platform_set_drvdata(pdev, &gpu->adreno_smmu);
 
        msm_devfreq_init(gpu);
 
index 37cffac..6c9e1fd 100644
@@ -7,6 +7,7 @@
 #ifndef __MSM_GPU_H__
 #define __MSM_GPU_H__
 
+#include <linux/adreno-smmu-priv.h>
 #include <linux/clk.h>
 #include <linux/interconnect.h>
 #include <linux/pm_opp.h>
@@ -45,8 +46,7 @@ struct msm_gpu_funcs {
        int (*hw_init)(struct msm_gpu *gpu);
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
-       void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-                       struct msm_file_private *ctx);
+       void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
        void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
        irqreturn_t (*irq)(struct msm_gpu *irq);
        struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
@@ -66,6 +66,9 @@ struct msm_gpu_funcs {
        void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
        struct msm_gem_address_space *(*create_address_space)
                (struct msm_gpu *gpu, struct platform_device *pdev);
+       struct msm_gem_address_space *(*create_private_address_space)
+               (struct msm_gpu *gpu);
+       uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 };
 
 struct msm_gpu {
@@ -74,6 +77,8 @@ struct msm_gpu {
        struct platform_device *pdev;
        const struct msm_gpu_funcs *funcs;
 
+       struct adreno_smmu_priv adreno_smmu;
+
        /* performance counters (hw & sw): */
        spinlock_t perf_lock;
        bool perfcntr_active;
@@ -144,6 +149,12 @@ struct msm_gpu {
        bool hw_apriv;
 };
 
+static inline struct msm_gpu *dev_to_gpu(struct device *dev)
+{
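+       /*
+        * drvdata is set to &gpu->adreno_smmu in msm_gpu_init(), so the
+        * containing msm_gpu can be recovered with container_of()
+        */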
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+       return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
+}
+
 /* It turns out that all targets use the same ringbuffer size */
 #define MSM_GPU_RINGBUFFER_SZ SZ_32K
 #define MSM_GPU_RINGBUFFER_BLKSIZE 32
@@ -184,6 +195,7 @@ struct msm_gpu_submitqueue {
        u32 flags;
        u32 prio;
        int faults;
+       struct msm_file_private *ctx;
        struct list_head node;
        struct kref ref;
 };
@@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config);
 
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
 struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
index 122b847..03e0c25 100644
@@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired,
                    __entry->start_ticks, __entry->end_ticks)
 );
 
+
+TRACE_EVENT(msm_gpu_freq_change,
+               TP_PROTO(u32 freq),
+               TP_ARGS(freq),
+               TP_STRUCT__entry(
+                       __field(u32, freq)
+                       ),
+               TP_fast_assign(
+                       /* trace freq in MHz to match intel_gpu_freq_change, to make life easier
+                        * for userspace
+                        */
+                       __entry->freq = DIV_ROUND_UP(freq, 1000000);
+                       ),
+               TP_printk("new_freq=%u", __entry->freq)
+);
+
+
+TRACE_EVENT(msm_gmu_freq_change,
+               TP_PROTO(u32 freq, u32 perf_index),
+               TP_ARGS(freq, perf_index),
+               TP_STRUCT__entry(
+                       __field(u32, freq)
+                       __field(u32, perf_index)
+                       ),
+               TP_fast_assign(
+                       __entry->freq = freq;
+                       __entry->perf_index = perf_index;
+                       ),
+               TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index)
+);
+
+
+TRACE_EVENT(msm_gem_purge,
+               TP_PROTO(u32 bytes),
+               TP_ARGS(bytes),
+               TP_STRUCT__entry(
+                       __field(u32, bytes)
+                       ),
+               TP_fast_assign(
+                       __entry->bytes = bytes;
+                       ),
+               TP_printk("Purging %u bytes", __entry->bytes)
+);
+
+
+TRACE_EVENT(msm_gem_purge_vmaps,
+               TP_PROTO(u32 unmapped),
+               TP_ARGS(unmapped),
+               TP_STRUCT__entry(
+                       __field(u32, unmapped)
+                       ),
+               TP_fast_assign(
+                       __entry->unmapped = unmapped;
+                       ),
+               TP_printk("Purging %u vmaps", __entry->unmapped)
+);
+
+
+TRACE_EVENT(msm_gpu_suspend,
+               TP_PROTO(int dummy),
+               TP_ARGS(dummy),
+               TP_STRUCT__entry(
+                       __field(u32, dummy)
+                       ),
+               TP_fast_assign(
+                       __entry->dummy = dummy;
+                       ),
+               TP_printk("%u", __entry->dummy)
+);
+
+
+TRACE_EVENT(msm_gpu_resume,
+               TP_PROTO(int dummy),
+               TP_ARGS(dummy),
+               TP_STRUCT__entry(
+                       __field(u32, dummy)
+                       ),
+               TP_fast_assign(
+                       __entry->dummy = dummy;
+                       ),
+               TP_printk("%u", __entry->dummy)
+);
+
 #endif
 
 #undef TRACE_INCLUDE_PATH
index 310a31b..3794961 100644
@@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
 {
        struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
        unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
-       struct scatterlist *sg;
+       struct sg_dma_page_iter dma_iter;
        unsigned prot_bits = 0;
-       unsigned i, j;
 
        if (prot & IOMMU_WRITE)
                prot_bits |= 1;
        if (prot & IOMMU_READ)
                prot_bits |= 2;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-               dma_addr_t addr = sg->dma_address;
-               for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
-                       gpummu->table[idx] = addr | prot_bits;
-                       addr += GPUMMU_PAGE_SIZE;
-               }
+       for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+               dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+               int i;
+
+               for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+                       gpummu->table[idx++] = (addr + i) | prot_bits;
        }
 
        /* we can improve by deferring flush for multiple map() */
@@ -102,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
        }
 
        gpummu->gpu = gpu;
-       msm_mmu_init(&gpummu->base, dev, &funcs);
+       msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
 
        return &gpummu->base;
 }
index 3a381a9..3a83ffd 100644 (file)
  * Author: Rob Clark <robdclark@gmail.com>
  */
 
+#include <linux/adreno-smmu-priv.h>
+#include <linux/io-pgtable.h>
 #include "msm_drv.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
        struct msm_mmu base;
        struct iommu_domain *domain;
+       atomic_t pagetables;
 };
+
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
 
+struct msm_iommu_pagetable {
+       struct msm_mmu base;
+       struct msm_mmu *parent;
+       struct io_pgtable_ops *pgtbl_ops;
+       phys_addr_t ttbr;
+       u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+       return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+               size_t size)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       size_t unmapped = 0;
+
+       /* Unmap the block one page at a time */
+       while (unmapped < size) {
+               if (!ops->unmap(ops, iova + unmapped, 4096, NULL))
+                       break;
+               unmapped += 4096;
+       }
+
+       iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+       /* unmapped only reaches size if every page was torn down */
+       return (unmapped == size) ? 0 : -EINVAL;
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+               struct sg_table *sgt, size_t len, int prot)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       struct scatterlist *sg;
+       size_t mapped = 0;
+       u64 addr = iova;
+       unsigned int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t size = sg->length;
+               phys_addr_t phys = sg_phys(sg);
+
+               /* Map the block one page at a time */
+               while (size) {
+                       if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+                               msm_iommu_pagetable_unmap(mmu, iova, mapped);
+                               return -EINVAL;
+                       }
+
+                       phys += 4096;
+                       addr += 4096;
+                       size -= 4096;
+                       mapped += 4096;
+               }
+       }
+
+       return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+       struct adreno_smmu_priv *adreno_smmu =
+               dev_get_drvdata(pagetable->parent->dev);
+
+       /*
+        * If this is the last attached pagetable for the parent,
+        * disable TTBR0 in the arm-smmu driver
+        */
+       if (atomic_dec_return(&iommu->pagetables) == 0)
+               adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+
+       free_io_pgtable_ops(pagetable->pgtbl_ops);
+       kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+               phys_addr_t *ttbr, int *asid)
+{
+       struct msm_iommu_pagetable *pagetable;
+
+       if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+               return -EINVAL;
+
+       pagetable = to_pagetable(mmu);
+
+       if (ttbr)
+               *ttbr = pagetable->ttbr;
+
+       if (asid)
+               *asid = pagetable->asid;
+
+       return 0;
+}
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+               .map = msm_iommu_pagetable_map,
+               .unmap = msm_iommu_pagetable_unmap,
+               .destroy = msm_iommu_pagetable_destroy,
+};
+
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+               size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+               unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+       .tlb_flush_all = msm_iommu_tlb_flush_all,
+       .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+       .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
+       .tlb_add_page = msm_iommu_tlb_add_page,
+};
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+{
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+       struct msm_iommu *iommu = to_msm_iommu(parent);
+       struct msm_iommu_pagetable *pagetable;
+       const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+       struct io_pgtable_cfg ttbr0_cfg;
+       int ret;
+
+       /* Get the pagetable configuration from the domain */
+       if (adreno_smmu->cookie)
+               ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+       if (!ttbr1_cfg)
+               return ERR_PTR(-ENODEV);
+
+       pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+       if (!pagetable)
+               return ERR_PTR(-ENOMEM);
+
+       msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+               MSM_MMU_IOMMU_PAGETABLE);
+
+       /* Clone the TTBR1 cfg as a starting point for the TTBR0 cfg: */
+       ttbr0_cfg = *ttbr1_cfg;
+
+       /* The incoming cfg will have the TTBR1 quirk enabled */
+       ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+       ttbr0_cfg.tlb = &null_tlb_ops;
+
+       pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+               &ttbr0_cfg, iommu->domain);
+
+       if (!pagetable->pgtbl_ops) {
+               kfree(pagetable);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * If this is the first pagetable that we've allocated, send it back to
+        * the arm-smmu driver as a trigger to set up TTBR0
+        */
+       if (atomic_inc_return(&iommu->pagetables) == 1) {
+               ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+               if (ret) {
+                       free_io_pgtable_ops(pagetable->pgtbl_ops);
+                       kfree(pagetable);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       /* Needed later for TLB flush */
+       pagetable->parent = parent;
+       pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+       /*
+        * TODO we would like each set of page tables to have a unique ASID
+        * to optimize TLB invalidation.  But iommu_flush_tlb_all() will
+        * end up flushing the ASID used for TTBR1 pagetables, which is not
+        * what we want.  So for now just use the same ASID as TTBR1.
+        */
+       pagetable->asid = 0;
+
+       return &pagetable->base;
+}
+
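
A hedged consumer sketch for the per-process pagetable API added above. Only msm_iommu_pagetable_create() and msm_iommu_pagetable_params() come from this patch; the caller, the error handling style, and the way the TTBR0/ASID pair reaches the GPU are assumptions.

#include <linux/err.h>
#include "msm_mmu.h"

static int example_make_process_pagetable(struct msm_mmu *parent)
{
	struct msm_mmu *pt;
	phys_addr_t ttbr;
	int asid;

	pt = msm_iommu_pagetable_create(parent);
	if (IS_ERR(pt))
		return PTR_ERR(pt);

	if (msm_iommu_pagetable_params(pt, &ttbr, &asid)) {
		pt->funcs->destroy(pt);
		return -EINVAL;
	}

	/* program ttbr/asid into the GPU's SMMU context (hardware specific) */
	return 0;
}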
 static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
@@ -36,7 +231,11 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;
 
-       ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
+       ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);
 
        return (ret == len) ? 0 : -EINVAL;
@@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
 
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
        iommu_unmap(iommu->domain, iova, len);
 
        return 0;
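
To make the sign extension above concrete, a worked example; the input value is illustrative. With a 49-bit VA space, any IOVA with bit 48 set must have bits 63:49 filled with ones before the arm-smmu driver sees it:

#include <linux/bits.h>
#include <linux/types.h>

static u64 example_sign_extend_iova(u64 iova)
{
	/* 0x0001800000000000 has bit 48 set, so bits 63:49 are filled,
	 * yielding 0xffff800000000000, the canonical form arm-smmu expects.
	 */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	return iova;
}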
@@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
                return ERR_PTR(-ENOMEM);
 
        iommu->domain = domain;
-       msm_mmu_init(&iommu->base, dev, &funcs);
+       msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
        iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
+       atomic_set(&iommu->pagetables, 0);
+
        ret = iommu_attach_device(iommu->domain, dev);
        if (ret) {
                kfree(iommu);
index 3a534ee..61ade89 100644 (file)
@@ -17,18 +17,26 @@ struct msm_mmu_funcs {
        void (*destroy)(struct msm_mmu *mmu);
 };
 
+enum msm_mmu_type {
+       MSM_MMU_GPUMMU,
+       MSM_MMU_IOMMU,
+       MSM_MMU_IOMMU_PAGETABLE,
+};
+
 struct msm_mmu {
        const struct msm_mmu_funcs *funcs;
        struct device *dev;
        int (*handler)(void *arg, unsigned long iova, int flags);
        void *arg;
+       enum msm_mmu_type type;
 };
 
 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
-               const struct msm_mmu_funcs *funcs)
+               const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
 {
        mmu->dev = dev;
        mmu->funcs = funcs;
+       mmu->type = type;
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
@@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
        mmu->handler = handler;
 }
 
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+
 void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
                dma_addr_t *tran_error);
 
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
+               int *asid);
+
 #endif /* __MSM_MMU_H__ */
index 7764373..0987d6b 100644 (file)
@@ -31,6 +31,7 @@ struct msm_rbmemptrs {
        volatile uint32_t fence;
 
        volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
+       volatile u64 ttbr0;
 };
 
 struct msm_ringbuffer {
index a1d94be..c3d2061 100644 (file)
@@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref)
        struct msm_gpu_submitqueue *queue = container_of(kref,
                struct msm_gpu_submitqueue, ref);
 
+       msm_file_private_put(queue->ctx);
+
        kfree(queue);
 }
 
@@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
         * No lock needed in close and there won't
         * be any more user ioctls coming our way
         */
-       list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+       list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
+               list_del(&entry->node);
                msm_submitqueue_put(entry);
+       }
 }
 
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
@@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 
        write_lock(&ctx->queuelock);
 
+       queue->ctx = msm_file_private_get(ctx);
        queue->id = ctx->queueid++;
 
        if (id)
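
Stepping back from the hunks in this file: queue creation now takes a reference on the owning context and queue destruction drops it, so a context can no longer be freed out from under a live queue. A hedged sketch of that pairing, with illustrative names; only msm_file_private_get()/msm_file_private_put() are from the patch:

#include "msm_drv.h"

/* Illustrative container; the real one is struct msm_gpu_submitqueue. */
struct example_queue {
	struct msm_file_private *ctx;
};

static void example_queue_create(struct example_queue *q,
				 struct msm_file_private *ctx)
{
	q->ctx = msm_file_private_get(ctx);	/* pinned at create time */
}

static void example_queue_destroy(struct example_queue *q)
{
	msm_file_private_put(q->ctx);		/* dropped with the queue */
}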
index 810bf69..7b640e0 100644 (file)
@@ -239,8 +239,8 @@ nv50_dp_mode_valid(struct drm_connector *connector,
                return MODE_NO_INTERLACE;
 
        max_clock = outp->dp.link_nr * outp->dp.link_bw;
-       ds_clock = drm_dp_downstream_max_clock(outp->dp.dpcd,
-                                              outp->dp.downstream_ports);
+       ds_clock = drm_dp_downstream_max_dotclock(outp->dp.dpcd,
+                                                 outp->dp.downstream_ports);
        if (ds_clock)
                max_clock = min(max_clock, ds_clock);
 
index d0d12d5..f67f223 100644 (file)
@@ -1297,10 +1297,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
                omap_obj->dma_addr = sg_dma_address(sgt->sgl);
        } else {
                /* Create pages list from sgt */
-               struct sg_page_iter iter;
                struct page **pages;
                unsigned int npages;
-               unsigned int i = 0;
+               unsigned int ret;
 
                npages = DIV_ROUND_UP(size, PAGE_SIZE);
                pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
@@ -1311,14 +1310,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
                }
 
                omap_obj->pages = pages;
-
-               for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
-                       pages[i++] = sg_page_iter_page(&iter);
-                       if (i > npages)
-                               break;
-               }
-
-               if (WARN_ON(i != npages)) {
+               ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
+                                                      npages);
+               if (ret) {
                        omap_gem_free_object(obj);
                        obj = ERR_PTR(-ENOMEM);
                        goto done;
index 33355dd..1a6cea0 100644 (file)
@@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 
                for (i = 0; i < n_sgt; i++) {
                        if (bo->sgts[i].sgl) {
-                               dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
-                                            bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+                               dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
+                                                 DMA_BIDIRECTIONAL, 0);
                                sg_free_table(&bo->sgts[i]);
                        }
                }
index e8f7b11..776448c 100644 (file)
@@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
        struct io_pgtable_ops *ops = mmu->pgtbl_ops;
        u64 start_iova = iova;
 
-       for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
+       for_each_sgtable_dma_sg(sgt, sgl, count) {
                unsigned long paddr = sg_dma_address(sgl);
                size_t len = sg_dma_len(sgl);
 
@@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        if (ret)
                goto err_pages;
 
-       if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
-               ret = -EINVAL;
+       ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret)
                goto err_map;
-       }
 
        mmu_map_sg(pfdev, bomapping->mmu, addr,
                   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
index 7b69d6d..e0ae911 100644 (file)
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 
        /* get matching reference and feedback divider */
        *ref_div = min(max(den/post_div, 1u), ref_div_max);
-       *fb_div = max(nom * *ref_div * post_div / den, 1u);
+       *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
        /* limit fb divider to its maximum */
        if (*fb_div > fb_div_max) {
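
A numeric illustration of the rounding change above; the operand values are made up. DIV_ROUND_CLOSEST(a, b) evaluates to (a + b / 2) / b for positive operands, halving the worst-case feedback-divider error compared to truncation:

#include <linux/kernel.h>

static unsigned int example_fb_div(void)
{
	unsigned int nom = 125, ref_div = 2, post_div = 2, den = 3;

	/* old: 125 * 2 * 2 / 3 = 166 (truncated, error ~0.67) */
	/* new: DIV_ROUND_CLOSEST(500, 3) = 167 (error ~0.33)  */
	return DIV_ROUND_CLOSEST(nom * ref_div * post_div, den);
}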
index 8c5d6fd..05c4196 100644 (file)
@@ -712,6 +712,31 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_
 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
 
+static ssize_t radeon_hwmon_show_sclk(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
+       u32 sclk = 0;
+
+       /* Can't get clock frequency when the card is off */
+       if ((rdev->flags & RADEON_IS_PX) &&
+           (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
+       if (rdev->asic->dpm.get_current_sclk)
+               sclk = radeon_dpm_get_current_sclk(rdev);
+
+       /*
+        * The value returned by dpm is in 10 kHz units; convert it to Hz
+        * for hwmon.
+        */
+       sclk *= 10000;
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", sclk);
+}
+
+static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL,
+                         0);
+
 
 static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -721,6 +746,7 @@ static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
        &sensor_dev_attr_pwm1_min.dev_attr.attr,
        &sensor_dev_attr_pwm1_max.dev_attr.attr,
+       &sensor_dev_attr_freq1_input.dev_attr.attr,
        NULL
 };
 
@@ -738,7 +764,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+            attr == &sensor_dev_attr_freq1_input.dev_attr.attr))
                return 0;
 
        /* Skip fan attributes if fan is not present */
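
Since hwmon frequency attributes are expressed in Hz, a userspace reader divides back to recover MHz. A hedged sketch; the hwmon instance path is hypothetical and would normally be discovered by name:

#include <stdio.h>

int main(void)
{
	unsigned long hz;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/freq1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &hz) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("sclk: %lu MHz\n", hz / 1000000);
	return 0;
}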
index ebad27c..27b14ef 100644 (file)
@@ -188,7 +188,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
            vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
                return NULL;
 
-       /* we definately need to flush */
+       /* we definitely need to flush */
        vm_id->pd_gpu_addr = ~0ll;
 
        /* skip over VMID 0, since it is the system VM */
index 8007211..58557c2 100644 (file)
@@ -117,7 +117,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
        if (r)
                return r;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = (rdev->uvd.gpu_addr >> 3) + 16;
        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
@@ -360,7 +360,7 @@ int uvd_v1_0_start(struct radeon_device *rdev)
        /* Set the write pointer delay */
        WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
 
-       /* programm the 4GB memory segment for rptr and ring buffer */
+       /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
                                   (0x7 << 16) | (0x1 << 31));
 
index 23b18ed..6266167 100644 (file)
@@ -109,7 +109,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
        if (r)
                return r;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = rdev->uvd.gpu_addr >> 3;
        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
index dc54fa4..f9e97fa 100644 (file)
@@ -40,7 +40,7 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
        uint64_t addr;
        uint32_t size;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
 
        /* skip over the header of the new firmware format */
        if (rdev->uvd.fw_header_present)
index f65d148..b47e744 100644 (file)
@@ -22,11 +22,11 @@ config DRM_RCAR_CMM
          Enable support for R-Car Color Management Module (CMM).
 
 config DRM_RCAR_DW_HDMI
-       tristate "R-Car DU Gen3 HDMI Encoder Support"
+       tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
        depends on DRM && OF
        select DRM_DW_HDMI
        help
-         Enable support for R-Car Gen3 internal HDMI encoder.
+         Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
 
 config DRM_RCAR_LVDS
        tristate "R-Car DU LVDS Encoder Support"
@@ -49,3 +49,4 @@ config DRM_RCAR_VSP
 config DRM_RCAR_WRITEBACK
        bool
        default y if ARM64
+       depends on DRM_RCAR_DU
index f53b0ec..447be99 100644 (file)
@@ -186,6 +186,35 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
        .lvds_clk_mask =  BIT(1) | BIT(0),
 };
 
+static const struct rcar_du_device_info rcar_du_r8a774e1_info = {
+       .gen = 3,
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+                 | RCAR_DU_FEATURE_VSP1_SOURCE
+                 | RCAR_DU_FEATURE_INTERLACED
+                 | RCAR_DU_FEATURE_TVM_SYNC,
+       .channels_mask = BIT(3) | BIT(1) | BIT(0),
+       .routes = {
+               /*
+                * R8A774E1 has one RGB output, one LVDS output and one HDMI
+                * output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(2),
+                       .port = 0,
+               },
+               [RCAR_DU_OUTPUT_HDMI0] = {
+                       .possible_crtcs = BIT(1),
+                       .port = 1,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0),
+                       .port = 2,
+               },
+       },
+       .num_lvds = 1,
+       .dpll_mask =  BIT(1),
+};
+
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
        .gen = 1,
        .features = RCAR_DU_FEATURE_INTERLACED
@@ -216,8 +245,9 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
        .channels_mask = BIT(2) | BIT(1) | BIT(0),
        .routes = {
                /*
-                * R8A7790 has one RGB output, two LVDS outputs and one
-                * (currently unsupported) TCON output.
+                * R8A7742 and R8A7790 each have one RGB output and two LVDS
+                * outputs. Additionally, R8A7790 supports one TCON output
+                * (currently unsupported by the driver).
                 */
                [RCAR_DU_OUTPUT_DPAD0] = {
                        .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
@@ -443,6 +473,7 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
 };
 
 static const struct of_device_id rcar_du_of_table[] = {
+       { .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
        { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
        { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
        { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
@@ -450,6 +481,7 @@ static const struct of_device_id rcar_du_of_table[] = {
        { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
        { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
        { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
+       { .compatible = "renesas,du-r8a774e1", .data = &rcar_du_r8a774e1_info },
        { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
        { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
        { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -458,6 +490,7 @@ static const struct of_device_id rcar_du_of_table[] = {
        { .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
        { .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
        { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
+       { .compatible = "renesas,du-r8a77961", .data = &rcar_du_r8a7796_info },
        { .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
        { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
        { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
index 4823291..72dda44 100644 (file)
@@ -40,6 +40,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_RGB565,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -47,6 +48,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_ARGB555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -61,6 +63,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_XBGR32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_RGB888,
        }, {
@@ -68,6 +71,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_ABGR32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_ARGB8888,
        }, {
@@ -75,6 +79,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_UYVY,
                .bpp = 16,
                .planes = 1,
+               .hsub = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -82,6 +87,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_YUYV,
                .bpp = 16,
                .planes = 1,
+               .hsub = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -89,6 +95,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_NV12M,
                .bpp = 12,
                .planes = 2,
+               .hsub = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -96,6 +103,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_NV21M,
                .bpp = 12,
                .planes = 2,
+               .hsub = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
@@ -103,6 +111,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_NV16M,
                .bpp = 16,
                .planes = 2,
+               .hsub = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        },
@@ -115,156 +124,187 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
                .v4l2 = V4L2_PIX_FMT_RGB332,
                .bpp = 8,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_ARGB4444,
                .v4l2 = V4L2_PIX_FMT_ARGB444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_XRGB4444,
                .v4l2 = V4L2_PIX_FMT_XRGB444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBA4444,
                .v4l2 = V4L2_PIX_FMT_RGBA444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBX4444,
                .v4l2 = V4L2_PIX_FMT_RGBX444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_ABGR4444,
                .v4l2 = V4L2_PIX_FMT_ABGR444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_XBGR4444,
                .v4l2 = V4L2_PIX_FMT_XBGR444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRA4444,
                .v4l2 = V4L2_PIX_FMT_BGRA444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRX4444,
                .v4l2 = V4L2_PIX_FMT_BGRX444,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBA5551,
                .v4l2 = V4L2_PIX_FMT_RGBA555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBX5551,
                .v4l2 = V4L2_PIX_FMT_RGBX555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_ABGR1555,
                .v4l2 = V4L2_PIX_FMT_ABGR555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_XBGR1555,
                .v4l2 = V4L2_PIX_FMT_XBGR555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRA5551,
                .v4l2 = V4L2_PIX_FMT_BGRA555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRX5551,
                .v4l2 = V4L2_PIX_FMT_BGRX555,
                .bpp = 16,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGR888,
                .v4l2 = V4L2_PIX_FMT_RGB24,
                .bpp = 24,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGB888,
                .v4l2 = V4L2_PIX_FMT_BGR24,
                .bpp = 24,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBA8888,
                .v4l2 = V4L2_PIX_FMT_BGRA32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_RGBX8888,
                .v4l2 = V4L2_PIX_FMT_BGRX32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_ABGR8888,
                .v4l2 = V4L2_PIX_FMT_RGBA32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_XBGR8888,
                .v4l2 = V4L2_PIX_FMT_RGBX32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRA8888,
                .v4l2 = V4L2_PIX_FMT_ARGB32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_BGRX8888,
                .v4l2 = V4L2_PIX_FMT_XRGB32,
                .bpp = 32,
                .planes = 1,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_YVYU,
                .v4l2 = V4L2_PIX_FMT_YVYU,
                .bpp = 16,
                .planes = 1,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_NV61,
                .v4l2 = V4L2_PIX_FMT_NV61M,
                .bpp = 16,
                .planes = 2,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_YUV420,
                .v4l2 = V4L2_PIX_FMT_YUV420M,
                .bpp = 12,
                .planes = 3,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_YVU420,
                .v4l2 = V4L2_PIX_FMT_YVU420M,
                .bpp = 12,
                .planes = 3,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_YUV422,
                .v4l2 = V4L2_PIX_FMT_YUV422M,
                .bpp = 16,
                .planes = 3,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_YVU422,
                .v4l2 = V4L2_PIX_FMT_YVU422M,
                .bpp = 16,
                .planes = 3,
+               .hsub = 2,
        }, {
                .fourcc = DRM_FORMAT_YUV444,
                .v4l2 = V4L2_PIX_FMT_YUV444M,
                .bpp = 24,
                .planes = 3,
+               .hsub = 1,
        }, {
                .fourcc = DRM_FORMAT_YVU444,
                .v4l2 = V4L2_PIX_FMT_YVU444M,
                .bpp = 24,
                .planes = 3,
+               .hsub = 1,
        },
 };
 
@@ -311,6 +351,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 {
        struct rcar_du_device *rcdu = dev->dev_private;
        const struct rcar_du_format_info *format;
+       unsigned int chroma_pitch;
        unsigned int max_pitch;
        unsigned int align;
        unsigned int i;
@@ -353,10 +394,19 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-EINVAL);
        }
 
+       /*
+        * Calculate the chroma plane(s) pitch using the horizontal subsampling
+        * factor. For semi-planar formats, the U and V planes are combined, so
+        * the pitch must be doubled.
+        */
+       chroma_pitch = mode_cmd->pitches[0] / format->hsub;
+       if (format->planes == 2)
+               chroma_pitch *= 2;
+
        for (i = 1; i < format->planes; ++i) {
-               if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) {
+               if (mode_cmd->pitches[i] != chroma_pitch) {
                        dev_dbg(dev->dev,
-                               "luma and chroma pitches do not match\n");
+                               "luma and chroma pitches are not compatible\n");
                        return ERR_PTR(-EINVAL);
                }
        }
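
A worked example of the pitch rule above, using formats from the table earlier in this file; the width is illustrative:

/* For a 1920-pixel-wide NV12 buffer (hsub = 2, planes = 2) the chroma
 * pitch is 1920 / 2 * 2 = 1920, because U and V share one interleaved
 * plane.  For YUV420 (hsub = 2, planes = 3) each chroma plane keeps
 * 1920 / 2 = 960.
 */
static unsigned int example_chroma_pitch(unsigned int luma_pitch,
					 unsigned int hsub,
					 unsigned int planes)
{
	unsigned int chroma_pitch = luma_pitch / hsub;

	if (planes == 2)
		chroma_pitch *= 2;

	return chroma_pitch;
}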
index 0346504..8f5fff1 100644 (file)
@@ -22,6 +22,7 @@ struct rcar_du_format_info {
        u32 v4l2;
        unsigned int bpp;
        unsigned int planes;
+       unsigned int hsub;
        unsigned int pnmr;
        unsigned int edf;
 };
index f1a81c9..f6a69aa 100644 (file)
@@ -13,6 +13,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -197,9 +198,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
                        goto fail;
 
                ret = vsp1_du_map_sg(vsp->vsp, sgt);
-               if (!ret) {
+               if (ret) {
                        sg_free_table(sgt);
-                       ret = -ENOMEM;
                        goto fail;
                }
        }
@@ -279,7 +279,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
 
        if (plane->state->visible)
                rcar_du_vsp_plane_setup(rplane);
-       else
+       else if (old_state->crtc)
                vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
                                      rplane->index, NULL);
 }
@@ -341,6 +341,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
        .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
 };
 
+static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
+{
+       struct rcar_du_vsp *vsp = res;
+
+       put_device(vsp->vsp);
+}
+
 int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
                     unsigned int crtcs)
 {
@@ -357,6 +364,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 
        vsp->vsp = &pdev->dev;
 
+       ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+       if (ret < 0)
+               return ret;
+
        ret = vsp1_du_init(vsp->vsp);
        if (ret < 0)
                return ret;
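
A hedged sketch of the drmm_add_action() pattern used above: the registered callback runs when the drm_device is released, tying the reference taken at init time to the device lifetime. Names are illustrative; only drmm_add_action() is from this hunk:

#include <drm/drm_managed.h>

static void example_release(struct drm_device *drm, void *res)
{
	put_device(res);	/* drop the reference taken below */
}

static int example_track_device(struct drm_device *drm, struct device *dev)
{
	int ret;

	get_device(dev);

	ret = drmm_add_action(drm, example_release, dev);
	if (ret < 0)
		put_device(dev);	/* registration failed, drop manually */

	return ret;
}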
index bced729..70dbbe4 100644 (file)
@@ -978,11 +978,13 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
 };
 
 static const struct of_device_id rcar_lvds_of_table[] = {
+       { .compatible = "renesas,r8a7742-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
+       { .compatible = "renesas,r8a774e1-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
index 0055d86..62e5d09 100644 (file)
@@ -36,8 +36,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
 
        rk_obj->dma_addr = rk_obj->mm.start;
 
-       ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
-                          rk_obj->sgt->nents, prot);
+       ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+                               prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
@@ -99,11 +99,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
-       for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+       for_each_sgtable_sg(rk_obj->sgt, s, i)
                sg_dma_address(s) = sg_phys(s);
 
-       dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
-                              DMA_TO_DEVICE);
+       dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
 
        return 0;
 
@@ -351,8 +350,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
-                       dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
-                                    rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
+                       dma_unmap_sgtable(drm->dev, rk_obj->sgt,
+                                         DMA_BIDIRECTIONAL, 0);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
@@ -461,23 +460,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
        return sgt;
 }
 
-static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
-                                                    int count)
-{
-       struct scatterlist *s;
-       dma_addr_t expected = sg_dma_address(sgt->sgl);
-       unsigned int i;
-       unsigned long size = 0;
-
-       for_each_sg(sgt->sgl, s, count, i) {
-               if (sg_dma_address(s) != expected)
-                       break;
-               expected = sg_dma_address(s) + sg_dma_len(s);
-               size += sg_dma_len(s);
-       }
-       return size;
-}
-
 static int
 rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
@@ -494,15 +476,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm,
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
 {
-       int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
-                              DMA_BIDIRECTIONAL);
-       if (!count)
-               return -EINVAL;
+       int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
+       if (err)
+               return err;
 
-       if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
+       if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
-               dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
-                            DMA_BIDIRECTIONAL);
+               dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
                return -EINVAL;
        }
 
index 8b45c3a..69de2c7 100644 (file)
@@ -101,7 +101,7 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
 /**
  * drm_sched_fence_release_scheduled - callback that fence can be freed
  *
- * @fence: fence
+ * @f: fence
  *
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
index bd990d1..1d696ec 100644 (file)
@@ -5,6 +5,8 @@
 
 #define PREFIX_STR "[drm_dp_mst_helper]"
 
+#include <linux/random.h>
+
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_print.h>
 
@@ -237,6 +239,21 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused)
        in.u.i2c_write.bytes = data;
        DO_TEST();
 
+       in.req_type = DP_QUERY_STREAM_ENC_STATUS;
+       in.u.enc_status.stream_id = 1;
+       DO_TEST();
+       get_random_bytes(in.u.enc_status.client_id,
+                        sizeof(in.u.enc_status.client_id));
+       DO_TEST();
+       in.u.enc_status.stream_event = 3;
+       DO_TEST();
+       in.u.enc_status.valid_stream_event = 0;
+       DO_TEST();
+       in.u.enc_status.stream_behavior = 3;
+       DO_TEST();
+       in.u.enc_status.valid_stream_behavior = 1;
+       DO_TEST();
+
 #undef DO_TEST
        return 0;
 }
index b254432..f38de08 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/gpio/consumer.h>
 
 #include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
@@ -116,6 +117,7 @@ struct tegra_output {
        struct device_node *of_node;
        struct device *dev;
 
+       struct drm_bridge *bridge;
        struct drm_panel *panel;
        struct i2c_adapter *ddc;
        const struct edid *edid;
index 47e2935..a2bac20 100644 (file)
@@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
                 * the SG table needs to be copied to avoid overwriting any
                 * other potential users of the original SG table.
                 */
-               err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
-                                            GFP_KERNEL);
+               err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
+                                            obj->sgt->orig_nents, GFP_KERNEL);
                if (err < 0)
                        goto free;
        } else {
@@ -196,8 +196,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
 
        bo->iova = bo->mm->start;
 
-       bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
-                               bo->sgt->nents, prot);
+       bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
@@ -264,8 +263,7 @@ free:
 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 {
        if (bo->pages) {
-               dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-                            DMA_FROM_DEVICE);
+               dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
@@ -290,12 +288,9 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
                goto put_pages;
        }
 
-       err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-                        DMA_FROM_DEVICE);
-       if (err == 0) {
-               err = -EFAULT;
+       err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+       if (err)
                goto free_sgt;
-       }
 
        return 0;
 
@@ -571,7 +566,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                        goto free;
        }
 
-       if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+       if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                goto free;
 
        return sgt;
@@ -590,7 +585,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        struct tegra_bo *bo = to_tegra_bo(gem);
 
        if (bo->pages)
-               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+               dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 
        sg_free_table(sgt);
        kfree(sgt);
@@ -609,8 +604,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
        struct drm_device *drm = gem->dev;
 
        if (bo->pages)
-               dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-                                   DMA_FROM_DEVICE);
+               dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
 
        return 0;
 }
@@ -623,8 +617,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
        struct drm_device *drm = gem->dev;
 
        if (bo->pages)
-               dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-                                      DMA_TO_DEVICE);
+               dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
 
        return 0;
 }
index a3adb9e..5a4fd0d 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_simple_kms_helper.h>
 
@@ -99,27 +100,38 @@ int tegra_output_probe(struct tegra_output *output)
        if (!output->of_node)
                output->of_node = output->dev->of_node;
 
+       err = drm_of_find_panel_or_bridge(output->of_node, -1, -1,
+                                         &output->panel, &output->bridge);
+       if (err && err != -ENODEV)
+               return err;
+
        panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
        if (panel) {
+               /*
+                * Don't mix the nvidia,panel phandle with the OF graph in a
+                * device-tree.
+                */
+               WARN_ON(output->panel || output->bridge);
+
                output->panel = of_drm_find_panel(panel);
+               of_node_put(panel);
+
                if (IS_ERR(output->panel))
                        return PTR_ERR(output->panel);
-
-               of_node_put(panel);
        }
 
        output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
 
        ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
        if (ddc) {
-               output->ddc = of_find_i2c_adapter_by_node(ddc);
+               output->ddc = of_get_i2c_adapter_by_node(ddc);
+               of_node_put(ddc);
+
                if (!output->ddc) {
                        err = -EPROBE_DEFER;
                        of_node_put(ddc);
                        return err;
                }
-
-               of_node_put(ddc);
        }
 
        output->hpd_gpio = devm_gpiod_get_from_of_node(output->dev,
@@ -173,7 +185,7 @@ void tegra_output_remove(struct tegra_output *output)
                free_irq(output->hpd_irq, output);
 
        if (output->ddc)
-               put_device(&output->ddc->dev);
+               i2c_put_adapter(output->ddc);
 }
 
 int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
index 4cd0461..539d149 100644 (file)
@@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
                }
 
                if (sgt) {
-                       err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
-                                        DMA_TO_DEVICE);
-                       if (err == 0) {
-                               err = -ENOMEM;
+                       err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
+                       if (err)
                                goto unpin;
-                       }
 
                        /*
                         * The display controller needs contiguous memory, so
@@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
                         * map its SG table to a single contiguous chunk of
                         * I/O virtual memory.
                         */
-                       if (err > 1) {
+                       if (sgt->nents > 1) {
                                err = -EINVAL;
                                goto unpin;
                        }
@@ -166,8 +163,7 @@ unpin:
                struct sg_table *sgt = state->sgt[i];
 
                if (sgt)
-                       dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
-                                    DMA_TO_DEVICE);
+                       dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
 
                host1x_bo_unpin(dc->dev, &bo->base, sgt);
                state->iova[i] = DMA_MAPPING_ERROR;
@@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
                struct sg_table *sgt = state->sgt[i];
 
                if (sgt)
-                       dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
-                                    DMA_TO_DEVICE);
+                       dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
 
                host1x_bo_unpin(dc->dev, &bo->base, sgt);
                state->iova[i] = DMA_MAPPING_ERROR;
index 0562a7e..4142a56 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/clk.h>
 
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_simple_kms_helper.h>
 
 #include "drm.h"
@@ -85,45 +85,13 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
                tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
-       .reset = drm_atomic_helper_connector_reset,
-       .detect = tegra_output_connector_detect,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = tegra_output_connector_destroy,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static enum drm_mode_status
-tegra_rgb_connector_mode_valid(struct drm_connector *connector,
-                              struct drm_display_mode *mode)
-{
-       /*
-        * FIXME: For now, always assume that the mode is okay. There are
-        * unresolved issues with clk_round_rate(), which doesn't always
-        * reliably report whether a frequency can be set or not.
-        */
-       return MODE_OK;
-}
-
-static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
-       .get_modes = tegra_output_connector_get_modes,
-       .mode_valid = tegra_rgb_connector_mode_valid,
-};
-
 static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
        struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_rgb *rgb = to_rgb(output);
 
-       if (output->panel)
-               drm_panel_disable(output->panel);
-
        tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
        tegra_dc_commit(rgb->dc);
-
-       if (output->panel)
-               drm_panel_unprepare(output->panel);
 }
 
 static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
@@ -132,9 +100,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
        struct tegra_rgb *rgb = to_rgb(output);
        u32 value;
 
-       if (output->panel)
-               drm_panel_prepare(output->panel);
-
        tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
 
        value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -156,9 +121,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
        tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
 
        tegra_dc_commit(rgb->dc);
-
-       if (output->panel)
-               drm_panel_enable(output->panel);
 }
 
 static int
@@ -267,24 +229,68 @@ int tegra_dc_rgb_remove(struct tegra_dc *dc)
 int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
 {
        struct tegra_output *output = dc->rgb;
+       struct drm_connector *connector;
        int err;
 
        if (!dc->rgb)
                return -ENODEV;
 
-       drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs,
-                          DRM_MODE_CONNECTOR_LVDS);
-       drm_connector_helper_add(&output->connector,
-                                &tegra_rgb_connector_helper_funcs);
-       output->connector.dpms = DRM_MODE_DPMS_OFF;
-
        drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
        drm_encoder_helper_add(&output->encoder,
                               &tegra_rgb_encoder_helper_funcs);
 
-       drm_connector_attach_encoder(&output->connector,
-                                         &output->encoder);
-       drm_connector_register(&output->connector);
+       /*
+        * Wrap a directly-connected panel into a DRM bridge in order to
+        * let the DRM core handle the panel for us.
+        */
+       if (output->panel) {
+               output->bridge = devm_drm_panel_bridge_add(output->dev,
+                                                          output->panel);
+               if (IS_ERR(output->bridge)) {
+                       dev_err(output->dev,
+                               "failed to wrap panel into bridge: %pe\n",
+                               output->bridge);
+                       return PTR_ERR(output->bridge);
+               }
+
+               output->panel = NULL;
+       }
+
+       /*
+        * Tegra devices that have an LVDS panel utilize the LVDS encoder
+        * bridge to convert up to 28 LCD LVTTL lanes into 5/4 LVDS lanes
+        * that go to the display panel's receiver.
+        *
+        * The encoder usually has a power-down control which needs to be
+        * enabled in order to transmit data to the panel.  Historically,
+        * devices that use an older device-tree version didn't model the
+        * bridge, assuming that the encoder is turned on by default, while
+        * today's DRM allows us to model the LVDS encoder properly.
+        *
+        * Newer device-trees utilize the LVDS encoder bridge, which
+        * provides us with a connector and handles the display panel.
+        *
+        * For older device-trees we wrap the panel into a panel-bridge.
+        */
+       if (output->bridge) {
+               err = drm_bridge_attach(&output->encoder, output->bridge,
+                                       NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+               if (err) {
+                       dev_err(output->dev, "failed to attach bridge: %d\n",
+                               err);
+                       return err;
+               }
+
+               connector = drm_bridge_connector_init(drm, &output->encoder);
+               if (IS_ERR(connector)) {
+                       dev_err(output->dev,
+                               "failed to initialize bridge connector: %pe\n",
+                               connector);
+                       return PTR_ERR(connector);
+               }
+
+               drm_connector_attach_encoder(connector, &output->encoder);
+       }
 
        err = tegra_output_init(drm, output);
        if (err < 0) {
index 45b5258..e88a17c 100644 (file)
@@ -3728,7 +3728,12 @@ static int tegra_sor_probe(struct platform_device *pdev)
                if (!sor->aux)
                        return -EPROBE_DEFER;
 
-               sor->output.ddc = &sor->aux->ddc;
+               if (get_device(&sor->aux->ddc.dev)) {
+                       if (try_module_get(sor->aux->ddc.owner))
+                               sor->output.ddc = &sor->aux->ddc;
+                       else
+                               put_device(&sor->aux->ddc.dev);
+               }
        }
 
        if (!sor->aux) {
index 3b81ea2..5a45353 100644 (file)
@@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
        struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
        u32 page = bo->node.start;
        u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
-       unsigned int count;
-       struct scatterlist *sgl;
+       struct sg_dma_page_iter dma_iter;
 
-       for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
-               u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
+       for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
+               dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
+               u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
                u32 pte = page_prot | page_address;
                u32 i;
 
-               BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
+               BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
                       BIT(24));
-
-               for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
+               for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
                        v3d->pt[page++] = pte + i;
        }
 
index 842f8b6..00d6b95 100644 (file)
@@ -72,9 +72,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 
                if (shmem->pages) {
                        if (shmem->mapped) {
-                               dma_unmap_sg(vgdev->vdev->dev.parent,
-                                            shmem->pages->sgl, shmem->mapped,
-                                            DMA_TO_DEVICE);
+                               dma_unmap_sgtable(vgdev->vdev->dev.parent,
+                                            shmem->pages, DMA_TO_DEVICE, 0);
                                shmem->mapped = 0;
                        }
 
@@ -164,13 +163,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
        }
 
        if (use_dma_api) {
-               shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-                                          shmem->pages->sgl,
-                                          shmem->pages->nents,
-                                          DMA_TO_DEVICE);
-               *nents = shmem->mapped;
+               ret = dma_map_sgtable(vgdev->vdev->dev.parent,
+                                     shmem->pages, DMA_TO_DEVICE, 0);
+               if (ret)
+                       return ret;
+               *nents = shmem->mapped = shmem->pages->nents;
        } else {
-               *nents = shmem->pages->nents;
+               *nents = shmem->pages->orig_nents;
        }
 
        *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
@@ -180,13 +179,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                return -ENOMEM;
        }
 
-       for_each_sg(shmem->pages->sgl, sg, *nents, si) {
-               (*ents)[si].addr = cpu_to_le64(use_dma_api
-                                              ? sg_dma_address(sg)
-                                              : sg_phys(sg));
-               (*ents)[si].length = cpu_to_le32(sg->length);
-               (*ents)[si].padding = 0;
+       if (use_dma_api) {
+               for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+                       (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
+                       (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
+                       (*ents)[si].padding = 0;
+               }
+       } else {
+               for_each_sgtable_sg(shmem->pages, sg, si) {
+                       (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
+                       (*ents)[si].length = cpu_to_le32(sg->length);
+                       (*ents)[si].padding = 0;
+               }
        }
+
        return 0;
 }
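
Note the changed error convention: dma_map_sg() returns the number of mapped entries and 0 on failure, while dma_map_sgtable() returns 0 on success and a negative errno on failure, updating sgt->nents internally. A sketch of the conversion rule:

    /* old: nents-or-zero */
    if (dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir) == 0)
            return -ENOMEM;

    /* new: zero-or-negative-errno; sgt->nents is set by the helper */
    ret = dma_map_sgtable(dev, sgt, dir, 0);
    if (ret)
            return ret;
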
 
index a755004..07945ca 100644 (file)
@@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
                return NULL;
        }
 
-       for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+       for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
@@ -608,9 +608,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
-               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-                                      shmem->pages->sgl, shmem->pages->nents,
-                                      DMA_TO_DEVICE);
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+                                           shmem->pages, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1028,9 +1027,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
-               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-                                      shmem->pages->sgl, shmem->pages->nents,
-                                      DMA_TO_DEVICE);
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+                                           shmem->pages, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
index 63fe7da..c158e67 100644 (file)
@@ -26,6 +26,8 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
        return container_of(man, struct vmw_thp_manager, manager);
 }
 
+static const struct ttm_resource_manager_func vmw_thp_func;
+
 static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
@@ -132,6 +134,7 @@ int vmw_thp_init(struct vmw_private *dev_priv)
        ttm_resource_manager_init(&rman->manager,
                                  dev_priv->vram_size >> PAGE_SHIFT);
 
+       rman->manager.func = &vmw_thp_func;
        drm_mm_init(&rman->mm, 0, rman->manager.size);
        spin_lock_init(&rman->lock);
 
@@ -171,7 +174,7 @@ static void vmw_thp_debug(struct ttm_resource_manager *man,
        spin_unlock(&rman->lock);
 }
 
-const struct ttm_resource_manager_func vmw_thp_func = {
+static const struct ttm_resource_manager_func vmw_thp_func = {
        .alloc = vmw_thp_get_node,
        .free = vmw_thp_put_node,
        .debug = vmw_thp_debug
index 7454f79..7f03104 100644 (file)
@@ -384,8 +384,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
 {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
 
-       dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
-               DMA_BIDIRECTIONAL);
+       dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
 }
 
@@ -405,16 +404,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
 static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
-       int ret;
-
-       ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
-                        DMA_BIDIRECTIONAL);
-       if (unlikely(ret == 0))
-               return -ENOMEM;
 
-       vmw_tt->sgt.nents = ret;
-
-       return 0;
+       return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /**
@@ -471,10 +462,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;
 
-               if (vsgt->num_pages > vmw_tt->sgt.nents) {
+               if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
-                                           vmw_tt->sgt.nents);
+                                           vmw_tt->sgt.orig_nents);
 
                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
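
The switch from nents to orig_nents here follows the general rule of the sgtable conversion: orig_nents is the CPU-side entry count produced by sg_alloc_table() and is what sizing and accounting should use, while nents is only meaningful for DMA addresses after a successful mapping. As a two-line sketch:

    unsigned int cpu_entries = sgt->orig_nents; /* from sg_alloc_table() */
    unsigned int dma_entries = sgt->nents;      /* valid only after mapping */
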
index a8aefaa..2f464ef 100644 (file)
@@ -218,7 +218,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                return ERR_PTR(ret);
 
        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
-                 size, sgt->nents);
+                 size, sgt->orig_nents);
 
        return &xen_obj->base;
 }
index 89b6c14..82d0a60 100644 (file)
@@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                                goto unpin;
                        }
 
-                       err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-                       if (!err) {
-                               err = -ENOMEM;
+                       err = dma_map_sgtable(dev, sgt, dir, 0);
+                       if (err)
                                goto unpin;
-                       }
 
                        job->unpins[job->num_unpins].dev = dev;
                        job->unpins[job->num_unpins].dir = dir;
@@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                }
 
                if (host->domain) {
-                       for_each_sg(sgt->sgl, sg, sgt->nents, j)
+                       for_each_sgtable_sg(sgt, sg, j)
                                gather_size += sg->length;
                        gather_size = iova_align(&host->iova, gather_size);
 
@@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                                goto put;
                        }
 
-                       err = iommu_map_sg(host->domain,
+                       err = iommu_map_sgtable(host->domain,
                                        iova_dma_addr(&host->iova, alloc),
-                                       sgt->sgl, sgt->nents, IOMMU_READ);
+                                       sgt, IOMMU_READ);
                        if (err == 0) {
                                __free_iova(&host->iova, alloc);
                                err = -EINVAL;
@@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                        job->unpins[job->num_unpins].size = gather_size;
                        phys_addr = iova_dma_addr(&host->iova, alloc);
                } else if (sgt) {
-                       err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
-                                        DMA_TO_DEVICE);
-                       if (!err) {
-                               err = -ENOMEM;
+                       err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
+                       if (err)
                                goto put;
-                       }
 
                        job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
                        job->unpins[job->num_unpins].dev = host->dev;
@@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job)
                }
 
                if (unpin->dev && sgt)
-                       dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
-                                    unpin->dir);
+                       dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
 
                host1x_bo_unpin(dev, unpin->bo, sgt);
                host1x_bo_put(unpin->bo);
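
Unlike dma_map_sgtable(), iommu_map_sgtable() keeps the "zero means failure" convention: it returns the number of bytes mapped. That is why the hunk above still checks err == 0 rather than err < 0. Sketch:

    /* iommu_map_sgtable() returns bytes mapped; 0 indicates failure. */
    size_t mapped = iommu_map_sgtable(domain, iova, sgt, IOMMU_READ);
    if (mapped == 0)
            return -EINVAL;
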
index a4a45d6..86d5e3f 100644 (file)
@@ -912,8 +912,8 @@ int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
         * skip cache sync. This will need to be revisited when support for
         * non-coherent buffers will be added to the DU driver.
         */
-       return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
-                               DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+                              DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
 
@@ -921,8 +921,8 @@ void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
 {
        struct vsp1_device *vsp1 = dev_get_drvdata(dev);
 
-       dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
-                          DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+                         DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
 
index 7116127..116aca3 100644 (file)
@@ -1852,6 +1852,10 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
                                 cdns_phy->phys[node].num_lanes,
                                 cdns_phy->max_bit_rate / 1000,
                                 cdns_phy->max_bit_rate % 1000);
+
+                       gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
+                       gphy->attrs.max_link_rate = cdns_phy->max_bit_rate;
+                       gphy->attrs.mode = PHY_MODE_DP;
                } else {
                        dev_err(dev, "Driver supports only PHY_TYPE_DP\n");
                        ret = -ENOTSUPP;
index dee757c..50c5e93 100644 (file)
@@ -35,3 +35,10 @@ config PHY_MTK_XSPHY
          Enable this to support the SuperSpeedPlus XS-PHY transceiver for
          USB3.1 GEN2 controllers on MediaTek chips. The driver supports
          multiple USB2.0, USB3.1 GEN2 ports.
+
+config PHY_MTK_HDMI
+       tristate "MediaTek HDMI-PHY Driver"
+       depends on ARCH_MEDIATEK && OF
+       select GENERIC_PHY
+       help
+         Support HDMI PHY for MediaTek SoCs.
index 08a8e6a..6325e38 100644 (file)
@@ -6,3 +6,8 @@
 obj-$(CONFIG_PHY_MTK_TPHY)             += phy-mtk-tphy.o
 obj-$(CONFIG_PHY_MTK_UFS)              += phy-mtk-ufs.o
 obj-$(CONFIG_PHY_MTK_XSPHY)            += phy-mtk-xsphy.o
+
+phy-mtk-hdmi-drv-y                     := phy-mtk-hdmi.o
+phy-mtk-hdmi-drv-y                     += phy-mtk-hdmi-mt2701.o
+phy-mtk-hdmi-drv-y                     += phy-mtk-hdmi-mt8173.o
+obj-$(CONFIG_PHY_MTK_HDMI)             += phy-mtk-hdmi-drv.o
similarity index 99%
rename from drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
rename to drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
index d3cc402..b74c65a 100644 (file)
@@ -4,7 +4,7 @@
  * Author: Chunhui Dai <chunhui.dai@mediatek.com>
  */
 
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
 
 #define HDMI_CON0      0x00
 #define RG_HDMITX_DRV_IBIAS            0
@@ -237,8 +237,8 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 }
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
-       .tz_disabled = true,
        .flags = CLK_SET_RATE_GATE,
+       .pll_default_off = true,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
similarity index 99%
rename from drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
rename to drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
index 827b937..6cdfdf5 100644 (file)
@@ -4,7 +4,7 @@
  * Author: Jie Qiu <jie.qiu@mediatek.com>
  */
 
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
 
 #define HDMI_CON0              0x00
 #define RG_HDMITX_PLL_EN               BIT(31)
similarity index 96%
rename from drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
rename to drivers/phy/mediatek/phy-mtk-hdmi.c
index 5223498..47c029d 100644 (file)
@@ -4,7 +4,7 @@
  * Author: Jie Qiu <jie.qiu@mediatek.com>
  */
 
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
 
 static int mtk_hdmi_phy_power_on(struct phy *phy);
 static int mtk_hdmi_phy_power_off(struct phy *phy);
@@ -184,6 +184,9 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
                return PTR_ERR(phy_provider);
        }
 
+       if (hdmi_phy->conf->pll_default_off)
+               hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+
        return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
                                   hdmi_phy->pll);
 }
@@ -205,6 +208,7 @@ struct platform_driver mtk_hdmi_phy_driver = {
                .of_match_table = mtk_hdmi_phy_match,
        },
 };
+module_platform_driver(mtk_hdmi_phy_driver);
 
 MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
 MODULE_LICENSE("GPL v2");
similarity index 95%
rename from drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
rename to drivers/phy/mediatek/phy-mtk-hdmi.h
index 2d8b318..dcf9bb1 100644 (file)
@@ -20,8 +20,8 @@
 struct mtk_hdmi_phy;
 
 struct mtk_hdmi_phy_conf {
-       bool tz_disabled;
        unsigned long flags;
+       bool pll_default_off;
        const struct clk_ops *hdmi_phy_clk_ops;
        void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
        void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -50,7 +50,6 @@ void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                       u32 val, u32 mask);
 struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
 
-extern struct platform_driver mtk_hdmi_phy_driver;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
 
index 272eeb0..ecfdfac 100644 (file)
@@ -21,8 +21,8 @@
 
 #define PWM_MAX_LEVEL          0xFF
 
-#define PWM_BASE_CLK           6000000  /* 6 MHz */
-#define PWM_MAX_PERIOD_NS      21333    /* 46.875KHz */
+#define PWM_BASE_CLK_MHZ       6       /* 6 MHz */
+#define PWM_MAX_PERIOD_NS      5461334 /* 183 Hz */
 
 /**
  * struct crystalcove_pwm - Crystal Cove PWM controller
@@ -39,59 +39,121 @@ static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
        return container_of(pc, struct crystalcove_pwm, chip);
 }
 
-static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+static int crc_pwm_calc_clk_div(int period_ns)
 {
-       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+       int clk_div;
 
-       regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+       clk_div = PWM_BASE_CLK_MHZ * period_ns / (256 * NSEC_PER_USEC);
+       /* clk_div 1 - 128, maps to register values 0-127 */
+       if (clk_div > 0)
+               clk_div--;
 
-       return 0;
-}
-
-static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
-{
-       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
-
-       regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+       return clk_div;
 }
 
-static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
-                         int duty_ns, int period_ns)
+static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
 {
-       struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+       struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
        struct device *dev = crc_pwm->chip.dev;
-       int level;
+       int err;
 
-       if (period_ns > PWM_MAX_PERIOD_NS) {
+       if (state->period > PWM_MAX_PERIOD_NS) {
                dev_err(dev, "un-supported period_ns\n");
                return -EINVAL;
        }
 
-       if (pwm_get_period(pwm) != period_ns) {
-               int clk_div;
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EOPNOTSUPP;
 
-               /* changing the clk divisor, need to disable fisrt */
-               crc_pwm_disable(c, pwm);
-               clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+       if (pwm_is_enabled(pwm) && !state->enabled) {
+               err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+               if (err) {
+                       dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err);
+                       return err;
+               }
+       }
 
-               regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
-                                       clk_div | PWM_OUTPUT_ENABLE);
+       if (pwm_get_duty_cycle(pwm) != state->duty_cycle ||
+           pwm_get_period(pwm) != state->period) {
+               u64 level = state->duty_cycle * PWM_MAX_LEVEL;
+
+               do_div(level, state->period);
+
+               err = regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+               if (err) {
+                       dev_err(dev, "Error writing PWM0_DUTY_CYCLE %d\n", err);
+                       return err;
+               }
+       }
+
+       if (pwm_is_enabled(pwm) && state->enabled &&
+           pwm_get_period(pwm) != state->period) {
+               /* changing the clk divisor, clear PWM_OUTPUT_ENABLE first */
+               err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV, 0);
+               if (err) {
+                       dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err);
+                       return err;
+               }
+       }
 
-               /* enable back */
-               crc_pwm_enable(c, pwm);
+       if (pwm_get_period(pwm) != state->period ||
+           pwm_is_enabled(pwm) != state->enabled) {
+               int clk_div = crc_pwm_calc_clk_div(state->period);
+               int pwm_output_enable = state->enabled ? PWM_OUTPUT_ENABLE : 0;
+
+               err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+                                  clk_div | pwm_output_enable);
+               if (err) {
+                       dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err);
+                       return err;
+               }
        }
 
-       /* change the pwm duty cycle */
-       level = duty_ns * PWM_MAX_LEVEL / period_ns;
-       regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+       if (!pwm_is_enabled(pwm) && state->enabled) {
+               err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+               if (err) {
+                       dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err);
+                       return err;
+               }
+       }
 
        return 0;
 }
 
+static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                             struct pwm_state *state)
+{
+       struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
+       struct device *dev = crc_pwm->chip.dev;
+       unsigned int clk_div, clk_div_reg, duty_cycle_reg;
+       int error;
+
+       error = regmap_read(crc_pwm->regmap, PWM0_CLK_DIV, &clk_div_reg);
+       if (error) {
+               dev_err(dev, "Error reading PWM0_CLK_DIV %d\n", error);
+               return;
+       }
+
+       error = regmap_read(crc_pwm->regmap, PWM0_DUTY_CYCLE, &duty_cycle_reg);
+       if (error) {
+               dev_err(dev, "Error reading PWM0_DUTY_CYCLE %d\n", error);
+               return;
+       }
+
+       clk_div = (clk_div_reg & ~PWM_OUTPUT_ENABLE) + 1;
+
+       state->period =
+               DIV_ROUND_UP(clk_div * NSEC_PER_USEC * 256, PWM_BASE_CLK_MHZ);
+       state->duty_cycle =
+               DIV_ROUND_UP_ULL(duty_cycle_reg * state->period, PWM_MAX_LEVEL);
+       state->polarity = PWM_POLARITY_NORMAL;
+       state->enabled = !!(clk_div_reg & PWM_OUTPUT_ENABLE);
+}
+
 static const struct pwm_ops crc_pwm_ops = {
-       .config = crc_pwm_config,
-       .enable = crc_pwm_enable,
-       .disable = crc_pwm_disable,
+       .apply = crc_pwm_apply,
+       .get_state = crc_pwm_get_state,
 };
 
 static int crystalcove_pwm_probe(struct platform_device *pdev)
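
To see where the new constants come from: with a 6 MHz base clock and 256 counts per PWM cycle, period_ns = clk_div * 256 / 6 MHz, and the divider register holds clk_div - 1 in the range 0..127. A worked example for the maximum period (values derived from the constants above):

    /*
     * crc_pwm_calc_clk_div(5461334):
     *   clk_div = 6 * 5461334 / (256 * 1000) = 128   (integer division)
     *   stored as 128 - 1 = 127, the largest register value
     *
     * crc_pwm_get_state() recovers the period:
     *   period = DIV_ROUND_UP((127 + 1) * 1000 * 256, 6) = 5461334 ns (~183 Hz)
     */
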
index 48f34d2..c6502cf 100644 (file)
@@ -89,7 +89,6 @@ static int pwm_lpss_prepare(struct device *dev)
 
 static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
        .prepare = pwm_lpss_prepare,
-       SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume)
 };
 
 static const struct acpi_device_id pwm_lpss_acpi_match[] = {
index 9d965ff..3444c56 100644 (file)
@@ -85,7 +85,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        unsigned long long on_time_div;
        unsigned long c = lpwm->info->clk_rate, base_unit_range;
        unsigned long long base_unit, freq = NSEC_PER_SEC;
-       u32 orig_ctrl, ctrl;
+       u32 ctrl;
 
        do_div(freq, period_ns);
 
@@ -93,26 +93,25 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
         * The equation is:
         * base_unit = round(base_unit_range * freq / c)
         */
-       base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
+       base_unit_range = BIT(lpwm->info->base_unit_bits);
        freq *= base_unit_range;
 
        base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
+       /* base_unit must not be 0 and we also want to avoid overflowing it */
+       base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
 
        on_time_div = 255ULL * duty_ns;
        do_div(on_time_div, period_ns);
        on_time_div = 255ULL - on_time_div;
 
-       orig_ctrl = ctrl = pwm_lpss_read(pwm);
+       ctrl = pwm_lpss_read(pwm);
        ctrl &= ~PWM_ON_TIME_DIV_MASK;
-       ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
-       base_unit &= base_unit_range;
+       ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
        ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
        ctrl |= on_time_div;
 
-       if (orig_ctrl != ctrl) {
-               pwm_lpss_write(pwm, ctrl);
-               pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
-       }
+       pwm_lpss_write(pwm, ctrl);
+       pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
 }
 
 static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
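
The clamp guards both ends of the base_unit field: a long period that rounds base_unit down to 0 would stall the counter, and a large value could spill outside the register field. A worked example with assumed hardware values (not taken from this patch):

    /*
     * Assume base_unit_bits = 22 and clk_rate = 19.2 MHz, period = 10 ms:
     *   freq      = NSEC_PER_SEC / 10000000            = 100 Hz
     *   base_unit = DIV_ROUND_CLOSEST_ULL(100ULL << 22, 19200000) = 21845
     * clamp_val(21845, 1, (1 << 22) - 1) leaves it unchanged; a period
     * beyond roughly 0.44 s would have rounded base_unit to 0 without
     * the clamp.
     */
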
@@ -121,41 +120,47 @@ static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
                pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
 }
 
+static int pwm_lpss_prepare_enable(struct pwm_lpss_chip *lpwm,
+                                  struct pwm_device *pwm,
+                                  const struct pwm_state *state)
+{
+       int ret;
+
+       ret = pwm_lpss_is_updating(pwm);
+       if (ret)
+               return ret;
+
+       pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
+       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+       ret = pwm_lpss_wait_for_update(pwm);
+       if (ret)
+               return ret;
+
+       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
+       return 0;
+}
+
 static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          const struct pwm_state *state)
 {
        struct pwm_lpss_chip *lpwm = to_lpwm(chip);
-       int ret;
+       int ret = 0;
 
        if (state->enabled) {
                if (!pwm_is_enabled(pwm)) {
                        pm_runtime_get_sync(chip->dev);
-                       ret = pwm_lpss_is_updating(pwm);
-                       if (ret) {
-                               pm_runtime_put(chip->dev);
-                               return ret;
-                       }
-                       pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
-                       ret = pwm_lpss_wait_for_update(pwm);
-                       if (ret) {
+                       ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
+                       if (ret)
                                pm_runtime_put(chip->dev);
-                               return ret;
-                       }
-                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
                } else {
-                       ret = pwm_lpss_is_updating(pwm);
-                       if (ret)
-                               return ret;
-                       pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       return pwm_lpss_wait_for_update(pwm);
+                       ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
                }
        } else if (pwm_is_enabled(pwm)) {
                pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
                pm_runtime_put(chip->dev);
        }
 
-       return 0;
+       return ret;
 }
 
 static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -255,30 +260,6 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
 }
 EXPORT_SYMBOL_GPL(pwm_lpss_remove);
 
-int pwm_lpss_suspend(struct device *dev)
-{
-       struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
-       int i;
-
-       for (i = 0; i < lpwm->info->npwm; i++)
-               lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
-
-int pwm_lpss_resume(struct device *dev)
-{
-       struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
-       int i;
-
-       for (i = 0; i < lpwm->info->npwm; i++)
-               writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_lpss_resume);
-
 MODULE_DESCRIPTION("PWM driver for Intel LPSS");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
 MODULE_LICENSE("GPL v2");
index 7909fa1..70db7e3 100644 (file)
@@ -19,7 +19,6 @@ struct pwm_lpss_chip {
        struct pwm_chip chip;
        void __iomem *regs;
        const struct pwm_lpss_boardinfo *info;
-       u32 saved_ctrl[MAX_PWMS];
 };
 
 struct pwm_lpss_boardinfo {
@@ -37,7 +36,5 @@ struct pwm_lpss_boardinfo {
 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
                                     const struct pwm_lpss_boardinfo *info);
 int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
-int pwm_lpss_suspend(struct device *dev);
-int pwm_lpss_resume(struct device *dev);
 
 #endif /* __PWM_LPSS_H */
index b1b6eeb..4c13cbc 100644 (file)
@@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
 
                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
-                               dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-                                                  sgt->nents,
-                                                  gntdev_dmabuf_attach->dir,
-                                                  DMA_ATTR_SKIP_CPU_SYNC);
+                               dma_unmap_sgtable(attach->dev, sgt,
+                                                 gntdev_dmabuf_attach->dir,
+                                                 DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }
 
@@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
-               if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                                     DMA_ATTR_SKIP_CPU_SYNC)) {
+               if (dma_map_sgtable(attach->dev, sgt, dir,
+                                   DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
@@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 
        /* Now convert sgt to array of pages and check for page validity. */
        i = 0;
-       for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+       for_each_sgtable_page(sgt, &sg_iter, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                /*
                 * Check if page is valid: this can happen if we are given
index 85513ee..da53aeb 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/types.h>
 #include <drm/drm_connector.h>
 
+struct drm_device;
+
 /*
  * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
  * DP and DPCD versions are independent.  Differences from 1.0 are not noted,
 # define DP_DS_PORT_TYPE_DP_DUALMODE        5
 # define DP_DS_PORT_TYPE_WIRELESS           6
 # define DP_DS_PORT_HPD                            (1 << 3)
+# define DP_DS_NON_EDID_MASK               (0xf << 4)
+# define DP_DS_NON_EDID_720x480i_60        (1 << 4)
+# define DP_DS_NON_EDID_720x480i_50        (2 << 4)
+# define DP_DS_NON_EDID_1920x1080i_60      (3 << 4)
+# define DP_DS_NON_EDID_1920x1080i_50      (4 << 4)
+# define DP_DS_NON_EDID_1280x720_60        (5 << 4)
+# define DP_DS_NON_EDID_1280x720_50        (7 << 4)
 /* offset 1 for VGA is maximum megapixels per second / 8 */
-/* offset 2 */
+/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
+/* offset 2 for VGA/DVI/HDMI */
 # define DP_DS_MAX_BPC_MASK                (3 << 0)
 # define DP_DS_8BPC                        0
 # define DP_DS_10BPC                       1
 # define DP_DS_12BPC                       2
 # define DP_DS_16BPC                       3
+/* offset 3 for DVI */
+# define DP_DS_DVI_DUAL_LINK               (1 << 1)
+# define DP_DS_DVI_HIGH_COLOR_DEPTH        (1 << 2)
+/* offset 3 for HDMI */
+# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
+# define DP_DS_HDMI_YCBCR422_PASS_THROUGH   (1 << 1)
+# define DP_DS_HDMI_YCBCR420_PASS_THROUGH   (1 << 2)
+# define DP_DS_HDMI_YCBCR444_TO_422_CONV    (1 << 3)
+# define DP_DS_HDMI_YCBCR444_TO_420_CONV    (1 << 4)
 
 #define DP_MAX_DOWNSTREAM_PORTS                    0x10
 
 #define DP_CEC_TX_MESSAGE_BUFFER               0x3020
 #define DP_CEC_MESSAGE_BUFFER_LENGTH             0x10
 
+#define DP_PROTOCOL_CONVERTER_CONTROL_0                0x3050 /* DP 1.3 */
+# define DP_HDMI_DVI_OUTPUT_CONFIG             (1 << 0) /* DP 1.3 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_1                0x3051 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR420_ENABLE      (1 << 0) /* DP 1.3 */
+# define DP_HDMI_EDID_PROCESSING_DISABLE       (1 << 1) /* DP 1.4 */
+# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */
+# define DP_HDMI_FORCE_SCRAMBLING              (1 << 3) /* DP 1.4 */
+#define DP_PROTOCOL_CONVERTER_CONTROL_2                0x3052 /* DP 1.3 */
+# define DP_CONVERSION_TO_YCBCR422_ENABLE      (1 << 0) /* DP 1.3 */
+
 #define DP_AUX_HDCP_BKSV               0x68000
 #define DP_AUX_HDCP_RI_PRIME           0x68005
 #define DP_AUX_HDCP_AKSV               0x68007
 #define DP_POWER_DOWN_PHY              0x25
 #define DP_SINK_EVENT_NOTIFY           0x30
 #define DP_QUERY_STREAM_ENC_STATUS     0x38
+#define  DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST     0
+#define  DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE     1
+#define  DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE       2
 
 /* DP 1.2 MST sideband reply types */
 #define DP_SIDEBAND_REPLY_ACK          0x00
 #define DP_MST_PHYSICAL_PORT_0 0
 #define DP_MST_LOGICAL_PORT_0 8
 
+#define DP_LINK_CONSTANT_N_VALUE 0x8000
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
@@ -1619,13 +1652,35 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
 int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
                                const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
-int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                               const u8 port_cap[4]);
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                              const u8 port_cap[4], u8 type);
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                              const u8 port_cap[4],
+                              const struct edid *edid);
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                  const u8 port_cap[4]);
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                    const u8 port_cap[4],
+                                    const struct edid *edid);
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                    const u8 port_cap[4],
+                                    const struct edid *edid);
 int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                             const u8 port_cap[4]);
+                             const u8 port_cap[4],
+                             const struct edid *edid);
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                      const u8 port_cap[4]);
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                            const u8 port_cap[4]);
+struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
+                                               const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                                               const u8 port_cap[4]);
 int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
-void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
-                            const u8 port_cap[4], struct drm_dp_aux *aux);
+void drm_dp_downstream_debug(struct seq_file *m,
+                            const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                            const u8 port_cap[4],
+                            const struct edid *edid,
+                            struct drm_dp_aux *aux);
 enum drm_mode_subconnector
 drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                         const u8 port_cap[4]);
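
Taken together, the reworked helpers let a driver derive per-protocol limits from the downstream-port caps instead of a single max clock. A hedged sketch of a call site (dpcd, port_cap and edid assumed in scope; the exact clamping policy belongs to the driver):

    if (drm_dp_downstream_is_tmds(dpcd, port_cap, edid)) {
            int max_tmds = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
            int max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);

            /* clamp candidate modes against max_tmds / max_bpc here */
    }
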
index 6ae5860..f5e92fe 100644 (file)
@@ -313,6 +313,34 @@ struct drm_dp_remote_i2c_write_ack_reply {
        u8 port_number;
 };
 
+struct drm_dp_query_stream_enc_status_ack_reply {
+       /* Bit[23:16]- Stream Id */
+       u8 stream_id;
+
+       /* Bit[15]- Signed */
+       bool reply_signed;
+
+       /* Bit[10:8]- Stream Output Sink Type */
+       bool unauthorizable_device_present;
+       bool legacy_device_present;
+       bool query_capable_device_present;
+
+       /* Bit[12:11]- Stream Output CP Type */
+       bool hdcp_1x_device_present;
+       bool hdcp_2x_device_present;
+
+       /* Bit[4]- Stream Authentication */
+       bool auth_completed;
+
+       /* Bit[3]- Stream Encryption */
+       bool encryption_enabled;
+
+       /* Bit[2]- Stream Repeater Function Present */
+       bool repeater_present;
+
+       /* Bit[1:0]- Stream State */
+       u8 state;
+};
 
 #define DRM_DP_MAX_SDP_STREAMS 16
 struct drm_dp_allocate_payload {
@@ -374,6 +402,15 @@ struct drm_dp_remote_i2c_write {
        u8 *bytes;
 };
 
+struct drm_dp_query_stream_enc_status {
+       u8 stream_id;
+       u8 client_id[7];        /* 56-bit nonce */
+       u8 stream_event;
+       bool valid_stream_event;
+       u8 stream_behavior;
+       u8 valid_stream_behavior;
+};
+
 /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
 struct drm_dp_port_number_req {
        u8 port_number;
@@ -422,6 +459,8 @@ struct drm_dp_sideband_msg_req_body {
 
                struct drm_dp_remote_i2c_read i2c_read;
                struct drm_dp_remote_i2c_write i2c_write;
+
+               struct drm_dp_query_stream_enc_status enc_status;
        } u;
 };
 
@@ -444,6 +483,8 @@ struct drm_dp_sideband_msg_reply_body {
                struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
                struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
                struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+
+               struct drm_dp_query_stream_enc_status_ack_reply enc_status;
        } u;
 };
 
@@ -807,6 +848,9 @@ drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
                                 struct drm_dp_mst_port *port);
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port, bool power_up);
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+               struct drm_dp_mst_port *port,
+               struct drm_dp_query_stream_enc_status_ack_reply *status);
 int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
 
 void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
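
A sketch of how an HDCP implementation might consume the new QUERY_STREAM_ENCRYPTION_STATUS request (mgr and port assumed to come from the MST connector; the policy applied to the reply bits is illustrative):

    struct drm_dp_query_stream_enc_status_ack_reply reply = {};
    int ret;

    ret = drm_dp_send_query_stream_enc_status(mgr, port, &reply);
    if (ret < 0)
            return ret;

    if (!reply.auth_completed || !reply.encryption_enabled)
            return -EINVAL; /* stream not (yet) protected */
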
index cfa4f5a..b27a0e2 100644 (file)
@@ -517,4 +517,8 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name,
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
                                           int hsize, int vsize, int fresh,
                                           bool rb);
+struct drm_display_mode *
+drm_display_mode_from_cea_vic(struct drm_device *dev,
+                             u8 video_code);
+
 #endif /* __DRM_EDID_H__ */
index bf141e7..0f69f9f 100644 (file)
@@ -93,6 +93,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags);
 
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
+
 /* helper functions for importing */
 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                struct dma_buf *dma_buf,
index 8e7ae30..7eeecb0 100644 (file)
        INTEL_VGA_DEVICE(0x4E51, info)
 
 /* TGL */
-#define INTEL_TGL_12_IDS(info) \
+#define INTEL_TGL_12_GT1_IDS(info) \
+       INTEL_VGA_DEVICE(0x9A60, info), \
+       INTEL_VGA_DEVICE(0x9A68, info), \
+       INTEL_VGA_DEVICE(0x9A70, info)
+
+#define INTEL_TGL_12_GT2_IDS(info) \
        INTEL_VGA_DEVICE(0x9A40, info), \
        INTEL_VGA_DEVICE(0x9A49, info), \
        INTEL_VGA_DEVICE(0x9A59, info), \
-       INTEL_VGA_DEVICE(0x9A60, info), \
-       INTEL_VGA_DEVICE(0x9A68, info), \
-       INTEL_VGA_DEVICE(0x9A70, info), \
        INTEL_VGA_DEVICE(0x9A78, info), \
        INTEL_VGA_DEVICE(0x9AC0, info), \
        INTEL_VGA_DEVICE(0x9AC9, info), \
        INTEL_VGA_DEVICE(0x9AD9, info), \
        INTEL_VGA_DEVICE(0x9AF8, info)
 
+#define INTEL_TGL_12_IDS(info) \
+       INTEL_TGL_12_GT1_IDS(info), \
+       INTEL_TGL_12_GT2_IDS(info)
+
 /* RKL */
 #define INTEL_RKL_IDS(info) \
        INTEL_VGA_DEVICE(0x4C80, info), \
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644 (file)
index 0000000..a889f28
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie:        An opaque token provided by adreno-smmu and passed
+ *                 back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPU's context bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank.  A
+ *                 NULL config disables TTBR0 translation, otherwise
+ *                 TTBR0 translation is enabled with the specified cfg
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together for controlling
+ * the GPU's SMMU instance.  This is by necessity, as the GPU is directly
+ * updating the SMMU for context switches, while on the other hand we do
+ * not want to duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate.  The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+	const void *cookie;
+	const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+	int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
\ No newline at end of file
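
A sketch of the GPU-side consumer, under the assumption that the SMMU driver publishes this struct as drvdata of its device (the real consumer is drm/msm; the names here are illustrative):

    struct adreno_smmu_priv *priv = dev_get_drvdata(smmu_dev); /* hypothetical handle */
    const struct io_pgtable_cfg *ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);

    /* build a per-process pagetable compatible with ttbr1_cfg ... */
    priv->set_ttbr0_cfg(priv->cookie, &my_ttbr0_cfg); /* enable TTBR0 translation */
    priv->set_ttbr0_cfg(priv->cookie, NULL);          /* disable it again */
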
index bcee8eb..e435bdb 100644 (file)
@@ -115,10 +115,12 @@ struct phy_ops {
 /**
  * struct phy_attrs - represents phy attributes
  * @bus_width: Data path width implemented by PHY
+ * @max_link_rate: Maximum link rate supported by PHY (in Mbps)
  * @mode: PHY mode
  */
 struct phy_attrs {
        u32                     bus_width;
+       u32                     max_link_rate;
        enum phy_mode           mode;
 };
 
index 3cc5e59..e030689 100644 (file)
@@ -846,7 +846,7 @@ static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
        if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
                                      0, dmabuf->mode.size, GFP_KERNEL) < 0)
                goto err2;
-       if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
+       if (dma_map_sgtable(at->dev, sg, direction, 0))
                goto err3;
 
        return sg;
@@ -868,6 +868,7 @@ static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
 
        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
 
+       dma_unmap_sgtable(at->dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
 }