Merge tag 'mediatek-drm-next-6.6' of https://git.kernel.org/pub/scm/linux/kernel...
author Dave Airlie <airlied@redhat.com>
Tue, 15 Aug 2023 02:07:03 +0000 (12:07 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 15 Aug 2023 02:07:19 +0000 (12:07 +1000)
Mediatek DRM Next for Linux 6.6

1. Small mtk-dpi cleanups
2. DisplayPort: support eDP and aux-bus
3. Fix uninitialized symbol
4. Do not check for 0 return after calling platform_get_irq() (sketch below)
5. Convert to platform remove callback returning void (sketch below)
6. Fix Coverity issues
7. Fix potential memory leak if vmap() fails (sketch below)
8. Fix void-pointer-to-enum-cast warning (sketch below)
9. Rid W=1 warnings from GPU
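
The sketches below illustrate the patterns behind items 4, 5, 7 and 8; they
are minimal examples under stated assumptions, not the actual MediaTek
patches.

For item 4: platform_get_irq() returns a negative errno on failure and, by
contract, never returns 0, so an explicit "irq == 0" check is dead code. A
minimal probe-time sketch; the driver name is hypothetical:

  #include <linux/platform_device.h>

  static int example_probe(struct platform_device *pdev)
  {
          int irq = platform_get_irq(pdev, 0);

          if (irq < 0)            /* no "irq == 0" check needed */
                  return irq;

          /* ... request_irq(irq, ...) and the rest of probe ... */
          return 0;
  }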
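
For item 5: at this point the tree-wide conversion is done by switching a
platform driver from .remove to the void-returning .remove_new callback. A
hedged sketch; the helper and driver names are hypothetical:

  static void example_remove(struct platform_device *pdev)
  {
          /* void return: there is no error code to propagate, so
           * cleanup simply runs (failures can at most be logged) */
          example_hw_stop(platform_get_drvdata(pdev));
  }

  static struct platform_driver example_driver = {
          .probe      = example_probe,
          .remove_new = example_remove,   /* void-returning variant */
          .driver     = { .name = "example" },
  };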
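
For item 7: vmap() returns NULL on failure, and whatever was allocated to
back the mapping must be released on that path or it leaks. A sketch of the
general shape; example_put_pages() is a hypothetical cleanup helper:

  #include <linux/vmalloc.h>

  static void *example_map(struct page **pages, unsigned int npages)
  {
          void *vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

          if (!vaddr)
                  example_put_pages(pages, npages);  /* without this, leak */
          return vaddr;   /* NULL on failure */
  }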
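
For item 8: clang's -Wvoid-pointer-to-enum-cast (seen in W=1 builds) fires
when a void pointer, such as OF match data, is cast directly to an enum; the
conventional fix is to cast through uintptr_t first. Sketch with a
hypothetical enum type:

  #include <linux/of_device.h>

  enum example_hw_version ver;

  /* (enum example_hw_version)data would warn under clang W=1 */
  ver = (enum example_hw_version)(uintptr_t)of_device_get_match_data(&pdev->dev);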

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Chun-Kuang Hu <chunkuang.hu@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20230813152726.14802-1-chunkuang.hu@kernel.org
1407 files changed:
.mailmap
Documentation/ABI/testing/sysfs-driver-ufs
Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
Documentation/devicetree/bindings/input/elan,ekth6915.yaml
Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
Documentation/devicetree/bindings/input/hid-over-i2c.yaml
Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/loongson,ls1x-wdt.yaml [new file with mode: 0644]
Documentation/gpu/amdgpu/flashing.rst [new file with mode: 0644]
Documentation/gpu/amdgpu/index.rst
Documentation/gpu/driver-uapi.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/rfc/i915_scheduler.rst
Documentation/gpu/todo.rst
Documentation/process/maintainer-netdev.rst
Documentation/riscv/hwprobe.rst
Documentation/wmi/devices/dell-wmi-ddv.rst
MAINTAINERS
Makefile
arch/arm/kernel/efi.c
arch/arm64/Kconfig
arch/arm64/include/asm/ftrace.h
arch/arm64/include/asm/syscall.h
arch/arm64/kernel/efi.c
arch/arm64/kernel/syscall.c
arch/loongarch/kernel/efi.c
arch/openrisc/include/uapi/asm/sigcontext.h
arch/openrisc/kernel/signal.c
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/book3s/64/hash-64k.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/security.c
arch/powerpc/mm/book3s64/hash_native.c
arch/riscv/kernel/cpufeature.c
arch/riscv/mm/init.c
arch/riscv/net/bpf_jit.h
arch/riscv/net/bpf_jit_core.c
arch/sh/boards/mach-dreamcast/irq.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/boards/mach-highlander/setup.c
arch/sh/boards/mach-kfr2r09/setup.c
arch/sh/boards/mach-r2d/irq.c
arch/sh/boards/mach-sh7763rdp/setup.c
arch/sh/cchips/Kconfig
arch/sh/include/asm/hd64461.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/cmpxchg_64.h
arch/um/kernel/um_arch.c
arch/x86/Makefile
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/events/intel/core.c
arch/x86/include/asm/alternative.h
arch/x86/include/asm/ibt.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/switch_to.h
arch/x86/kernel/alternative.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/module.c
arch/x86/kernel/process.c
arch/x86/video/Makefile
arch/x86/xen/xen-head.S
arch/xtensa/kernel/align.S
arch/xtensa/kernel/traps.c
arch/xtensa/platforms/iss/network.c
block/blk-crypto-profile.c
block/blk-flush.c
block/blk-mq.c
block/blk-zoned.c
block/mq-deadline.c
block/partitions/amiga.c
crypto/af_alg.c
crypto/algif_hash.c
crypto/asymmetric_keys/public_key.c
drivers/Kconfig
drivers/accel/ivpu/Makefile
drivers/accel/ivpu/ivpu_debugfs.c [new file with mode: 0644]
drivers/accel/ivpu/ivpu_debugfs.h [new file with mode: 0644]
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_fw.c
drivers/accel/ivpu/ivpu_fw.h
drivers/accel/ivpu/ivpu_fw_log.c [new file with mode: 0644]
drivers/accel/ivpu/ivpu_fw_log.h [new file with mode: 0644]
drivers/accel/ivpu/ivpu_gem.c
drivers/accel/ivpu/ivpu_hw.h
drivers/accel/ivpu/ivpu_hw_37xx.c [new file with mode: 0644]
drivers/accel/ivpu/ivpu_hw_37xx_reg.h [new file with mode: 0644]
drivers/accel/ivpu/ivpu_hw_40xx.c [new file with mode: 0644]
drivers/accel/ivpu/ivpu_hw_40xx_reg.h [new file with mode: 0644]
drivers/accel/ivpu/ivpu_hw_mtl.c [deleted file]
drivers/accel/ivpu/ivpu_hw_mtl_reg.h [deleted file]
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_mmu.c
drivers/accel/ivpu/ivpu_mmu_context.c
drivers/accel/ivpu/ivpu_mmu_context.h
drivers/accel/ivpu/ivpu_pm.c
drivers/accel/ivpu/ivpu_pm.h
drivers/accel/qaic/qaic_data.c
drivers/accel/qaic/qaic_drv.c
drivers/auxdisplay/cfag12864bfb.c
drivers/auxdisplay/ht16k33.c
drivers/base/regmap/regmap-irq.c
drivers/block/null_blk/zoned.c
drivers/block/virtio_blk.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/dma-buf/dma-buf-sysfs-stats.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-fence-unwrap.c
drivers/dma-buf/dma-fence.c
drivers/dma-buf/heaps/cma_heap.c
drivers/dma-buf/heaps/system_heap.c
drivers/dma-buf/udmabuf.c
drivers/firmware/efi/libstub/efi-stub-entry.c
drivers/firmware/efi/libstub/screen_info.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Kconfig
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c [deleted file]
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/atom.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
drivers/gpu/drm/amd/display/dc/basics/conversion.c
drivers/gpu/drm/amd/display/dc/basics/vector.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dsc.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
drivers/gpu/drm/amd/display/dc/link/link_detection.c
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h [deleted file]
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/include/mes_v11_api_def.h
drivers/gpu/drm/amd/include/yellow_carp_offset.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
drivers/gpu/drm/arm/display/komeda/komeda_drv.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.h
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/armada/Kconfig
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_fbdev.c
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/ast/ast_dp.c
drivers/gpu/drm/ast/ast_dp501.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mm.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
drivers/gpu/drm/bridge/analogix/anx7625.c
drivers/gpu/drm/bridge/analogix/anx7625.h
drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
drivers/gpu/drm/bridge/chipone-icn6211.c
drivers/gpu/drm/bridge/chrontel-ch7033.c
drivers/gpu/drm/bridge/display-connector.c
drivers/gpu/drm/bridge/fsl-ldb.c
drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
drivers/gpu/drm/bridge/ite-it6505.c
drivers/gpu/drm/bridge/lontium-lt9211.c
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
drivers/gpu/drm/bridge/lvds-codec.c
drivers/gpu/drm/bridge/nwl-dsi.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/bridge/samsung-dsim.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/bridge/simple-bridge.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
drivers/gpu/drm/bridge/tc358762.c
drivers/gpu/drm/bridge/tc358764.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/bridge/ti-sn65dsi83.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/bridge/ti-tfp410.c
drivers/gpu/drm/display/drm_hdcp_helper.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_atomic_uapi.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_bridge_connector.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_exec.c [new file with mode: 0644]
drivers/gpu/drm/drm_fbdev_dma.c
drivers/gpu/drm/drm_fbdev_generic.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_gpuva_mgr.c [new file with mode: 0644]
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_managed.c
drivers/gpu/drm/drm_mipi_dbi.c
drivers/gpu/drm/drm_mipi_dsi.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/fbdev.c
drivers/gpu/drm/gud/gud_pipe.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/hyperv/hyperv_drm_drv.c
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i2c/sil164_drv.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/g4x_dp.c
drivers/gpu/drm/i915/display/g4x_hdmi.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/icl_dsi.h
drivers/gpu/drm/i915/display/intel_atomic_plane.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_bios.h
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_color.c
drivers/gpu/drm/i915/display/intel_combo_phy.c
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_cx0_phy.c
drivers/gpu/drm/i915/display/intel_cx0_phy.h
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_ddi.h
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_display_core.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_device.c
drivers/gpu/drm/i915/display/intel_display_device.h
drivers/gpu/drm/i915/display/intel_display_driver.c
drivers/gpu/drm/i915/display/intel_display_irq.c
drivers/gpu/drm/i915/display/intel_display_irq.h
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_display_power.h
drivers/gpu/drm/i915/display/intel_display_power_well.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_aux.c
drivers/gpu/drm/i915/display/intel_dp_aux.h
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpt.c
drivers/gpu/drm/i915/display/intel_dsi.c
drivers/gpu/drm/i915/display/intel_dsi.h
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_frontbuffer.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_hotplug.c
drivers/gpu/drm/i915/display/intel_hotplug_irq.c
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/display/intel_pch_refclk.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_psr_regs.h
drivers/gpu/drm/i915/display/intel_qp_tables.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/display/intel_vdsc.h
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.h
drivers/gpu/drm/i915/gem/i915_gem_wait.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gt/intel_gpu_commands.h
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_defines.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_migrate.c
drivers/gpu/drm/i915/gt/intel_ppgtt.c
drivers/gpu/drm/i915/gt/intel_region_lmem.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_ring.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_rps.h
drivers/gpu/drm/i915/gt/intel_sa_media.c
drivers/gpu/drm/i915/gt/intel_sseu.c
drivers/gpu/drm/i915/gt/intel_tlb.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_tlb.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_mocs.c
drivers/gpu/drm/i915/gt/selftest_rc6.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/selftest_tlb.c
drivers/gpu/drm/i915/gt/shmem_utils.c
drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
drivers/gpu/drm/i915/gt/uc/intel_huc.c
drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_clock_gating.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_step.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/pxp/intel_pxp.c
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
drivers/gpu/drm/i915/selftests/i915_perf.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/igt_spinner.c
drivers/gpu/drm/i915/selftests/intel_uncore.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/soc/intel_dram.c
drivers/gpu/drm/i915/soc/intel_gmch.c
drivers/gpu/drm/i915/soc/intel_pch.c
drivers/gpu/drm/imx/dcss/dcss-dev.c
drivers/gpu/drm/imx/dcss/dcss-drv.c
drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
drivers/gpu/drm/imx/lcdc/imx-lcdc.c
drivers/gpu/drm/ingenic/Kconfig
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
drivers/gpu/drm/ingenic/ingenic-ipu.c
drivers/gpu/drm/lima/lima_drv.c
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/logicvc/logicvc_drm.c
drivers/gpu/drm/loongson/Kconfig [new file with mode: 0644]
drivers/gpu/drm/loongson/Makefile [new file with mode: 0644]
drivers/gpu/drm/loongson/loongson_device.c [new file with mode: 0644]
drivers/gpu/drm/loongson/loongson_module.c [new file with mode: 0644]
drivers/gpu/drm/loongson/loongson_module.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_benchmark.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_benchmark.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_crtc.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_drv.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_drv.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_gem.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_gem.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_gfxpll.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_gfxpll.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_i2c.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_i2c.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_irq.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_irq.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_output.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_output_7a1000.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_output_7a2000.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_pixpll.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_pixpll.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_plane.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_probe.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_probe.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_regs.h [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_ttm.c [new file with mode: 0644]
drivers/gpu/drm/loongson/lsdc_ttm.h [new file with mode: 0644]
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/mediatek/mtk_disp_aal.c
drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
drivers/gpu/drm/mediatek/mtk_disp_color.c
drivers/gpu/drm/mediatek/mtk_disp_gamma.c
drivers/gpu/drm/mediatek/mtk_disp_merge.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_ethdr.c
drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_drv.h
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
drivers/gpu/drm/meson/meson_encoder_dsi.c
drivers/gpu/drm/meson/meson_encoder_hdmi.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/dp/dp_audio.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_phy.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/mxsfb/lcdif_drv.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/mxsfb/mxsfb_kms.c
drivers/gpu/drm/nouveau/Kbuild
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/include/nvif/if000c.h
drivers/gpu/drm/nouveau/include/nvif/vmm.h
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
drivers/gpu/drm/nouveau/include/nvkm/core/os.h
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_abi16.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_chan.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_exec.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_exec.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_gem.h
drivers/gpu/drm/nouveau/nouveau_mem.h
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nouveau_sched.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_sched.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_uvmm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_uvmm.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nvif/mmu.c
drivers/gpu/drm/nouveau/nvif/vmm.c
drivers/gpu/drm/nouveau/nvkm/core/intr.c
drivers/gpu/drm/nouveau/nvkm/core/object.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem.h
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
drivers/gpu/drm/panel/panel-auo-a030jtn01.c
drivers/gpu/drm/panel/panel-boe-himax8279d.c
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/panel/panel-dsi-cm.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
drivers/gpu/drm/panel/panel-himax-hx8394.c
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
drivers/gpu/drm/panel/panel-innolux-ej030na.c
drivers/gpu/drm/panel/panel-innolux-p079zca.c
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
drivers/gpu/drm/panel/panel-lvds.c
drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
drivers/gpu/drm/panel/panel-newvision-nv3051d.c
drivers/gpu/drm/panel/panel-newvision-nv3052c.c
drivers/gpu/drm/panel/panel-novatek-nt35510.c
drivers/gpu/drm/panel/panel-novatek-nt35560.c
drivers/gpu/drm/panel/panel-novatek-nt35950.c
drivers/gpu/drm/panel/panel-novatek-nt36523.c
drivers/gpu/drm/panel/panel-novatek-nt36672a.c
drivers/gpu/drm/panel/panel-novatek-nt39016.c
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/panel/panel-samsung-db7430.c
drivers/gpu/drm/panel/panel-samsung-ld9040.c
drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
drivers/gpu/drm/panel/panel-samsung-sofef00.c
drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-sitronix-st7701.c
drivers/gpu/drm/panel/panel-sitronix-st7703.c
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-truly-nt35597.c
drivers/gpu/drm/panel/panel-visionox-r66451.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-visionox-rm69299.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/pl111/pl111_versatile.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/radeon/Kconfig
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/clearstate_si.h
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fbdev.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_tv.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770_smc.c
drivers/gpu/drm/radeon/sislands_smc.h
drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rk3066_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/solomon/ssd130x.c
drivers/gpu/drm/solomon/ssd130x.h
drivers/gpu/drm/sprd/sprd_dpu.c
drivers/gpu/drm/sprd/sprd_drm.c
drivers/gpu/drm/sprd/sprd_dsi.c
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_frontend.c
drivers/gpu/drm/sun4i/sun4i_frontend.h
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_drc.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/fbdev.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/gr2d.c
drivers/gpu/drm/tegra/gr3d.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/nvdec.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/tests/Makefile
drivers/gpu/drm/tests/drm_client_modeset_test.c
drivers/gpu/drm/tests/drm_exec_test.c [new file with mode: 0644]
drivers/gpu/drm/tests/drm_framebuffer_test.c
drivers/gpu/drm/tests/drm_kunit_helpers.c
drivers/gpu/drm/tests/drm_modes_test.c
drivers/gpu/drm/tests/drm_probe_helper_test.c
drivers/gpu/drm/tidss/tidss_dispc.c
drivers/gpu/drm/tidss/tidss_dispc.h
drivers/gpu/drm/tidss/tidss_drv.c
drivers/gpu/drm/tidss/tidss_encoder.c
drivers/gpu/drm/tidss/tidss_encoder.h
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/tidss/tidss_plane.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tiny/ili9225.c
drivers/gpu/drm/tiny/ili9486.c
drivers/gpu/drm/tiny/ofdrm.c
drivers/gpu/drm/tiny/panel-mipi-dbi.c
drivers/gpu/drm/tiny/repaper.c
drivers/gpu/drm/tiny/simpledrm.c
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/tests/.kunitconfig [new file with mode: 0644]
drivers/gpu/drm/ttm/tests/Makefile [new file with mode: 0644]
drivers/gpu/drm/ttm/tests/ttm_device_test.c [new file with mode: 0644]
drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c [new file with mode: 0644]
drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h [new file with mode: 0644]
drivers/gpu/drm/ttm/tests/ttm_pool_test.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_resource.c
drivers/gpu/drm/tve200/tve200_drv.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/v3d/v3d_drv.c
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/vc4/tests/vc4_mock.c
drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_hvs.c
drivers/gpu/drm/vc4/vc4_txp.c
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_submit.c
drivers/gpu/drm/vkms/vkms_composer.c
drivers/gpu/drm/vkms/vkms_crtc.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_drv.h
drivers/gpu/drm/vkms/vkms_formats.c
drivers/gpu/drm/vkms/vkms_formats.h
drivers/gpu/drm/vkms/vkms_writeback.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xlnx/zynqmp_disp.c
drivers/gpu/drm/xlnx/zynqmp_dp.c
drivers/gpu/drm/xlnx/zynqmp_dpsub.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/context.c
drivers/gpu/host1x/dev.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-nvidia-shield.c
drivers/hid/hid-picolcd_fb.c
drivers/hid/i2c-hid/Kconfig
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/iommu/iommu-sva.c
drivers/iommu/iommu.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/media/pci/ivtv/ivtvfb.c
drivers/media/test-drivers/vivid/Kconfig
drivers/media/test-drivers/vivid/vivid-osd.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/qca/qca8k-8xxx.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_tc_lib.h
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/igc/igc_tsn.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/octeontx2/af/ptp.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/thermal.c
drivers/net/ethernet/microchip/Kconfig
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_mm.c
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
drivers/net/netdevsim/dev.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.h
drivers/net/wireless/mediatek/mt76/mt7921/dma.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/realtek/rtw89/debug.c
drivers/nvme/host/core.c
drivers/nvme/host/fault_inject.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/sysfs.c
drivers/nvme/host/zns.c
drivers/nvme/target/loop.c
drivers/nvme/target/passthru.c
drivers/of/device.c
drivers/of/property.c
drivers/perf/riscv_pmu.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-amd.h
drivers/pinctrl/renesas/pinctrl-rzg2l.c
drivers/pinctrl/renesas/pinctrl-rzv2m.c
drivers/platform/x86/amd/Makefile
drivers/platform/x86/amd/pmc-quirks.c [new file with mode: 0644]
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/amd/pmc.h [new file with mode: 0644]
drivers/platform/x86/amd/pmf/core.c
drivers/platform/x86/dell/dell-wmi-ddv.c
drivers/platform/x86/intel/int3472/clk_and_regulator.c
drivers/platform/x86/intel/tpmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/wmi.c
drivers/s390/net/ism_drv.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/scsi_debug.c
drivers/scsi/sd_zbc.c
drivers/scsi/storvsc_drv.c
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-s3c64xx.c
drivers/staging/fbtft/Kconfig
drivers/staging/fbtft/fbtft-core.c
drivers/staging/sm750fb/sm750.c
drivers/staging/sm750fb/sm750_accel.c
drivers/staging/sm750fb/sm750_cursor.c
drivers/staging/sm750fb/sm750_hw.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/Kconfig
drivers/video/Kconfig
drivers/video/backlight/backlight.c
drivers/video/backlight/bd6107.c
drivers/video/backlight/gpio_backlight.c
drivers/video/backlight/lv5207lp.c
drivers/video/console/Kconfig
drivers/video/fbdev/68328fb.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/acornfb.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/amifb.c
drivers/video/fbdev/arcfb.c
drivers/video/fbdev/asiliantfb.c
drivers/video/fbdev/atafb.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/radeon_backlight.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/bw2.c
drivers/video/fbdev/carminefb.c
drivers/video/fbdev/cg14.c
drivers/video/fbdev/cg3.c
drivers/video/fbdev/cg6.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/cirrusfb.c
drivers/video/fbdev/clps711x-fb.c
drivers/video/fbdev/cobalt_lcdfb.c
drivers/video/fbdev/controlfb.c
drivers/video/fbdev/core/Kconfig [new file with mode: 0644]
drivers/video/fbdev/core/Makefile
drivers/video/fbdev/core/fb_backlight.c [new file with mode: 0644]
drivers/video/fbdev/core/fb_chrdev.c [new file with mode: 0644]
drivers/video/fbdev/core/fb_info.c [new file with mode: 0644]
drivers/video/fbdev/core/fb_internal.h [new file with mode: 0644]
drivers/video/fbdev/core/fb_procfs.c [new file with mode: 0644]
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/core/fbsysfs.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/da8xx-fb.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/ep93xx-fb.c
drivers/video/fbdev/ffb.c
drivers/video/fbdev/fm2fb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/g364fb.c
drivers/video/fbdev/gbefb.c
drivers/video/fbdev/geode/Kconfig
drivers/video/fbdev/geode/gx1fb_core.c
drivers/video/fbdev/geode/gxfb_core.c
drivers/video/fbdev/geode/lxfb_core.c
drivers/video/fbdev/goldfishfb.c
drivers/video/fbdev/grvga.c
drivers/video/fbdev/gxt4500.c
drivers/video/fbdev/hecubafb.c
drivers/video/fbdev/hgafb.c
drivers/video/fbdev/hitfb.c
drivers/video/fbdev/hpfb.c
drivers/video/fbdev/hyperv_fb.c
drivers/video/fbdev/i740fb.c
drivers/video/fbdev/i810/i810_main.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/intelfb/intelfbdrv.c
drivers/video/fbdev/kyro/fbdev.c
drivers/video/fbdev/leo.c
drivers/video/fbdev/macfb.c
drivers/video/fbdev/matrox/matroxfb_crtc2.c
drivers/video/fbdev/maxinefb.c
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/metronomefb.c
drivers/video/fbdev/mmp/fb/Kconfig
drivers/video/fbdev/mmp/fb/mmpfb.c
drivers/video/fbdev/mx3fb.c
drivers/video/fbdev/neofb.c
drivers/video/fbdev/nvidia/nv_backlight.c
drivers/video/fbdev/nvidia/nvidia.c
drivers/video/fbdev/ocfb.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap/Kconfig
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/Kconfig
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
drivers/video/fbdev/p9100.c
drivers/video/fbdev/platinumfb.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pm3fb.c
drivers/video/fbdev/pmag-aa-fb.c
drivers/video/fbdev/pmag-ba-fb.c
drivers/video/fbdev/pmagb-b-fb.c
drivers/video/fbdev/ps3fb.c
drivers/video/fbdev/pvr2fb.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/q40fb.c
drivers/video/fbdev/riva/fbdev.c
drivers/video/fbdev/s1d13xxxfb.c
drivers/video/fbdev/s3c-fb.c
drivers/video/fbdev/sa1100fb.c
drivers/video/fbdev/savage/savagefb_driver.c
drivers/video/fbdev/sh7760fb.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/simplefb.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/skeletonfb.c
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/sm712fb.c
drivers/video/fbdev/smscufx.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/sstfb.c
drivers/video/fbdev/sunxvr1000.c
drivers/video/fbdev/sunxvr2500.c
drivers/video/fbdev/sunxvr500.c
drivers/video/fbdev/tcx.c
drivers/video/fbdev/tdfxfb.c
drivers/video/fbdev/tgafb.c
drivers/video/fbdev/tridentfb.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/uvesafb.c
drivers/video/fbdev/valkyriefb.c
drivers/video/fbdev/vermilion/vermilion.c
drivers/video/fbdev/vesafb.c
drivers/video/fbdev/vfb.c
drivers/video/fbdev/vga16fb.c
drivers/video/fbdev/via/viafbdev.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/video/fbdev/wm8505fb.c
drivers/video/fbdev/xen-fbfront.c
drivers/video/fbdev/xilinxfb.c
drivers/video/logo/Kconfig
drivers/xen/grant-dma-ops.c
fs/erofs/decompressor.c
fs/erofs/inode.c
fs/erofs/zdata.c
fs/smb/client/cifsglob.h
fs/smb/client/cifssmb.c
fs/smb/client/connect.c
fs/smb/client/dfs.c
fs/smb/client/file.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2transport.c
include/asm-generic/vmlinux.lds.h
include/drm/bridge/dw_hdmi.h
include/drm/drm_bridge.h
include/drm/drm_crtc.h
include/drm/drm_debugfs.h
include/drm/drm_drv.h
include/drm/drm_exec.h [new file with mode: 0644]
include/drm/drm_file.h
include/drm/drm_gem.h
include/drm/drm_gem_dma_helper.h
include/drm/drm_gem_shmem_helper.h
include/drm/drm_gem_vram_helper.h
include/drm/drm_gpuva_mgr.h [new file with mode: 0644]
include/drm/drm_kunit_helpers.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_panel.h
include/drm/drm_plane.h
include/drm/drm_prime.h
include/drm/drm_syncobj.h
include/drm/drm_sysfs.h
include/drm/gpu_scheduler.h
include/drm/task_barrier.h
include/drm/ttm/ttm_bo.h
include/linux/blk-crypto-profile.h
include/linux/blk-mq.h
include/linux/dma-fence.h
include/linux/efi.h
include/linux/fb.h
include/linux/ftrace.h
include/linux/ism.h
include/linux/nvme.h
include/linux/platform_data/bd6107.h
include/linux/platform_data/gpio_backlight.h
include/linux/platform_data/lv5207lp.h
include/linux/psi.h
include/linux/psi_types.h
include/linux/rethook.h
include/linux/sysfb.h
include/net/netfilter/nf_conntrack_tuple.h
include/net/netfilter/nf_tables.h
include/net/pkt_sched.h
include/soc/mscc/ocelot.h
include/uapi/drm/drm.h
include/uapi/drm/drm_mode.h
include/uapi/drm/ivpu_accel.h
include/uapi/drm/nouveau_drm.h
include/uapi/drm/virtgpu_drm.h
include/uapi/scsi/scsi_bsg_ufs.h
include/ufs/ufs.h
io_uring/io_uring.c
kernel/bpf/cpumap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/kallsyms.c
kernel/kprobes.c
kernel/power/hibernate.c
kernel/power/qos.c
kernel/sched/fair.c
kernel/sched/psi.c
kernel/trace/fgraph.c
kernel/trace/fprobe.c
kernel/trace/ftrace.c
kernel/trace/ftrace_internal.h
kernel/trace/rethook.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_eprobe.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_user.c
kernel/trace/trace_kprobe_selftest.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe_kernel.h
kernel/trace/trace_probe_tmpl.h
kernel/trace/trace_uprobe.c
lib/iov_iter.c
net/ceph/messenger_v2.c
net/core/net-traces.c
net/core/skbuff.c
net/core/xdp.c
net/ipv6/addrconf.c
net/ipv6/icmp.c
net/ipv6/udp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_byteorder.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_immediate.c
net/netfilter/nft_objref.c
net/sched/act_api.c
net/sched/cls_flower.c
net/sched/cls_fw.c
net/sched/sch_qfq.c
net/wireless/util.c
samples/Kconfig
samples/ftrace/ftrace-direct-modify.c
samples/ftrace/ftrace-direct-multi-modify.c
samples/ftrace/ftrace-direct-multi.c
samples/ftrace/ftrace-direct-too.c
samples/ftrace/ftrace-direct.c
samples/vfio-mdev/mdpy-fb.c
scripts/kallsyms.c
tools/objtool/elf.c
tools/testing/selftests/bpf/prog_tests/async_stack_depth.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/async_stack_depth.c [new file with mode: 0644]
tools/testing/selftests/hid/vmtest.sh
tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json
tools/testing/selftests/user_events/dyn_test.c

index 1bce47a7f2ce247bef181ac8440c67788158f84e..89b7f33cd33036bb35b84c02ced0e63f223a74b5 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -246,6 +246,7 @@ John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
 <jon.toppins+linux@gmail.com> <jtoppins@redhat.com>
+Jonas Gorski <jonas.gorski@gmail.com> <jogo@openwrt.org>
 Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
index d5f44fc5b9dca58561a5c472c2990053e45ee736..e487f969a15ea937ee17d58959afd0f65fa44af7 100644 (file)
@@ -994,7 +994,7 @@ Description:        This file shows the amount of physical memory needed
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_lvl
 What:          /sys/bus/platform/devices/*.ufs/rpm_lvl
 Date:          September 2014
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry could be used to set or show the UFS device
                runtime power management level. The current driver
                implementation supports 7 levels with next target states:
@@ -1021,7 +1021,7 @@ Description:      This entry could be used to set or show the UFS device
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
 What:          /sys/bus/platform/devices/*.ufs/rpm_target_dev_state
 Date:          February 2018
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry shows the target power mode of an UFS device
                for the chosen runtime power management level.
 
@@ -1030,7 +1030,7 @@ Description:      This entry shows the target power mode of an UFS device
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_target_link_state
 What:          /sys/bus/platform/devices/*.ufs/rpm_target_link_state
 Date:          February 2018
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry shows the target state of an UFS UIC link
                for the chosen runtime power management level.
 
@@ -1039,7 +1039,7 @@ Description:      This entry shows the target state of an UFS UIC link
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_lvl
 What:          /sys/bus/platform/devices/*.ufs/spm_lvl
 Date:          September 2014
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry could be used to set or show the UFS device
                system power management level. The current driver
                implementation supports 7 levels with next target states:
@@ -1066,7 +1066,7 @@ Description:      This entry could be used to set or show the UFS device
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
 What:          /sys/bus/platform/devices/*.ufs/spm_target_dev_state
 Date:          February 2018
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry shows the target power mode of an UFS device
                for the chosen system power management level.
 
@@ -1075,7 +1075,7 @@ Description:      This entry shows the target power mode of an UFS device
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_target_link_state
 What:          /sys/bus/platform/devices/*.ufs/spm_target_link_state
 Date:          February 2018
-Contact:       Subhash Jadavani <subhashj@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This entry shows the target state of an UFS UIC link
                for the chosen system power management level.
 
@@ -1084,7 +1084,7 @@ Description:      This entry shows the target state of an UFS UIC link
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/monitor_enable
 What:          /sys/bus/platform/devices/*.ufs/monitor/monitor_enable
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the status of performance monitor enablement
                and it can be used to start/stop the monitor. When the monitor
                is stopped, the performance data collected is also cleared.
@@ -1092,7 +1092,7 @@ Description:      This file shows the status of performance monitor enablement
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/monitor_chunk_size
 What:          /sys/bus/platform/devices/*.ufs/monitor/monitor_chunk_size
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file tells the monitor to focus on requests transferring
                data of specific chunk size (in Bytes). 0 means any chunk size.
                It can only be changed when monitor is disabled.
@@ -1100,7 +1100,7 @@ Description:      This file tells the monitor to focus on requests transferring
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_total_sectors
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_total_sectors
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how many sectors (in 512 Bytes) have been
                sent from device to host after monitor gets started.
 
@@ -1109,7 +1109,7 @@ Description:      This file shows how many sectors (in 512 Bytes) have been
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_total_busy
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_total_busy
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how long (in micro seconds) has been spent
                sending data from device to host after monitor gets started.
 
@@ -1118,7 +1118,7 @@ Description:      This file shows how long (in micro seconds) has been spent
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_nr_requests
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_nr_requests
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how many read requests have been sent after
                monitor gets started.
 
@@ -1127,7 +1127,7 @@ Description:      This file shows how many read requests have been sent after
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_max
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_req_latency_max
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the maximum latency (in micro seconds) of
                read requests after monitor gets started.
 
@@ -1136,7 +1136,7 @@ Description:      This file shows the maximum latency (in micro seconds) of
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_min
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_req_latency_min
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the minimum latency (in micro seconds) of
                read requests after monitor gets started.
 
@@ -1145,7 +1145,7 @@ Description:      This file shows the minimum latency (in micro seconds) of
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_avg
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_req_latency_avg
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the average latency (in micro seconds) of
                read requests after monitor gets started.
 
@@ -1154,7 +1154,7 @@ Description:      This file shows the average latency (in micro seconds) of
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_sum
 What:          /sys/bus/platform/devices/*.ufs/monitor/read_req_latency_sum
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the total latency (in micro seconds) of
                read requests sent after monitor gets started.
 
@@ -1163,7 +1163,7 @@ Description:      This file shows the total latency (in micro seconds) of
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_total_sectors
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_total_sectors
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how many sectors (in 512 Bytes) have been sent
                from host to device after monitor gets started.
 
@@ -1172,7 +1172,7 @@ Description:      This file shows how many sectors (in 512 Bytes) have been sent
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_total_busy
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_total_busy
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how long (in micro seconds) has been spent
                sending data from host to device after monitor gets started.
 
@@ -1181,7 +1181,7 @@ Description:      This file shows how long (in micro seconds) has been spent
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_nr_requests
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_nr_requests
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows how many write requests have been sent after
                monitor gets started.
 
@@ -1190,7 +1190,7 @@ Description:      This file shows how many write requests have been sent after
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_max
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_req_latency_max
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the maximum latency (in micro seconds) of write
                requests after monitor gets started.
 
@@ -1199,7 +1199,7 @@ Description:      This file shows the maximum latency (in micro seconds) of write
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_min
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_req_latency_min
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the minimum latency (in micro seconds) of write
                requests after monitor gets started.
 
@@ -1208,7 +1208,7 @@ Description:      This file shows the minimum latency (in micro seconds) of write
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_avg
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_req_latency_avg
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the average latency (in micro seconds) of write
                requests after monitor gets started.
 
@@ -1217,7 +1217,7 @@ Description:      This file shows the average latency (in micro seconds) of write
 What:          /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_sum
 What:          /sys/bus/platform/devices/*.ufs/monitor/write_req_latency_sum
 Date:          January 2021
-Contact:       Can Guo <cang@codeaurora.org>
+Contact:       Can Guo <quic_cang@quicinc.com>
 Description:   This file shows the total latency (in micro seconds) of write
                requests after monitor gets started.
 
@@ -1226,7 +1226,7 @@ Description:      This file shows the total latency (in micro seconds) of write
 What:          /sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_presv_us_en
 What:          /sys/bus/platform/devices/*.ufs/device_descriptor/wb_presv_us_en
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows if preserve user-space was configured
 
                The file is read only.
@@ -1234,7 +1234,7 @@ Description:      This entry shows if preserve user-space was configured
 What:          /sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_shared_alloc_units
 What:          /sys/bus/platform/devices/*.ufs/device_descriptor/wb_shared_alloc_units
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the shared allocated units of WB buffer
 
                The file is read only.
@@ -1242,7 +1242,7 @@ Description:      This entry shows the shared allocated units of WB buffer
 What:          /sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_type
 What:          /sys/bus/platform/devices/*.ufs/device_descriptor/wb_type
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the configured WB type.
                0x1 for shared buffer mode. 0x0 for dedicated buffer mode.
 
@@ -1251,7 +1251,7 @@ Description:      This entry shows the configured WB type.
 What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_buff_cap_adj
 What:          /sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_buff_cap_adj
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the total user-space decrease in shared
                buffer mode.
                The value of this parameter is 3 for TLC NAND when SLC mode
@@ -1262,7 +1262,7 @@ Description:      This entry shows the total user-space decrease in shared
 What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_alloc_units
 What:          /sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_alloc_units
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the Maximum total WriteBooster Buffer size
                which is supported by the entire device.
 
@@ -1271,7 +1271,7 @@ Description:      This entry shows the Maximum total WriteBooster Buffer size
 What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_max_wb_luns
 What:          /sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_max_wb_luns
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the maximum number of luns that can support
                WriteBooster.
 
@@ -1280,7 +1280,7 @@ Description:      This entry shows the maximum number of luns that can support
 What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_red_type
 What:          /sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_red_type
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   The supportability of user space reduction mode
                and preserve user space mode.
                00h: WriteBooster Buffer can be configured only in
@@ -1295,7 +1295,7 @@ Description:      The supportability of user space reduction mode
 What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/wb_sup_wb_type
 What:          /sys/bus/platform/devices/*.ufs/geometry_descriptor/wb_sup_wb_type
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   The supportability of WriteBooster Buffer type.
 
                ===  ==========================================================
@@ -1310,7 +1310,7 @@ Description:      The supportability of WriteBooster Buffer type.
 What:          /sys/bus/platform/drivers/ufshcd/*/flags/wb_enable
 What:          /sys/bus/platform/devices/*.ufs/flags/wb_enable
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the status of WriteBooster.
 
                == ============================
@@ -1323,7 +1323,7 @@ Description:      This entry shows the status of WriteBooster.
 What:          /sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_en
 What:          /sys/bus/platform/devices/*.ufs/flags/wb_flush_en
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows if flush is enabled.
 
                == =================================
@@ -1336,7 +1336,7 @@ Description:      This entry shows if flush is enabled.
 What:          /sys/bus/platform/drivers/ufshcd/*/flags/wb_flush_during_h8
 What:          /sys/bus/platform/devices/*.ufs/flags/wb_flush_during_h8
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   Flush WriteBooster Buffer during hibernate state.
 
                == =================================================
@@ -1351,7 +1351,7 @@ Description:      Flush WriteBooster Buffer during hibernate state.
 What:          /sys/bus/platform/drivers/ufshcd/*/attributes/wb_avail_buf
 What:          /sys/bus/platform/devices/*.ufs/attributes/wb_avail_buf
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the amount of unused WriteBooster buffer
                available.
 
@@ -1360,7 +1360,7 @@ Description:      This entry shows the amount of unused WriteBooster buffer
 What:          /sys/bus/platform/drivers/ufshcd/*/attributes/wb_cur_buf
 What:          /sys/bus/platform/devices/*.ufs/attributes/wb_cur_buf
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the amount of unused current buffer.
 
                The file is read only.
@@ -1368,7 +1368,7 @@ Description:      This entry shows the amount of unused current buffer.
 What:          /sys/bus/platform/drivers/ufshcd/*/attributes/wb_flush_status
 What:          /sys/bus/platform/devices/*.ufs/attributes/wb_flush_status
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the flush operation status.
 
 
@@ -1385,7 +1385,7 @@ Description:      This entry shows the flush operation status.
 What:          /sys/bus/platform/drivers/ufshcd/*/attributes/wb_life_time_est
 What:          /sys/bus/platform/devices/*.ufs/attributes/wb_life_time_est
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows an indication of the WriteBooster Buffer
                lifetime based on the amount of performed program/erase cycles
 
@@ -1399,7 +1399,7 @@ Description:      This entry shows an indication of the WriteBooster Buffer
 
 What:          /sys/class/scsi_device/*/device/unit_descriptor/wb_buf_alloc_units
 Date:          June 2020
-Contact:       Asutosh Das <asutoshd@codeaurora.org>
+Contact:       Asutosh Das <quic_asutoshd@quicinc.com>
 Description:   This entry shows the configured size of WriteBooster buffer.
                0400h corresponds to 4GB.
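
To show how the monitor attributes above fit together, here is a minimal
userspace sketch (error handling trimmed; the "1d84000.ufs" device path is a
made-up example and must be replaced with the real *.ufs device on the
target)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define MON "/sys/bus/platform/devices/1d84000.ufs/monitor/"

    static void write_attr(const char *name, const char *val)
    {
            char path[256];
            int fd;

            snprintf(path, sizeof(path), MON "%s", name);
            fd = open(path, O_WRONLY);
            if (fd >= 0) {
                    if (write(fd, val, strlen(val)) < 0)
                            perror(name);
                    close(fd);
            }
    }

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd;

            /* chunk size is only writable while the monitor is disabled */
            write_attr("monitor_chunk_size", "0");  /* 0 = any chunk size */
            write_attr("monitor_enable", "1");      /* start collecting */

            sleep(5);                               /* let some I/O run */

            fd = open(MON "read_req_latency_avg", O_RDONLY);
            if (fd >= 0) {
                    if ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                            buf[n] = '\0';
                            printf("avg read latency: %s", buf); /* in us */
                    }
                    close(fd);
            }

            write_attr("monitor_enable", "0");      /* stop; clears data */
            return 0;
    }
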
 
index 0521261b04a9c08d51d46b5b8113934cfc7620fa..ae894d996d21fc179f887ae6497ea51a8ad86076 100644 (file)
@@ -49,6 +49,9 @@ properties:
     description: |
         OF device-tree gpio specification for RSTX pin(active low system reset)
 
+  interrupts:
+    maxItems: 1
+
   toshiba,hpd-pin:
     $ref: /schemas/types.yaml#/definitions/uint32
     enum:
index c5d1df680858f8b9ba10ce359e003e62ad050123..e7ab6224b52e09437207a7a6bcd2d0af2bc31cd5 100644 (file)
@@ -18,6 +18,7 @@ properties:
       - enum:
           - bananapi,lhr050h41
           - feixin,k101-im2byl02
+          - tdo,tl050hdv35
           - wanchanglong,w552946aba
       - const: ilitek,ili9881c
 
index 929fe046d1e7e4c3532f98ee831da919334cea43..9f1016551e0b27e4518277ccc9b314b3245bb18f 100644 (file)
@@ -40,6 +40,12 @@ properties:
     items:
       - enum:
           - auo,b101ew05
+          # Chunghwa Picture Tubes Ltd. 7" WXGA (800x1280) TFT LCD LVDS panel
+          - chunghwa,claa070wp03xg
+          # HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel
+          - hannstar,hsd101pww2
+          # Hydis Technologies 7" WXGA (800x1280) TFT LCD LVDS panel
+          - hydis,hv070wx2-1e0
           - tbs,a711-panel
 
       - const: panel-lvds
index 1d4936fc51828c2aa252bc0bac4198f5db7322ee..25b4589d4a5811f25a5beaa9c0d8ad37401ad046 100644 (file)
@@ -103,8 +103,6 @@ properties:
       - cdtech,s070wv95-ct16
         # Chefree CH101OLHLWH-002 10.1" (1280x800) color TFT LCD panel
       - chefree,ch101olhlwh-002
-        # Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
-      - chunghwa,claa070wp03xg
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
       - chunghwa,claa101wa01a
         # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
@@ -168,8 +166,6 @@ properties:
       - hannstar,hsd070pww1
         # HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
       - hannstar,hsd100pxn1
-        # HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel
-      - hannstar,hsd101pww2
         # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
       - hit,tx23d38vm0caa
         # InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
@@ -196,6 +192,8 @@ properties:
       - innolux,n116bge
         # InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
       - innolux,n125hce-gn1
+        # InnoLux 15.6" FHD (1920x1080) TFT LCD panel
+      - innolux,g156hce-l01
         # InnoLux 15.6" WXGA TFT LCD panel
       - innolux,n156bge-l21
         # Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
index fa6556363ccacb46236ddf6ec2d83946904d7e79..ef162b51d010de233c4e7d660732670f249bfc30 100644 (file)
@@ -15,17 +15,26 @@ allOf:
 
 properties:
   compatible:
-    const: sitronix,st7789v
+    enum:
+      - edt,et028013dma
+      - inanbo,t28cp45tn89-v17
+      - jasonic,jt240mhqs-hwt-ek-e3
+      - sitronix,st7789v
 
   reg: true
   reset-gpios: true
   power-supply: true
   backlight: true
   port: true
+  rotation: true
 
   spi-cpha: true
   spi-cpol: true
 
+  spi-rx-bus-width:
+    minimum: 0
+    maximum: 1
+
   dc-gpios:
     maxItems: 1
     description: DCX pin, Display data/command selection pin in parallel interface
@@ -33,7 +42,6 @@ properties:
 required:
   - compatible
   - reg
-  - reset-gpios
   - power-supply
 
 unevaluatedProperties: false
@@ -52,6 +60,7 @@ examples:
             reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
             backlight = <&pwm_bl>;
             power-supply = <&power>;
+            rotation = <180>;
             spi-max-frequency = <100000>;
             spi-cpol;
             spi-cpha;
diff --git a/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml b/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml
new file mode 100644 (file)
index 0000000..d817f99
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/startek,kd070fhfid015.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Startek Electronic Technology Co. kd070fhfid015 7 inch TFT LCD panel
+
+maintainers:
+  - Alexandre Mergnat <amergnat@baylibre.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: startek,kd070fhfid015
+
+  enable-gpios: true
+
+  iovcc-supply:
+    description: Reference to the regulator powering the panel IO pins.
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  reset-gpios: true
+
+  port: true
+
+  power-supply: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - enable-gpios
+  - iovcc-supply
+  - reg
+  - reset-gpios
+  - port
+  - power-supply
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "startek,kd070fhfid015";
+            reg = <0>;
+            enable-gpios = <&pio 67 GPIO_ACTIVE_HIGH>;
+            reset-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
+            iovcc-supply = <&mt6357_vsim1_reg>;
+            power-supply = <&vsys_lcm_reg>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml b/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml
new file mode 100644 (file)
index 0000000..6ba3236
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,r66451.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox R66451 AMOLED DSI Panel
+
+maintainers:
+  - Jessica Zhang <quic_jesszhan@quicinc.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: visionox,r66451
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  vddio-supply: true
+  vdd-supply: true
+  port: true
+  reset-gpios: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - vddio-supply
+  - vdd-supply
+  - reset-gpios
+  - port
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "visionox,r66451";
+            reg = <0>;
+            vddio-supply = <&vreg_l12c_1p8>;
+            vdd-supply = <&vreg_l13c_3p0>;
+
+            reset-gpios = <&tlmm 24 GPIO_ACTIVE_LOW>;
+
+            port {
+                panel0_in: endpoint {
+                    remote-endpoint = <&dsi0_out>;
+                };
+            };
+        };
+    };
+...
index 94bb5ef567c638f0525fb0285c854c6f9b35eb95..20e2bd15d4d297889b5114ad8aa3f5e536b1d11f 100644 (file)
@@ -49,15 +49,15 @@ properties:
 
   solomon,height:
     $ref: /schemas/types.yaml#/definitions/uint32
-    default: 16
     description:
-      Height in pixel of the screen driven by the controller
+      Height in pixels of the screen driven by the controller.
+      The default value is controller-dependent.
 
   solomon,width:
     $ref: /schemas/types.yaml#/definitions/uint32
-    default: 96
     description:
-      Width in pixel of the screen driven by the controller
+      Width in pixels of the screen driven by the controller.
+      The default value is controller-dependent.
 
   solomon,page-offset:
     $ref: /schemas/types.yaml#/definitions/uint32
@@ -157,6 +157,10 @@ allOf:
             const: sinowealth,sh1106
     then:
       properties:
+        width:
+          default: 132
+        height:
+          default: 64
         solomon,dclk-div:
           default: 1
         solomon,dclk-frq:
@@ -171,6 +175,10 @@ allOf:
               - solomon,ssd1305
     then:
       properties:
+        width:
+          default: 132
+        height:
+          default: 64
         solomon,dclk-div:
           default: 1
         solomon,dclk-frq:
@@ -185,6 +193,10 @@ allOf:
               - solomon,ssd1306
     then:
       properties:
+        width:
+          default: 128
+        height:
+          default: 64
         solomon,dclk-div:
           default: 1
         solomon,dclk-frq:
@@ -199,6 +211,10 @@ allOf:
               - solomon,ssd1307
     then:
       properties:
+        width:
+          default: 128
+        height:
+          default: 39
         solomon,dclk-div:
           default: 2
         solomon,dclk-frq:
@@ -215,6 +231,10 @@ allOf:
               - solomon,ssd1309
     then:
       properties:
+        width:
+          default: 128
+        height:
+          default: 64
         solomon,dclk-div:
           default: 1
         solomon,dclk-frq:
index b6b402f1616110d4cca95ecccf62886020ac0f02..ae09cd3cbce1fcb138459ab16e5872190fd9e5de 100644 (file)
@@ -12,14 +12,18 @@ maintainers:
   - Tomi Valkeinen <tomi.valkeinen@ti.com>
 
 description: |
-  The AM65x TI Keystone Display SubSystem with two output ports and
-  two video planes. The first video port supports OLDI and the second
-  supports DPI format. The fist plane is full video plane with all
-  features and the second is a "lite plane" without scaling support.
+  The AM625 and AM65x TI Keystone Display SubSystem with two output
+  ports and two video planes. In AM65x DSS, the first video port
+  supports 1 OLDI TX and in AM625 DSS, the first video port output is
+  internally routed to 2 OLDI TXes. The second video port supports DPI
+  format. The first plane is a full video plane with all features and the
+  second is a "lite plane" without scaling support.
 
 properties:
   compatible:
-    const: ti,am65x-dss
+    enum:
+      - ti,am625-dss
+      - ti,am65x-dss
 
   reg:
     description:
@@ -80,7 +84,9 @@ properties:
       port@0:
         $ref: /schemas/graph.yaml#/properties/port
         description:
-          The DSS OLDI output port node form video port 1
+          For AM65x DSS, the OLDI output port node from video port 1.
+          For AM625 DSS, the internal DPI output port node from video
+          port 1.
 
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
index 05e6f2df604c8039ceeb08cf9343944ea6c1dc83..3e2d216c6432b2aa77a3aa8717383ff17d02710e 100644 (file)
@@ -13,6 +13,9 @@ description:
   Supports the Elan eKTH6915 touchscreen controller.
   This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
 
+allOf:
+  - $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
 properties:
   compatible:
     items:
@@ -24,6 +27,8 @@ properties:
   interrupts:
     maxItems: 1
 
+  panel: true
+
   reset-gpios:
     description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.
 
index 1edad1da1196d043e571fd4ed64425187c03b618..358cb8275bf1879b2fc616296a09dc0ad069dc2c 100644 (file)
@@ -14,6 +14,9 @@ description:
   This touchscreen uses the i2c-hid protocol but has some non-standard
   power sequencing required.
 
+allOf:
+  - $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
 properties:
   compatible:
     oneOf:
@@ -30,6 +33,8 @@ properties:
   interrupts:
     maxItems: 1
 
+  panel: true
+
   reset-gpios:
     true
 
index 7156b08f76453b49040ad47e53c30bcc0fde90f7..138caad96a298f4335526b1fc80d85d84dfdc48c 100644 (file)
@@ -44,6 +44,8 @@ properties:
     description: HID descriptor address
     $ref: /schemas/types.yaml#/definitions/uint32
 
+  panel: true
+
   post-power-on-delay-ms:
     description: Time required by the device after enabling its regulators
       or powering it on, before it is ready for communication.
index 895592da962634437a8f22519f15827e7f328923..431c13335c402f50a770988676bb0c20fb76da7c 100644 (file)
@@ -10,6 +10,13 @@ maintainers:
   - Dmitry Torokhov <dmitry.torokhov@gmail.com>
 
 properties:
+  panel:
+    description: If this touchscreen is integrally connected to a panel, this
+      is a reference to that panel. The presence of this reference indicates
+      that the touchscreen should be power sequenced together with the panel
+      and that they may share power and/or reset signals.
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   touchscreen-min-x:
     description: minimum x coordinate reported
     $ref: /schemas/types.yaml#/definitions/uint32
index af60bf1a66648c32ad0ef20565f1014e7f84664d..1dfafc339ddd5d826b55ebaa1499fe05fefe5ea7 100644 (file)
@@ -617,6 +617,8 @@ patternProperties:
     description: Imagination Technologies Ltd.
   "^imi,.*":
     description: Integrated Micro-Electronics Inc.
+  "^inanbo,.*":
+    description: Shenzhen INANBO Electronic Technology Co., Ltd.
   "^incircuit,.*":
     description: In-Circuit GmbH
   "^indiedroid,.*":
@@ -675,6 +677,8 @@ patternProperties:
     description: iWave Systems Technologies Pvt. Ltd.
   "^jadard,.*":
     description: Jadard Technology Inc.
+  "^jasonic,.*":
+    description: Jasonic Technology Ltd.
   "^jdi,.*":
     description: Japan Display Inc.
   "^jedec,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/loongson,ls1x-wdt.yaml b/Documentation/devicetree/bindings/watchdog/loongson,ls1x-wdt.yaml
new file mode 100644 (file)
index 0000000..81690d4
--- /dev/null
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/loongson,ls1x-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 Watchdog Timer
+
+maintainers:
+  - Keguang Zhang <keguang.zhang@gmail.com>
+
+allOf:
+  - $ref: watchdog.yaml#
+
+properties:
+  compatible:
+    enum:
+      - loongson,ls1b-wdt
+      - loongson,ls1c-wdt
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/loongson,ls1x-clk.h>
+    watchdog: watchdog@1fe5c060 {
+        compatible = "loongson,ls1b-wdt";
+        reg = <0x1fe5c060 0xc>;
+
+        clocks = <&clkc LS1X_CLKID_APB>;
+    };
diff --git a/Documentation/gpu/amdgpu/flashing.rst b/Documentation/gpu/amdgpu/flashing.rst
new file mode 100644 (file)
index 0000000..bd745c4
--- /dev/null
@@ -0,0 +1,33 @@
+=======================
+ dGPU firmware flashing
+=======================
+
+IFWI
+----
+Flashing the dGPU integrated firmware image (IFWI) is supported by GPUs that
+use the PSP to orchestrate the update (Navi3x or newer GPUs).
+For supported GPUs, `amdgpu` will export a series of sysfs files that can be
+used for the flash process.
+
+The IFWI flash process is:
+
+1. Ensure the IFWI image is intended for the dGPU on the system.
+2. "Write" the IFWI image to the sysfs file `psp_vbflash`. This will stage the IFWI in memory.
+3. "Read" from the `psp_vbflash` sysfs file to initiate the flash process.
+4. Poll the `psp_vbflash_status` sysfs file to determine when the flash process completes.
+
+USB-C PD F/W
+------------
+On GPUs that support flashing an updated USB-C PD firmware image, the process
+is done using the `usbc_pd_fw` sysfs file.
+
+* Reading the file will provide the current firmware version.
+* Writing the name of a firmware payload stored in `/lib/firmware/amdgpu` to the sysfs file will initiate the flash process.
+
+The firmware payload stored in `/lib/firmware/amdgpu` can be given any name
+as long as it doesn't conflict with other existing binaries that are used by
+`amdgpu`.
+
+sysfs files
+-----------
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
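
A minimal userspace sketch of the four-step IFWI sequence above (the card0
sysfs path is an assumption, and the status encoding is driver-defined;
consult the amdgpu_psp.c documentation referenced by this file before
relying on it)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEV "/sys/class/drm/card0/device/"

    int main(int argc, char **argv)
    {
            char buf[4096];
            ssize_t n;
            int img, fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <ifwi-image>\n", argv[0]);
                    return 1;
            }

            /* Steps 1+2: stage the IFWI image by writing it to psp_vbflash */
            img = open(argv[1], O_RDONLY);
            fd = open(DEV "psp_vbflash", O_WRONLY);
            if (img < 0 || fd < 0)
                    return 1;
            while ((n = read(img, buf, sizeof(buf))) > 0)
                    if (write(fd, buf, n) != n)
                            return 1;
            close(fd);
            close(img);

            /* Step 3: reading back from psp_vbflash starts the flash */
            fd = open(DEV "psp_vbflash", O_RDONLY);
            read(fd, buf, sizeof(buf));
            close(fd);

            /* Step 4: poll psp_vbflash_status; repeat this read until the
             * reported value indicates completion (driver-defined) */
            fd = open(DEV "psp_vbflash_status", O_RDONLY);
            if (fd >= 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                    buf[n] = '\0';
                    printf("flash status: %s", buf);
            }
            close(fd);
            return 0;
    }
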
index 03c2966cae7980518ae9eab8e82d94c472a801b1..912e699fd37313d51f9f3d4a4688d6546d97a4b5 100644 (file)
@@ -10,6 +10,7 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
    module-parameters
    driver-core
    display/index
+   flashing
    xgmi
    ras
    thermal
index 4411e6919a3dfef69507abfbfa05d9a48e86d0d6..c08bcbb95fb30d810db1c3827b06af691cca460c 100644 (file)
@@ -6,3 +6,14 @@ drm/i915 uAPI
 =============
 
 .. kernel-doc:: include/uapi/drm/i915_drm.h
+
+drm/nouveau uAPI
+================
+
+VM_BIND / EXEC uAPI
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c
+    :doc: Overview
+
+.. kernel-doc:: include/uapi/drm/nouveau_drm.h
index a79fd3549ff8c267c195d225c0d2c722803c7b56..c19b34b1c0edf9ca6e1fc695cb6d20125273683a 100644 (file)
@@ -466,6 +466,42 @@ DRM MM Range Allocator Function References
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :export:
 
+DRM GPU VA Manager
+==================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
+   :doc: Overview
+
+Split and Merge
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
+   :doc: Split and Merge
+
+Locking
+-------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
+   :doc: Locking
+
+Examples
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
+   :doc: Examples
+
+DRM GPU VA Manager Function References
+--------------------------------------
+
+.. kernel-doc:: include/drm/drm_gpuva_mgr.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
+   :export:
+
 DRM Buddy Allocator
 ===================
 
@@ -481,8 +517,10 @@ DRM Cache Handling and Fast WC memcpy()
 .. kernel-doc:: drivers/gpu/drm/drm_cache.c
    :export:
 
+.. _drm_sync_objects:
+
 DRM Sync Objects
-===========================
+================
 
 .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
    :doc: Overview
@@ -493,6 +531,18 @@ DRM Sync Objects
 .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
    :export:
 
+DRM Execution context
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_exec.c
+   :doc: Overview
+
+.. kernel-doc:: include/drm/drm_exec.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_exec.c
+   :export:
+
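
For orientation, a hedged sketch of the usage pattern this helper enables,
based on the interfaces added in include/drm/drm_exec.h in this cycle
(drm_exec_init, drm_exec_until_all_locked, drm_exec_retry_on_contention,
drm_exec_lock_obj, drm_exec_fini); treat drm_exec.c's own documentation as
authoritative::

    #include <drm/drm_exec.h>
    #include <drm/drm_gem.h>

    static int lock_objects(struct drm_gem_object **objs, unsigned int count)
    {
            struct drm_exec exec;
            unsigned int i;
            int ret = 0;

            drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
            drm_exec_until_all_locked(&exec) {
                    for (i = 0; i < count; ++i) {
                            ret = drm_exec_lock_obj(&exec, objs[i]);
                            /* drops all locks and restarts on contention */
                            drm_exec_retry_on_contention(&exec);
                            if (ret)
                                    goto out;
                    }
            }

            /* ... all reservations held: set up the job here ... */
    out:
            drm_exec_fini(&exec);
            return ret;
    }
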
 GPU Scheduler
 =============
 
index d630f15ab7958f95a879b4931c47a7279d071d6d..ec086e7a43ffdcc08530e762c4bacc5741ea4ef6 100644 (file)
@@ -135,9 +135,13 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
 drm_i915_context_engines_parallel_submit to the uAPI to implement this
 extension.
 
+.. c:namespace-push:: rfc
+
 .. kernel-doc:: include/uapi/drm/i915_drm.h
         :functions: i915_context_engines_parallel_submit
 
+.. c:namespace-pop::
+
 Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
 -------------------------------------------------------------------
 Contexts that have been configured with the 'set_parallel' extension can only
index 68bdafa0284f55f6c8933c8bf4ae6b0c5618dca5..139980487ccff023df9e5d2588e07f796ecff597 100644 (file)
@@ -49,14 +49,18 @@ converted over. Modern compositors like Wayland or Surfaceflinger on Android
 really want an atomic modeset interface, so this is all about the bright
 future.
 
-There is a conversion guide for atomic and all you need is a GPU for a
-non-converted driver (again virtual HW drivers for KVM are still all
-suitable).
+There is a conversion guide for atomic [1]_ and all you need is a GPU for a
+non-converted driver.  The "Atomic mode setting design overview" series [2]_
+[3]_ at LWN.net can also be helpful.
 
 As part of this drivers also need to convert to universal plane (which means
 exposing primary & cursor as proper plane objects). But that's much easier to
 do by directly using the new atomic helper driver callbacks.
 
+  .. [1] https://blog.ffwll.ch/2014/11/atomic-modeset-support-for-kms-drivers.html
+  .. [2] https://lwn.net/Articles/653071/
+  .. [3] https://lwn.net/Articles/653466/
+
 Contact: Daniel Vetter, respective driver maintainers
 
 Level: Advanced
@@ -319,15 +323,6 @@ Contact: Daniel Vetter, Noralf Tronnes
 
 Level: Advanced
 
-struct drm_gem_object_funcs
----------------------------
-
-GEM objects can now have a function table instead of having the callbacks on the
-DRM driver struct. This is now the preferred way. Callbacks in drivers have been
-converted, except for struct drm_driver.gem_prime_mmap.
-
-Level: Intermediate
-
 connector register/unregister fixes
 -----------------------------------
 
@@ -452,6 +447,44 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
 
 Level: Starter
 
+Remove driver dependencies on FB_DEVICE
+---------------------------------------
+
+A number of fbdev drivers provide attributes via sysfs and therefore depend
+on CONFIG_FB_DEVICE to be selected. Review each driver and attempt to make
+any dependencies on CONFIG_FB_DEVICE optional. At a minimum, the respective
+code in the driver could be conditionalized via ifdef CONFIG_FB_DEVICE. Not
+all drivers may be able to drop CONFIG_FB_DEVICE.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Starter
+
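
A sketch of the minimal approach suggested above, as a generic pattern rather
than code from any particular driver (example_attr and example_register_sysfs
are hypothetical names)::

    #include <linux/device.h>
    #include <linux/fb.h>
    #include <linux/sysfs.h>

    #ifdef CONFIG_FB_DEVICE
    static ssize_t example_attr_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "example\n");
    }
    static DEVICE_ATTR_RO(example_attr);

    static int example_register_sysfs(struct fb_info *info)
    {
            return device_create_file(info->dev, &dev_attr_example_attr);
    }
    #else
    static inline int example_register_sysfs(struct fb_info *info)
    {
            return 0;       /* no fbdev device node; nothing to expose */
    }
    #endif /* CONFIG_FB_DEVICE */
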
+Clean up checks for already prepared/enabled in panels
+------------------------------------------------------
+
+In a whole pile of panel drivers, we have code to make the
+prepare/unprepare/enable/disable callbacks behave as no-ops if they've already
+been called. To get some idea of the duplicated code, try::
+
+  git grep 'if.*>prepared' -- drivers/gpu/drm/panel
+  git grep 'if.*>enabled' -- drivers/gpu/drm/panel
+
+In the patch ("drm/panel: Check for already prepared/enabled in drm_panel")
+we've moved this check to the core. Now we can most definitely remove the
+check from the individual panels and save a pile of code.
+
+In addition to removing the check from the individual panels, it is believed
+that even the core shouldn't need this check and that it should be considered
+an error if other code ever relies on it. The check in the core currently
+prints a warning with dev_warn() whenever something relies on it. After a
+little while, we likely want to promote this to a WARN(1) to help encourage
+folks not to rely on this behavior.
+
+Contact: Douglas Anderson <dianders@chromium.org>
+
+Level: Starter/Intermediate
+
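
For reference, the duplicated pattern this item wants removed looks roughly
like the following (hypothetical example_panel driver; the "after" form
relies on the core check added by the patch named above)::

    #include <linux/container_of.h>
    #include <drm/drm_panel.h>

    struct example_panel {
            struct drm_panel panel;
            bool prepared;          /* per-driver duplicate of core state */
    };

    #define to_example_panel(p) container_of(p, struct example_panel, panel)

    /* Before: the driver guards against double-prepare itself. */
    static int example_prepare_with_guard(struct drm_panel *panel)
    {
            struct example_panel *ctx = to_example_panel(panel);

            if (ctx->prepared)      /* the duplicated check */
                    return 0;

            /* ... enable regulators, deassert reset, init the panel ... */
            ctx->prepared = true;
            return 0;
    }

    /* After: drm_panel_prepare() performs the check in the core, so the
     * callback can do the work unconditionally and the bool goes away. */
    static int example_prepare(struct drm_panel *panel)
    {
            /* ... enable regulators, deassert reset, init the panel ... */
            return 0;
    }
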
 
 Core refactorings
 =================
@@ -749,16 +782,16 @@ existing hardware. The new driver's call-back functions are filled from
 existing fbdev code.
 
 More complex fbdev drivers can be refactored step-by-step into a DRM
-driver with the help of the DRM fbconv helpers. [1] These helpers provide
+driver with the help of the DRM fbconv helpers [4]_. These helpers provide
 the transition layer between the DRM core infrastructure and the fbdev
 driver interface. Create a new DRM driver on top of the fbconv helpers,
 copy over the fbdev driver, and hook it up to the DRM code. Examples for
-several fbdev drivers are available at [1] and a tutorial of this process
-available at [2]. The result is a primitive DRM driver that can run X11
-and Weston.
+several fbdev drivers are available in Thomas Zimmermann's fbconv tree
+[4]_, as well as a tutorial of this process [5]_. The result is a primitive
+DRM driver that can run X11 and Weston.
 
- [1] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
- [2] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
+.. [4] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
+.. [5] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
 
 Contact: Thomas Zimmermann <tzimmermann@suse.de>
 
index 2397b31c0198cc6209315c82b8af266345f821bb..2ab843cde830cd31b4ba449409d5ffcd08c81fb8 100644 (file)
@@ -98,7 +98,7 @@ If you aren't subscribed to netdev and/or are simply unsure if
 repository link above for any new networking-related commits.  You may
 also check the following website for the current status:
 
-  http://vger.kernel.org/~davem/net-next.html
+  https://patchwork.hopto.org/net-next.html
 
 The ``net`` tree continues to collect fixes for the vX.Y content, and is
 fed back to Linus at regular (~weekly) intervals.  Meaning that the
index 19165ebd82baf8f3dcdff2861738bf2a99c811aa..933c715065d6f37757818523f1772b6edbd0708c 100644 (file)
@@ -49,7 +49,7 @@ The following keys are defined:
     privileged ISA, with the following known exceptions (more exceptions may be
     added, but only if it can be demonstrated that the user ABI is not broken):
 
-    * The :fence.i: instruction cannot be directly executed by userspace
+    * The ``fence.i`` instruction cannot be directly executed by userspace
       programs (it may still be executed in userspace via a
       kernel-controlled mechanism such as the vDSO).
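
For userspace that needs the effect of fence.i, e.g. after emitting
instructions at runtime, the portable route is the compiler builtin, which on
riscv Linux is lowered to the kernel-provided icache-flush mechanism rather
than a raw fence.i (a minimal sketch)::

    #include <stddef.h>
    #include <string.h>

    /* Publish freshly written instructions, then flush the icache through
     * the kernel's sanctioned path instead of executing fence.i directly. */
    static void publish_code(void *dst, const void *insns, size_t len)
    {
            memcpy(dst, insns, len);
            __builtin___clear_cache((char *)dst, (char *)dst + len);
    }
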
 
index d8aa64e9c827cb30b76976c22467706165a976c2..bf963d91dd55b0f01bbe2f430e89f46387fb253a 100644 (file)
@@ -187,7 +187,8 @@ WMI method BatteryeRawAnalytics()
 
 Returns a buffer usually containing 12 blocks of analytics data.
 Those blocks contain:
-- block number starting with 0 (u8)
+
+- a block number starting with 0 (u8)
 - 31 bytes of unknown data
 
 .. note::
index 3be1bdfe8ecc7f9169df3657b4344c9ef1b8dc6f..cd882b87a3c607b522c54742f9e1b6bcccdc21e6 100644 (file)
@@ -4121,6 +4121,13 @@ F:       Documentation/devicetree/bindings/spi/brcm,bcm63xx-hsspi.yaml
 F:     drivers/spi/spi-bcm63xx-hsspi.c
 F:     drivers/spi/spi-bcmbca-hsspi.c
 
+BROADCOM BCM6348/BCM6358 SPI controller DRIVER
+M:     Jonas Gorski <jonas.gorski@gmail.com>
+L:     linux-spi@vger.kernel.org
+S:     Odd Fixes
+F:     Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
+F:     drivers/spi/spi-bcm63xx.c
+
 BROADCOM ETHERNET PHY DRIVERS
 M:     Florian Fainelli <florian.fainelli@broadcom.com>
 R:     Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
@@ -6143,10 +6150,9 @@ F:       kernel/dma/
 DMA-BUF HEAPS FRAMEWORK
 M:     Sumit Semwal <sumit.semwal@linaro.org>
 R:     Benjamin Gaignard <benjamin.gaignard@collabora.com>
-R:     Liam Mark <lmark@codeaurora.org>
-R:     Laura Abbott <labbott@redhat.com>
 R:     Brian Starkey <Brian.Starkey@arm.com>
 R:     John Stultz <jstultz@google.com>
+R:     T.J. Mercier <tjmercier@google.com>
 L:     linux-media@vger.kernel.org
 L:     dri-devel@lists.freedesktop.org
 L:     linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
@@ -6388,6 +6394,7 @@ F:        drivers/gpu/drm/aspeed/
 DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
 M:     Dave Airlie <airlied@redhat.com>
 R:     Thomas Zimmermann <tzimmermann@suse.de>
+R:     Jocelyn Falempe <jfalempe@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6539,6 +6546,7 @@ F:        drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
 DRM DRIVER FOR MGA G200 GRAPHICS CHIPS
 M:     Dave Airlie <airlied@redhat.com>
 R:     Thomas Zimmermann <tzimmermann@suse.de>
+R:     Jocelyn Falempe <jfalempe@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6938,6 +6946,13 @@ T:       git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/lima/
 F:     include/uapi/drm/lima_drm.h
 
+DRM DRIVERS FOR LOONGSON
+M:     Sui Jingfeng <suijingfeng@loongson.cn>
+L:     dri-devel@lists.freedesktop.org
+S:     Supported
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+F:     drivers/gpu/drm/loongson/
+
 DRM DRIVERS FOR MEDIATEK
 M:     Chun-Kuang Hu <chunkuang.hu@kernel.org>
 M:     Philipp Zabel <p.zabel@pengutronix.de>
@@ -7007,7 +7022,7 @@ F:        drivers/gpu/drm/stm
 
 DRM DRIVERS FOR TI KEYSTONE
 M:     Jyri Sarha <jyri.sarha@iki.fi>
-M:     Tomi Valkeinen <tomba@kernel.org>
+M:     Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -7018,16 +7033,18 @@ F:      drivers/gpu/drm/tidss/
 
 DRM DRIVERS FOR TI LCDC
 M:     Jyri Sarha <jyri.sarha@iki.fi>
-R:     Tomi Valkeinen <tomba@kernel.org>
+M:     Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
+T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/tilcdc/
 F:     drivers/gpu/drm/tilcdc/
 
 DRM DRIVERS FOR TI OMAP
-M:     Tomi Valkeinen <tomba@kernel.org>
+M:     Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
+T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/ti/
 F:     drivers/gpu/drm/omapdrm/
 
@@ -8672,8 +8689,11 @@ S:       Maintained
 F:     drivers/input/touchscreen/resistive-adc-touch.c
 
 GENERIC STRING LIBRARY
+M:     Kees Cook <keescook@chromium.org>
 R:     Andy Shevchenko <andy@kernel.org>
-S:     Maintained
+L:     linux-hardening@vger.kernel.org
+S:     Supported
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
 F:     include/linux/string.h
 F:     include/linux/string_choices.h
 F:     include/linux/string_helpers.h
@@ -13968,7 +13988,7 @@ T:      git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
 F:     drivers/soc/microchip/
 
 MICROCHIP SPI DRIVER
-M:     Tudor Ambarus <tudor.ambarus@linaro.org>
+M:     Ryan Wanner <ryan.wanner@microchip.com>
 S:     Supported
 F:     drivers/spi/spi-atmel.*
 
@@ -17543,6 +17563,7 @@ QUALCOMM ETHQOS ETHERNET DRIVER
 M:     Vinod Koul <vkoul@kernel.org>
 R:     Bhupesh Sharma <bhupesh.sharma@linaro.org>
 L:     netdev@vger.kernel.org
+L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/qcom,ethqos.yaml
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 47690c28456abc7c79aa96dd33ed28ed9f432d42..658ec2b8aa749bfdde92f6997877c8daa82362be 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index e2b9d2618c6727c64b0f1b0ed8788a36ce1049ff..e94655ef16bb30b7da4ea339d6e7fb0ab2e0956a 100644 (file)
@@ -5,6 +5,8 @@
 
 #include <linux/efi.h>
 #include <linux/memblock.h>
+#include <linux/screen_info.h>
+
 #include <asm/efi.h>
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
index 7856c3a3e35afb606d174452c0f938a793503d6b..a2511b30d0f676b5c70f4b98a78ea024ce79de19 100644 (file)
@@ -197,6 +197,8 @@ config ARM64
                    !CC_OPTIMIZE_FOR_SIZE)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_ARGS
+       select HAVE_SAMPLE_FTRACE_DIRECT
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select HAVE_FAST_GUP
        select HAVE_FTRACE_MCOUNT_RECORD
index 21ac1c5c71d3bf00094732656e361aab3cbb76e5..ab158196480c391f78adc97649c5b17b524d2abc 100644 (file)
@@ -211,6 +211,10 @@ static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs
 {
        return ret_regs->fp;
 }
+
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+                          unsigned long frame_pointer);
+
 #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER  */
 #endif
 
index 4cfe9b49709ba9c4a9f08e22d6c4b5c2f0fd69ab..ab8e14b96f68172e30d7783ad2d98449ffd21c23 100644 (file)
@@ -85,4 +85,7 @@ static inline int syscall_get_arch(struct task_struct *task)
        return AUDIT_ARCH_AARCH64;
 }
 
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
 #endif /* __ASM_SYSCALL_H */
index baab8dd3ead3c27a73cb6473d69c3b999549cd9f..3afbe503b066f212ff24f09ef8af29dfa58ca551 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/efi.h>
 #include <linux/init.h>
+#include <linux/screen_info.h>
 
 #include <asm/efi.h>
 #include <asm/stacktrace.h>
index 5a668d7f3c1f71a5d211a3ec247e05ee7c3927e9..b1ae2f2eaf77ec3c88c95cece7f4b802f87046d6 100644 (file)
@@ -75,9 +75,6 @@ static inline bool has_syscall_work(unsigned long flags)
        return unlikely(flags & _TIF_SYSCALL_WORK);
 }
 
-int syscall_trace_enter(struct pt_regs *regs);
-void syscall_trace_exit(struct pt_regs *regs);
-
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                           const syscall_fn_t syscall_table[])
 {
index 3d448fef3af454ce46dc5b52e2cbcfa56d0be11d..9fc10cea21e10e387ef47f3fac85a9c3fc5fc66c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kobject.h>
 #include <linux/memblock.h>
 #include <linux/reboot.h>
+#include <linux/screen_info.h>
 #include <linux/uaccess.h>
 
 #include <asm/early_ioremap.h>
index ca585e4af6b8eac431e100b229e938e49d274c35..e7ffb58ff58fb003c67e281b6697d98985c717cc 100644 (file)
 
 struct sigcontext {
        struct user_regs_struct regs;  /* needs to be first */
-       struct __or1k_fpu_state fpu;
-       unsigned long oldmask;
+       union {
+               unsigned long fpcsr;
+               unsigned long oldmask;  /* unused */
+       };
 };
 
 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
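
With the union in place, the FPU status word is delivered where old kernels
exposed the unused oldmask slot; a hedged userspace sketch, assuming the
libc's mcontext_t mirrors the kernel struct shown above::

    #include <signal.h>
    #include <ucontext.h>

    static void fpe_handler(int sig, siginfo_t *info, void *ucv)
    {
            ucontext_t *uc = ucv;
            /* fpcsr aliases the word old kernels called oldmask */
            unsigned long fpcsr = uc->uc_mcontext.fpcsr;

            /* ... inspect/clear FPU exception flags here ... */
            (void)fpcsr;
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = fpe_handler,
                                    .sa_flags = SA_SIGINFO };

            sigaction(SIGFPE, &sa, NULL);
            /* ... perform floating-point work ... */
            return 0;
    }
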
index 4664a18f0787d4e40e86061831aed605a82846c9..2e7257a433ff4f94a58c1562614b7e1e63b22f2c 100644 (file)
@@ -50,7 +50,7 @@ static int restore_sigcontext(struct pt_regs *regs,
        err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
        err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
        err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
-       err |= __copy_from_user(&regs->fpcsr, &sc->fpu.fpcsr, sizeof(unsigned long));
+       err |= __copy_from_user(&regs->fpcsr, &sc->fpcsr, sizeof(unsigned long));
 
        /* make sure the SM-bit is cleared so user-mode cannot fool us */
        regs->sr &= ~SPR_SR_SM;
@@ -113,7 +113,7 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
        err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
        err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
        err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
-       err |= __copy_to_user(&sc->fpu.fpcsr, &regs->fpcsr, sizeof(unsigned long));
+       err |= __copy_to_user(&sc->fpcsr, &regs->fpcsr, sizeof(unsigned long));
 
        return err;
 }
index b6ac4f86c87b44b619681a2c7909c4a3fa7f0f0b..6472b08fa1b0cd6de6a962ced58234844e5ad28e 100644 (file)
@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
        return 0;
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-       BUG();
-       return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
        BUG();
index 338e62fbea0bb1dc30f984d4168c853689315e03..0bf6fd0bf42ae290b4c9982aadc388cfc4580dbc 100644 (file)
@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
                  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-       return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
index 17e7a778c856c3bd6b33bd500db88f80f6d4b7b0..d4a19e6547acf6edbd22ac1bbc0e5fc545346c05 100644 (file)
@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
        return region_id;
 }
 
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+       return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define        hash__pmd_bad(pmd)              (pmd_val(pmd) & H_PMD_BAD_BITS)
 #define        hash__pud_bad(pud)              (pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
index 3f86091e68b3bd4231ec069412c7b5edecb8e60b..7ab4c8c0f1abcb1aaf0412b9bf9fe78cd24ea489 100644 (file)
@@ -5,6 +5,7 @@
  *  Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
 #include <asm/page.h>
@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name) \
        ld      reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 
-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
        /*
         * We only need (or have stack space) to save this stuff if
         * we interrupted the kernel.
@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
        SPECIAL_EXC_STORE(r10,CSRR1)
 
        blr
+SYM_CODE_END(special_reg_save)
 
-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
        ld      r3,_MSR(r1)
        andi.   r3,r3,MSR_PR
        beq     1f
@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
        mtxer   r11
 
        blr
+SYM_CODE_END(ret_from_level_except)
 
 .macro ret_from_level srr0 srr1 paca_ex scratch
        bl      ret_from_level_except
@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
        mfspr   r13,\scratch
 .endm
 
-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
        ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
        rfci
+SYM_CODE_END(ret_from_crit_except)
 
-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
        ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
        rfmci
+SYM_CODE_END(ret_from_mc_except)
 
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)                        \
@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
  * r14 and r15 containing the fault address and error code, with the
  * original values stashed away in the PACA
  */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
        addi    r3,r1,STACK_INT_FRAME_REGS
        bl      do_page_fault
        b       interrupt_return
+SYM_CODE_END(storage_fault_common)
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
  * continues here.
  */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
        addi    r3,r1,STACK_INT_FRAME_REGS
        bl      alignment_exception
        REST_NVGPRS(r1)
        b       interrupt_return
+SYM_CODE_END(alignment_more)
 
 /*
  * Trampolines used when spotting a bad kernel stack pointer in
@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)
 
-       .globl  bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
        /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
        mfspr   r10,SPRN_SRR0;            /* read SRR0 before touching stack */
        ld      r1,PACAEMERGSP(r13)
@@ -1285,8 +1291,7 @@ have_hes:
         * ever takes any parameters, the SCOM code must also be updated to
         * provide them.
         */
-       .globl a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)
 
        ori     r11,r3,MAS0_WQ_ALLWAYS
        oris    r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
@@ -1479,8 +1484,7 @@ _GLOBAL(book3e_secondary_thread_init)
        mflr    r28
        b       3b
 
-       .globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
        /* Establish the interrupt vector base */
        tovirt(r2,r2)
        LOAD_REG_ADDR(r3, interrupt_base_book3e)
@@ -1488,7 +1492,7 @@ init_core_book3e:
        sync
        blr
 
-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
        lis     r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
        mtspr   SPRN_EPCR,r3
 
@@ -1502,6 +1506,7 @@ init_thread_book3e:
        mtspr   SPRN_TSR,r3
 
        blr
+SYM_CODE_END(init_thread_book3e)
 
 _GLOBAL(__setup_base_ivors)
        SET_IVOR(0, 0x020) /* Critical Input */
index 206475e3e0b480116719b69b0ddd0873f18dd529..4856e1a5161cccab5d7d5685dc1c9fc69516d119 100644 (file)
@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 
 static int ssb_prctl_get(struct task_struct *task)
 {
+       /*
+        * The STF_BARRIER feature is on by default, so if it's off that means
+        * firmware has explicitly said the CPU is not vulnerable via either
+        * the hypercall or device tree.
+        */
+       if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
+               return PR_SPEC_NOT_AFFECTED;
+
+       /*
+        * If the system's CPU has no known barrier (see setup_stf_barrier())
+        * then assume that the CPU is not vulnerable.
+        */
        if (stf_enabled_flush_types == STF_BARRIER_NONE)
-               /*
-                * We don't have an explicit signal from firmware that we're
-                * vulnerable or not, we only have certain CPU revisions that
-                * are known to be vulnerable.
-                *
-                * We assume that if we're on another CPU, where the barrier is
-                * NONE, then we are not vulnerable.
-                */
                return PR_SPEC_NOT_AFFECTED;
-       else
-               /*
-                * If we do have a barrier type then we are vulnerable. The
-                * barrier is not a global or per-process mitigation, so the
-                * only value we can report here is PR_SPEC_ENABLE, which
-                * appears as "vulnerable" in /proc.
-                */
-               return PR_SPEC_ENABLE;
-
-       return -EINVAL;
+
+       /*
+        * Otherwise the CPU is vulnerable. The barrier is not a global or
+        * per-process mitigation, so the only value that can be reported here
+        * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+        */
+       return PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
index 9342e79870dfd408bb8220ed1865c8f9e1959f9e..430d1d935a7cb5c6355aeac0c0037b00cec84a17 100644 (file)
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
+       unsigned long hpte_v, flags;
        struct hash_pte *hptep;
        int i;
        int slot_offset;
-       unsigned long hpte_v;
+
+       local_irq_save(flags);
 
        DBG_LOW("    remove(group=%lx)\n", hpte_group);
 
@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
                slot_offset &= 0x7;
        }
 
-       if (i == HPTES_PER_GROUP)
-               return -1;
+       if (i == HPTES_PER_GROUP) {
+               i = -1;
+               goto out;
+       }
 
        /* Invalidate the hpte. NOTE: this also unlocks it */
        release_hpte_lock();
        hptep->v = 0;
-
+out:
+       local_irq_restore(flags);
        return i;
 }
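The rework above takes local_irq_save() around the whole HPTE scan and funnels the former early return through a single out: label, so local_irq_restore() can never be skipped. A minimal userspace model of that single-exit pattern (save/restore here are stand-ins, not the kernel IRQ API):

#include <stdio.h>

static int irqs_off;                      /* stand-in for saved IRQ state */
static void save(void)    { irqs_off = 1; }
static void restore(void) { irqs_off = 0; }

static long guarded_remove(int found)
{
	long ret;

	save();
	if (!found) {
		ret = -1;
		goto out;         /* early exit still runs restore() */
	}
	ret = 0;                  /* the "invalidate" path */
out:
	restore();
	return ret;
}

int main(void)
{
	printf("%ld %ld %d\n", guarded_remove(1), guarded_remove(0), irqs_off);
	return 0;
}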
 
index bdcf460ea53d6a736bfd46b786ee1595b0766998..a8f66c015229727e9fd3bbcb93340943b24cff93 100644 (file)
@@ -317,19 +317,14 @@ void __init riscv_fill_hwcap(void)
 #undef SET_ISA_EXT_MAP
                }
 
-               /*
-                * Linux requires the following extensions, so we may as well
-                * always set them.
-                */
-               set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
-               set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
-
                /*
                 * These were kept as they were part of the base ISA when the
                 * port & dt-bindings were upstreamed, and so can be set
                 * unconditionally where `i` is in riscv,isa on DT systems.
                 */
                if (acpi_disabled) {
+                       set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
+                       set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
                        set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
                        set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
                }
index 70fb31960b639feeef86d7115fc3df1b6d8d45d3..9ce504737d18582b0a91152a885d5f0686fd6423 100644 (file)
@@ -1346,7 +1346,7 @@ static void __init reserve_crashkernel(void)
         */
        crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
                                               search_start,
-                                              min(search_end, (unsigned long) SZ_4G));
+                                              min(search_end, (unsigned long)(SZ_4G - 1)));
        if (crash_base == 0) {
                /* Try again without restricting region to 32bit addressable memory */
                crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
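One plausible reading of the SZ_4G - 1 change, assuming the usual 32-bit truncation pitfall: with a 32-bit unsigned long, the constant 0x100000000 wraps to 0, and min(search_end, 0) would empty the search window, while 0xffffffff still fits. A quick demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sz_4g = 0x100000000ULL;

	/* what a 32-bit unsigned long would see */
	printf("(uint32_t)SZ_4G       = %#x\n", (uint32_t)sz_4g);        /* 0 */
	printf("(uint32_t)(SZ_4G - 1) = %#x\n", (uint32_t)(sz_4g - 1));  /* 0xffffffff */
	return 0;
}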
index bf9802a63061df851a8d1858a63c1e0ac7672e6c..2717f549042870cb020d90cbbd941cde490eb667 100644 (file)
@@ -69,7 +69,7 @@ struct rv_jit_context {
        struct bpf_prog *prog;
        u16 *insns;             /* RV insns */
        int ninsns;
-       int body_len;
+       int prologue_len;
        int epilogue_offset;
        int *offset;            /* BPF to RV */
        int nexentries;
@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
        int from, to;
 
        off++; /* BPF branch is from PC+1, RV is from PC */
-       from = (insn > 0) ? ctx->offset[insn - 1] : 0;
-       to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+       from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
+       to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
        return ninsns_rvoff(to - from);
 }
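With the prologue now emitted before the body on every pass, ctx->offset[] values include the prologue, and branches touching BPF instruction 0 are measured from prologue_len instead of 0. A standalone re-implementation of the fixed rv_offset() with made-up numbers:

#include <stdio.h>

static int rv_offset(const int *offset, int prologue_len, int insn, int off)
{
	int from, to;

	off++; /* BPF branch is from PC+1, RV is from PC */
	from = (insn > 0) ? offset[insn - 1] : prologue_len;
	to = (insn + off > 0) ? offset[insn + off - 1] : prologue_len;
	return (to - from) * 2; /* ninsns_rvoff(): RVC insns are 2 bytes */
}

int main(void)
{
	int offset[] = { 4, 8, 12 };  /* end positions of BPF insns 0..2 */

	/* BPF insn 0 branching forward by 2: from end of prologue (2)
	 * to end of insn 2 (12) -> 20 bytes */
	printf("%d\n", rv_offset(offset, 2, 0, 2));
	return 0;
}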
 
index 737baf8715da7eb9aa72ebdc5e63f9cd4cae1570..7a26a3e1c73cfe8c75c0090c546f83d185572e2c 100644 (file)
@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        unsigned int prog_size = 0, extable_size = 0;
        bool tmp_blinded = false, extra_pass = false;
        struct bpf_prog *tmp, *orig_prog = prog;
-       int pass = 0, prev_ninsns = 0, prologue_len, i;
+       int pass = 0, prev_ninsns = 0, i;
        struct rv_jit_data *jit_data;
        struct rv_jit_context *ctx;
 
@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                prog = orig_prog;
                goto out_offset;
        }
+
+       if (build_body(ctx, extra_pass, NULL)) {
+               prog = orig_prog;
+               goto out_offset;
+       }
+
        for (i = 0; i < prog->len; i++) {
                prev_ninsns += 32;
                ctx->offset[i] = prev_ninsns;
@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        for (i = 0; i < NR_JIT_ITERATIONS; i++) {
                pass++;
                ctx->ninsns = 0;
+
+               bpf_jit_build_prologue(ctx);
+               ctx->prologue_len = ctx->ninsns;
+
                if (build_body(ctx, extra_pass, ctx->offset)) {
                        prog = orig_prog;
                        goto out_offset;
                }
-               ctx->body_len = ctx->ninsns;
-               bpf_jit_build_prologue(ctx);
+
                ctx->epilogue_offset = ctx->ninsns;
                bpf_jit_build_epilogue(ctx);
 
@@ -162,10 +171,8 @@ skip_init_ctx:
 
        if (!prog->is_func || extra_pass) {
                bpf_jit_binary_lock_ro(jit_data->header);
-               prologue_len = ctx->epilogue_offset - ctx->body_len;
                for (i = 0; i < prog->len; i++)
-                       ctx->offset[i] = ninsns_rvoff(prologue_len +
-                                                     ctx->offset[i]);
+                       ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
                bpf_prog_fill_jited_linfo(prog, ctx->offset);
 out_offset:
                kfree(ctx->offset);
index cc06e4cdb4cdf9b7cab0597183b06061a5901ec9..0eec82fb85e7c6b4cb262fc79ed7fa24d3e4f70b 100644 (file)
@@ -108,13 +108,13 @@ int systemasic_irq_demux(int irq)
        __u32 j, bit;
 
        switch (irq) {
-       case 13:
+       case 13 + 16:
                level = 0;
                break;
-       case 11:
+       case 11 + 16:
                level = 1;
                break;
-       case  9:
+       case 9 + 16:
                level = 2;
                break;
        default:
index 674da7ebd8b7f5005325c1fe48bb5c0daba2b5a1..310513646c9b3f2c452949f708f8fa5aa6ff8381 100644 (file)
@@ -386,7 +386,7 @@ static struct property_entry gpio_backlight_props[] = {
 };
 
 static struct gpio_backlight_platform_data gpio_backlight_data = {
-       .fbdev = &lcdc_device.dev,
+       .dev = &lcdc_device.dev,
 };
 
 static const struct platform_device_info gpio_backlight_device_info = {
index 533393d779c2b97f987c7df2444847278e14b549..01565660a6695014a75dd5d36365935677ea995f 100644 (file)
@@ -389,10 +389,10 @@ static unsigned char irl2irq[HL_NR_IRL];
 
 static int highlander_irq_demux(int irq)
 {
-       if (irq >= HL_NR_IRL || irq < 0 || !irl2irq[irq])
+       if (irq >= HL_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
                return irq;
 
-       return irl2irq[irq];
+       return irl2irq[irq - 16];
 }
 
 static void __init highlander_init_irq(void)
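This hunk, like the dreamcast and r2d ones above, reflects the sh move of legacy IRL vectors to Linux irq numbers offset by 16, so the lookup tables must now be indexed with irq - 16. A standalone model (the table size, offset constant and sample entry are illustrative):

#include <stdio.h>

#define NR_IRL     32
#define VIRQ_SHIFT 16   /* legacy vector + 16 = Linux irq number */

static unsigned char irl2irq[NR_IRL] = { [4] = 42 };

static int demux(int irq)
{
	if (irq >= NR_IRL + VIRQ_SHIFT || irq < VIRQ_SHIFT ||
	    !irl2irq[irq - VIRQ_SHIFT])
		return irq;                  /* not a cascaded IRL source */

	return irl2irq[irq - VIRQ_SHIFT];
}

int main(void)
{
	printf("%d %d\n", demux(20), demux(5)); /* 42, 5 */
	return 0;
}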
index 20f4db778ed6aee279fb3fc79668aa7076fa5db8..a18e80394aedca6bb27d133374094bc739b4c0ea 100644 (file)
@@ -202,7 +202,7 @@ static struct platform_device kfr2r09_sh_lcdc_device = {
 };
 
 static struct lv5207lp_platform_data kfr2r09_backlight_data = {
-       .fbdev = &kfr2r09_sh_lcdc_device.dev,
+       .dev = &kfr2r09_sh_lcdc_device.dev,
        .def_value = 13,
        .max_value = 13,
 };
index e34f81e9ae813b8dbb650a742e328869dc6d91de..d0a54a9adbce20fa447f620df1d65cdcbea35e08 100644 (file)
@@ -117,10 +117,10 @@ static unsigned char irl2irq[R2D_NR_IRL];
 
 int rts7751r2d_irq_demux(int irq)
 {
-       if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq])
+       if (irq >= R2D_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
                return irq;
 
-       return irl2irq[irq];
+       return irl2irq[irq - 16];
 }
 
 /*
index 97e715e4e9b33c4072b334504f002050ad990f14..e25193001ea081ee895b4f10aedd6dda420987c1 100644 (file)
@@ -119,7 +119,7 @@ static struct fb_videomode sh7763fb_videomode = {
        .vsync_len = 1,
        .sync = 0,
        .vmode = FB_VMODE_NONINTERLACED,
-       .flag = FBINFO_FLAG_DEFAULT,
+       .flag = FB_MODE_IS_UNKNOWN,
 };
 
 static struct sh7760fb_platdata sh7763fb_def_pdata = {
index efde2edb5627809e232cbf1528b9d152f4659103..9659a0bc58dec40b9952cd8b8cd15dbb3dee7c32 100644 (file)
@@ -29,9 +29,9 @@ endchoice
 config HD64461_IRQ
        int "HD64461 IRQ"
        depends on HD64461
-       default "36"
+       default "52"
        help
-         The default setting of the HD64461 IRQ is 36.
+         The default setting of the HD64461 IRQ is 52.
 
          Do not change this unless you know what you are doing.
 
index afb24cb034b114c91bb633f280b52880b988d3dd..d2c485fa333b5180a7b76a291e8e70a83bfa2231 100644 (file)
 #define        HD64461_NIMR            HD64461_IO_OFFSET(0x5002)
 
 #define        HD64461_IRQBASE         OFFCHIP_IRQ_BASE
-#define        OFFCHIP_IRQ_BASE        64
+#define        OFFCHIP_IRQ_BASE        (64 + 16)
 #define        HD64461_IRQ_NUM         16
 
 #define        HD64461_IRQ_UART        (HD64461_IRQBASE+5)
index 7a1339533d1d7ec214cff5ec9c51f60803c1aac0..d0af82c240b7318fcde01a6fe613d6a611fe4540 100644 (file)
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
+static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
        switch (size) {
        case 4:
index 66cd61dde9ec1f0284a17037ec178fa5157a5411..3de25262c411803106a6f445e1a246ff6aee6c89 100644 (file)
@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
        return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
        switch (size) {
index 918fed7ad4d8afe032f3c35937b71e397028e1bf..b1bfed0c85288fbb0ec583d571c9b3c6202b62c5 100644 (file)
@@ -437,7 +437,7 @@ void __init arch_cpu_finalize_init(void)
        os_check_bugs();
 }
 
-void apply_ibt_endbr(s32 *start, s32 *end)
+void apply_seal_endbr(s32 *start, s32 *end)
 {
 }
 
index fdc2e3abd6152f53530f687a3f9041c020065609..95315d3474a292cc3418ea4a9f43dedcdbd5dc14 100644 (file)
@@ -259,7 +259,7 @@ drivers-$(CONFIG_PCI)            += arch/x86/pci/
 # suspend and hibernation support
 drivers-$(CONFIG_PM) += arch/x86/power/
 
-drivers-$(CONFIG_FB) += arch/x86/video/
+drivers-$(CONFIG_FB_CORE) += arch/x86/video/
 
 ####
 # boot loader support. Several targets are kept for legacy purposes
index 91397f58ac3008556fc0cfbb2437eee45da706ff..6e6af42e044a20cf0aec1115406c546ebcb3bbab 100644 (file)
@@ -719,26 +719,6 @@ SYM_CODE_START(__switch_to_asm)
 SYM_CODE_END(__switch_to_asm)
 .popsection
 
-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack.  This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-.pushsection .text, "ax"
-SYM_FUNC_START(schedule_tail_wrapper)
-       FRAME_BEGIN
-
-       pushl   %eax
-       call    schedule_tail
-       popl    %eax
-
-       FRAME_END
-       RET
-SYM_FUNC_END(schedule_tail_wrapper)
-.popsection
-
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -747,29 +727,22 @@ SYM_FUNC_END(schedule_tail_wrapper)
  * edi: kernel thread arg
  */
 .pushsection .text, "ax"
-SYM_CODE_START(ret_from_fork)
-       call    schedule_tail_wrapper
+SYM_CODE_START(ret_from_fork_asm)
+       movl    %esp, %edx      /* regs */
 
-       testl   %ebx, %ebx
-       jnz     1f              /* kernel threads are uncommon */
+       /* return address for the stack unwinder */
+       pushl   $.Lsyscall_32_done
 
-2:
-       /* When we fork, we trace the syscall return in the child, too. */
-       movl    %esp, %eax
-       call    syscall_exit_to_user_mode
-       jmp     .Lsyscall_32_done
+       FRAME_BEGIN
+       /* prev already in EAX */
+       movl    %ebx, %ecx      /* fn */
+       pushl   %edi            /* fn_arg */
+       call    ret_from_fork
+       addl    $4, %esp
+       FRAME_END
 
-       /* kernel thread */
-1:     movl    %edi, %eax
-       CALL_NOSPEC ebx
-       /*
-        * A kernel thread is allowed to return here after successfully
-        * calling kernel_execve().  Exit to userspace to complete the execve()
-        * syscall.
-        */
-       movl    $0, PT_EAX(%esp)
-       jmp     2b
-SYM_CODE_END(ret_from_fork)
+       RET
+SYM_CODE_END(ret_from_fork_asm)
 .popsection
 
 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
index f31e286c297737d77d44a98b9526384ddbae273b..91f6818884fa3b39b61a7de2fb51ffa3e8c38136 100644 (file)
@@ -284,36 +284,19 @@ SYM_FUNC_END(__switch_to_asm)
  * r12: kernel thread arg
  */
 .pushsection .text, "ax"
-       __FUNC_ALIGN
-SYM_CODE_START_NOALIGN(ret_from_fork)
-       UNWIND_HINT_END_OF_STACK
+SYM_CODE_START(ret_from_fork_asm)
+       UNWIND_HINT_REGS
        ANNOTATE_NOENDBR // copy_thread
        CALL_DEPTH_ACCOUNT
-       movq    %rax, %rdi
-       call    schedule_tail                   /* rdi: 'prev' task parameter */
 
-       testq   %rbx, %rbx                      /* from kernel_thread? */
-       jnz     1f                              /* kernel threads are uncommon */
+       movq    %rax, %rdi              /* prev */
+       movq    %rsp, %rsi              /* regs */
+       movq    %rbx, %rdx              /* fn */
+       movq    %r12, %rcx              /* fn_arg */
+       call    ret_from_fork
 
-2:
-       UNWIND_HINT_REGS
-       movq    %rsp, %rdi
-       call    syscall_exit_to_user_mode       /* returns with IRQs disabled */
        jmp     swapgs_restore_regs_and_return_to_usermode
-
-1:
-       /* kernel thread */
-       UNWIND_HINT_END_OF_STACK
-       movq    %r12, %rdi
-       CALL_NOSPEC rbx
-       /*
-        * A kernel thread is allowed to return here after successfully
-        * calling kernel_execve().  Exit to userspace to complete the execve()
-        * syscall.
-        */
-       movq    $0, RAX(%rsp)
-       jmp     2b
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
 .popsection
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
index a149fafad813dfcd9e8bab5f3639284fbfec1032..2a284ba951b76ac513f71dd89a2456a19125d965 100644 (file)
@@ -3993,6 +3993,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
                struct perf_event *leader = event->group_leader;
                struct perf_event *sibling = NULL;
 
+               /*
+                * When this memload event is also the first event (no group
+                * exists yet), then there is no aux event before it.
+                */
+               if (leader == event)
+                       return -ENODATA;
+
                if (!is_mem_loads_aux_event(leader)) {
                        for_each_sibling_event(sibling, leader) {
                                if (is_mem_loads_aux_event(sibling))
index 6c15a622ad6096e61efe89a95305156a2f5b6f2c..9c4da699e11aff225eac586a3bbc0598b1201a5d 100644 (file)
@@ -96,7 +96,7 @@ extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
 extern void apply_returns(s32 *start, s32 *end);
-extern void apply_ibt_endbr(s32 *start, s32 *end);
+extern void apply_seal_endbr(s32 *start, s32 *end);
 extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
                          s32 *start_cfi, s32 *end_cfi);
 
index baae6b4fea23ae84ceb0550bd0ccdcaeb33bb305..1e59581d500ca9f82dc073e1587da11fe944873d 100644 (file)
@@ -34,7 +34,7 @@
 /*
  * Create a dummy function pointer reference to prevent objtool from marking
  * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
- * apply_ibt_endbr()).
+ * apply_seal_endbr()).
  */
 #define IBT_NOSEAL(fname)                              \
        ".pushsection .discard.ibt_endbr_noseal\n\t"    \
index 55388c9f760123a0a55baf923afa1096f54e4587..1a65cf4acb2b93313581497ff27d29ac6cd61365 100644 (file)
  * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
  * indirect jmp/call which may be susceptible to the Spectre variant 2
  * attack.
+ *
+ * NOTE: these do not take kCFI into account and are thus not comparable to C
+ * indirect calls, take care when using. The target of these should be an ENDBR
+ * instruction irrespective of kCFI.
  */
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
index 5c91305d09d26d5a8d3c6407e7b6fe23b85a28c7..f42dbf17f52b0ee12ec74f34d1fb551743be0e6c 100644 (file)
@@ -12,7 +12,9 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 __visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);
 
-asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_fork_asm(void);
+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+                            int (*fn)(void *), void *fn_arg);
 
 /*
  * This is the structure pointed to by thread.sp for an inactive task.  The
index 72646d75b6ffe092eaf28012fba24cd6a9369485..2dcf3a06af0908b6dc67594daff54d7555b9fc83 100644 (file)
@@ -778,6 +778,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #ifdef CONFIG_X86_KERNEL_IBT
 
+static void poison_cfi(void *addr);
+
 static void __init_or_module poison_endbr(void *addr, bool warn)
 {
        u32 endbr, poison = gen_endbr_poison();
@@ -802,8 +804,11 @@ static void __init_or_module poison_endbr(void *addr, bool warn)
 
 /*
  * Generated by: objtool --ibt
+ *
+ * Seal the functions for indirect calls by clobbering the ENDBR instructions
+ * and the kCFI hash value.
  */
-void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
+void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
 {
        s32 *s;
 
@@ -812,13 +817,13 @@ void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
 
                poison_endbr(addr, true);
                if (IS_ENABLED(CONFIG_FINEIBT))
-                       poison_endbr(addr - 16, false);
+                       poison_cfi(addr - 16);
        }
 }
 
 #else
 
-void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { }
+void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
 
 #endif /* CONFIG_X86_KERNEL_IBT */
 
@@ -1063,6 +1068,17 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
        return 0;
 }
 
+static void cfi_rewrite_endbr(s32 *start, s32 *end)
+{
+       s32 *s;
+
+       for (s = start; s < end; s++) {
+               void *addr = (void *)s + *s;
+
+               poison_endbr(addr+16, false);
+       }
+}
+
 /* .retpoline_sites */
 static int cfi_rand_callers(s32 *start, s32 *end)
 {
@@ -1157,14 +1173,19 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
                return;
 
        case CFI_FINEIBT:
+               /* place the FineIBT preamble at func()-16 */
                ret = cfi_rewrite_preamble(start_cfi, end_cfi);
                if (ret)
                        goto err;
 
+               /* rewrite the callers to target func()-16 */
                ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
                if (ret)
                        goto err;
 
+               /* now that nobody targets func()+0, remove ENDBR there */
+               cfi_rewrite_endbr(start_cfi, end_cfi);
+
                if (builtin)
                        pr_info("Using FineIBT CFI\n");
                return;
@@ -1177,6 +1198,41 @@ err:
        pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
 }
 
+static inline void poison_hash(void *addr)
+{
+       *(u32 *)addr = 0;
+}
+
+static void poison_cfi(void *addr)
+{
+       switch (cfi_mode) {
+       case CFI_FINEIBT:
+               /*
+                * __cfi_\func:
+                *      osp nopl (%rax)
+                *      subl    $0, %r10d
+                *      jz      1f
+                *      ud2
+                * 1:   nop
+                */
+               poison_endbr(addr, false);
+               poison_hash(addr + fineibt_preamble_hash);
+               break;
+
+       case CFI_KCFI:
+               /*
+                * __cfi_\func:
+                *      movl    $0, %eax
+                *      .skip   11, 0x90
+                */
+               poison_hash(addr + 1);
+               break;
+
+       default:
+               break;
+       }
+}
+
 #else
 
 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
@@ -1184,6 +1240,10 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
 {
 }
 
+#ifdef CONFIG_X86_KERNEL_IBT
+static void poison_cfi(void *addr) { }
+#endif
+
 #endif
 
 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
@@ -1565,7 +1625,10 @@ void __init alternative_instructions(void)
         */
        callthunks_patch_builtin_calls();
 
-       apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+       /*
+        * Seal all functions that do not have their address taken.
+        */
+       apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
 
 #ifdef CONFIG_SMP
        /* Patch to UP if other cpus not imminent. */
index 01e8f34daf22013266cbd32bd24a976ba29352ce..12df54ff0e817188d384affee8d98a515d6db471 100644 (file)
@@ -282,7 +282,6 @@ static inline void tramp_free(void *tramp) { }
 
 /* Defined as markers to the end of the ftrace default trampolines */
 extern void ftrace_regs_caller_end(void);
-extern void ftrace_regs_caller_ret(void);
 extern void ftrace_caller_end(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
index b05f62ee2344ba6eaf1bc8eab29ebcba3256e0e6..5f71a0cf4399a577e1235f6040a70fb26ee56346 100644 (file)
@@ -358,7 +358,7 @@ int module_finalize(const Elf_Ehdr *hdr,
        }
        if (ibt_endbr) {
                void *iseg = (void *)ibt_endbr->sh_addr;
-               apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
+               apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
        }
        if (locks) {
                void *lseg = (void *)locks->sh_addr;
index ff9b80a0e3e3bc7f86f02b3c459d96e133f815e7..72015dba72ab4c09fc750e8a66a40adfbf043b4e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/static_call.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/entry-common.h>
 #include <asm/cpu.h>
 #include <asm/apic.h>
 #include <linux/uaccess.h>
@@ -134,6 +135,25 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
                return do_set_thread_area_64(p, ARCH_SET_FS, tls);
 }
 
+__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
+                                    int (*fn)(void *), void *fn_arg)
+{
+       schedule_tail(prev);
+
+       /* Is this a kernel thread? */
+       if (unlikely(fn)) {
+               fn(fn_arg);
+               /*
+                * A kernel thread is allowed to return here after successfully
+                * calling kernel_execve().  Exit to userspace to complete the
+                * execve() syscall.
+                */
+               regs->ax = 0;
+       }
+
+       syscall_exit_to_user_mode(regs);
+}
+
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
        unsigned long clone_flags = args->flags;
@@ -149,7 +169,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        frame = &fork_frame->frame;
 
        frame->bp = encode_frame_pointer(childregs);
-       frame->ret_addr = (unsigned long) ret_from_fork;
+       frame->ret_addr = (unsigned long) ret_from_fork_asm;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap = NULL;
        p->thread.iopl_warn = 0;
index 11640c116115fdf912af5353cfa3f428631ba445..5ebe48752ffc4fa62d9eae3e094382fdf08eb2a6 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_FB)               += fbdev.o
+obj-$(CONFIG_FB_CORE)          += fbdev.o
index 643d02900fbb0561492c23bd0a588ff5ce9806b0..a0ea285878dbe168ce5bf8cea61d5c642c4d0b72 100644 (file)
@@ -90,30 +90,35 @@ SYM_CODE_END(xen_cpu_bringup_again)
        ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
        ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
        ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
-#ifdef CONFIG_X86_32
-       ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __PAGE_OFFSET)
-#else
+#ifdef CONFIG_XEN_PV
        ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      _ASM_PTR __START_KERNEL_map)
        /* Map the p2m table to a 512GB-aligned user address. */
        ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M,       .quad (PUD_SIZE * PTRS_PER_PUD))
-#endif
-#ifdef CONFIG_XEN_PV
        ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          _ASM_PTR startup_xen)
-#endif
-       ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
-       ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
-               .ascii "!writable_page_tables|pae_pgdir_above_4gb")
-       ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
-               .long (1 << XENFEAT_writable_page_tables) |       \
-                     (1 << XENFEAT_dom0) |                       \
-                     (1 << XENFEAT_linux_rsdp_unrestricted))
+       ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .ascii "!writable_page_tables")
        ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "yes")
-       ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
        ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
                .quad _PAGE_PRESENT; .quad _PAGE_PRESENT)
-       ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
        ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN,  .long 1)
-       ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW,   _ASM_PTR __HYPERVISOR_VIRT_START)
        ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   _ASM_PTR 0)
+# define FEATURES_PV (1 << XENFEAT_writable_page_tables)
+#else
+# define FEATURES_PV 0
+#endif
+#ifdef CONFIG_XEN_PVH
+# define FEATURES_PVH (1 << XENFEAT_linux_rsdp_unrestricted)
+#else
+# define FEATURES_PVH 0
+#endif
+#ifdef CONFIG_XEN_DOM0
+# define FEATURES_DOM0 (1 << XENFEAT_dom0)
+#else
+# define FEATURES_DOM0 0
+#endif
+       ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
+       ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
+               .long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
+       ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
+       ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
 
 #endif /*CONFIG_XEN */
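The reshuffled notes build XEN_ELFNOTE_SUPPORTED_FEATURES from per-config contributions that collapse to 0 when a guest type is compiled out. A standalone model of the pattern, with illustrative bit positions rather than the real XENFEAT_* values:

#include <stdio.h>

#define CFG_PV   1   /* stand-ins for CONFIG_XEN_PV/PVH/DOM0 */
#define CFG_PVH  0
#define CFG_DOM0 1

#if CFG_PV
# define FEATURES_PV   (1 << 0)
#else
# define FEATURES_PV   0
#endif
#if CFG_PVH
# define FEATURES_PVH  (1 << 1)
#else
# define FEATURES_PVH  0
#endif
#if CFG_DOM0
# define FEATURES_DOM0 (1 << 2)
#else
# define FEATURES_DOM0 0
#endif

int main(void)
{
	/* disabled configs contribute nothing to the note */
	printf("%#x\n", FEATURES_PV | FEATURES_PVH | FEATURES_DOM0); /* 0x5 */
	return 0;
}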
index 20d6b4961001b970aca261d26ad8c07c423addf8..ee97edce2300fab94d0cbffe16fc5dd73d527dec 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * arch/xtensa/kernel/align.S
  *
- * Handle unalignment exceptions in kernel space.
+ * Handle unalignment and load/store exceptions.
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License.  See the file "COPYING" in the main directory of
 #define LOAD_EXCEPTION_HANDLER
 #endif
 
-#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER
+#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
+#define STORE_EXCEPTION_HANDLER
+#endif
+
+#if defined LOAD_EXCEPTION_HANDLER || defined STORE_EXCEPTION_HANDLER
 #define ANY_EXCEPTION_HANDLER
 #endif
 
-#if XCHAL_HAVE_WINDOWED
+#if XCHAL_HAVE_WINDOWED && defined CONFIG_MMU
 #define UNALIGNED_USER_EXCEPTION
 #endif
 
-/*  First-level exception handler for unaligned exceptions.
- *
- *  Note: This handler works only for kernel exceptions.  Unaligned user
- *        access should get a seg fault.
- */
-
 /* Big and little endian 16-bit values are located in
  * different halves of a register.  HWORD_START helps to
  * abstract the notion of extracting a 16-bit value from a
@@ -228,8 +226,6 @@ ENDPROC(fast_load_store)
 #ifdef ANY_EXCEPTION_HANDLER
 ENTRY(fast_unaligned)
 
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-
        call0   .Lsave_and_load_instruction
 
        /* Analyze the instruction (load or store?). */
@@ -244,8 +240,7 @@ ENTRY(fast_unaligned)
        /* 'store indicator bit' not set, jump */
        _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
 
-#endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER
 
        /* Store: Jump to table entry to get the value in the source register.*/
 
@@ -254,7 +249,7 @@ ENTRY(fast_unaligned)
        addx8   a5, a6, a5
        jx      a5                      # jump into table
 #endif
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION
+#ifdef LOAD_EXCEPTION_HANDLER
 
        /* Load: Load memory address. */
 
@@ -328,7 +323,7 @@ ENTRY(fast_unaligned)
        mov     a14, a3         ;       _j .Lexit;      .align 8
        mov     a15, a3         ;       _j .Lexit;      .align 8
 #endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef STORE_EXCEPTION_HANDLER
 .Lstore_table:
        l32i    a3, a2, PT_AREG0;       _j .Lstore_w;   .align 8
        mov     a3, a1;                 _j .Lstore_w;   .align 8        # fishy??
@@ -348,7 +343,6 @@ ENTRY(fast_unaligned)
        mov     a3, a15         ;       _j .Lstore_w;   .align 8
 #endif
 
-#ifdef ANY_EXCEPTION_HANDLER
        /* We cannot handle this exception. */
 
        .extern _kernel_exception
@@ -377,8 +371,8 @@ ENTRY(fast_unaligned)
 
 2:     movi    a0, _user_exception
        jx      a0
-#endif
-#if XCHAL_UNALIGNED_STORE_EXCEPTION
+
+#ifdef STORE_EXCEPTION_HANDLER
 
        # a7: instruction pointer, a4: instruction, a3: value
 .Lstore_w:
@@ -444,7 +438,7 @@ ENTRY(fast_unaligned)
        s32i    a6, a4, 4
 #endif
 #endif
-#ifdef ANY_EXCEPTION_HANDLER
+
 .Lexit:
 #if XCHAL_HAVE_LOOPS
        rsr     a4, lend                # check if we reached LEND
@@ -539,7 +533,7 @@ ENTRY(fast_unaligned)
        __src_b a4, a4, a5      # a4 has the instruction
 
        ret
-#endif
+
 ENDPROC(fast_unaligned)
 
 ENTRY(fast_unaligned_fixup)
index 17eb180eff7c0d61595971781167ab929eb3ca19..427c125a137aae95b8635d860c609c60fa500883 100644 (file)
@@ -102,7 +102,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
 #endif
 { EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,     do_div0 },
 /* EXCCAUSE_PRIVILEGED unhandled */
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \
+               IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)
 #ifdef CONFIG_XTENSA_UNALIGNED_USER
 { EXCCAUSE_UNALIGNED,          USER,      fast_unaligned },
 #endif
index 9ac46ab3a296cb599ab193b60ba0dae62c4c1cd7..85c82cd42188aff4181210953ccded5729e136cf 100644 (file)
@@ -237,7 +237,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
 
        init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
        if (*init == ',') {
-               rem = split_if_spec(init + 1, &mac_str, &dev_name);
+               rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
                if (rem != NULL) {
                        pr_err("%s: extra garbage on specification : '%s'\n",
                               dev->name, rem);
@@ -540,6 +540,7 @@ static void iss_net_configure(int index, char *init)
                rtnl_unlock();
                pr_err("%s: error registering net device!\n", dev->name);
                platform_device_unregister(&lp->pdev);
+               /* dev is freed by the iss_net_pdev_release callback */
                return;
        }
        rtnl_unlock();
index 2a67d3fb63e5ced175d1da86e2e3d78e130c41b4..7fabc883e39f1cef7e7d250ae7df635a4ccc2ae9 100644 (file)
@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
        unsigned int slot_hashtable_size;
 
        memset(profile, 0, sizeof(*profile));
-       init_rwsem(&profile->lock);
+
+       /*
+        * profile->lock of an underlying device can nest inside profile->lock
+        * of a device-mapper device, so use a dynamic lock class to avoid
+        * false-positive lockdep reports.
+        */
+       lockdep_register_key(&profile->lockdep_key);
+       __init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);
 
        if (num_slots == 0)
                return 0;
@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
        profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
                                  GFP_KERNEL);
        if (!profile->slots)
-               return -ENOMEM;
+               goto err_destroy;
 
        profile->num_slots = num_slots;
 
@@ -435,6 +442,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
 {
        if (!profile)
                return;
+       lockdep_unregister_key(&profile->lockdep_key);
        kvfree(profile->slot_hashtable);
        kvfree_sensitive(profile->slots,
                         sizeof(profile->slots[0]) * profile->num_slots);
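Registering a dynamic lock class per profile lets lockdep tell stacked instances apart, so the nesting described in the comment no longer looks like a self-deadlock. A kernel-style sketch of the same pattern (struct and function names are made up; the lockdep and rwsem calls are the real APIs used in the hunk):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct my_profile {
	struct rw_semaphore lock;
	struct lock_class_key lockdep_key;
};

static void my_profile_init(struct my_profile *p)
{
	/* per-instance class: nesting across stacked devices is allowed */
	lockdep_register_key(&p->lockdep_key);
	__init_rwsem(&p->lock, "&p->lock", &p->lockdep_key);
}

static void my_profile_destroy(struct my_profile *p)
{
	/* unregister before the key's storage goes away */
	lockdep_unregister_key(&p->lockdep_key);
}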
index dba392cf22bec6cbae65e007d9890565aceb971a..8220517c2d67dad4e09457b7dd1303e4015a6844 100644 (file)
@@ -189,7 +189,7 @@ static void blk_flush_complete_seq(struct request *rq,
        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                spin_lock(&q->requeue_lock);
-               list_add_tail(&rq->queuelist, &q->flush_list);
+               list_add(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;
index 5504719b970d597d8dd853aab85b495c251d9b28..d50b1d62a3d923fc050c53802d87fb08cd9aab49 100644 (file)
@@ -328,8 +328,24 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+/* Set start and alloc time when the allocated request is actually used */
+static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
+{
+       if (blk_mq_need_time_stamp(rq))
+               rq->start_time_ns = ktime_get_ns();
+       else
+               rq->start_time_ns = 0;
+
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+       if (blk_queue_rq_alloc_time(rq->q))
+               rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
+       else
+               rq->alloc_time_ns = 0;
+#endif
+}
+
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
-               struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
+               struct blk_mq_tags *tags, unsigned int tag)
 {
        struct blk_mq_ctx *ctx = data->ctx;
        struct blk_mq_hw_ctx *hctx = data->hctx;
@@ -356,14 +372,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        }
        rq->timeout = 0;
 
-       if (blk_mq_need_time_stamp(rq))
-               rq->start_time_ns = ktime_get_ns();
-       else
-               rq->start_time_ns = 0;
        rq->part = NULL;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
-       rq->alloc_time_ns = alloc_time_ns;
-#endif
        rq->io_start_time_ns = 0;
        rq->stats_sectors = 0;
        rq->nr_phys_segments = 0;
@@ -393,8 +402,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static inline struct request *
-__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
-               u64 alloc_time_ns)
+__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 {
        unsigned int tag, tag_offset;
        struct blk_mq_tags *tags;
@@ -413,7 +421,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
                tag = tag_offset + i;
                prefetch(tags->static_rqs[tag]);
                tag_mask &= ~(1UL << i);
-               rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
+               rq = blk_mq_rq_ctx_init(data, tags, tag);
                rq_list_add(data->cached_rq, rq);
                nr++;
        }
@@ -474,9 +482,11 @@ retry:
         * Try batched alloc if we want more than 1 tag.
         */
        if (data->nr_tags > 1) {
-               rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
-               if (rq)
+               rq = __blk_mq_alloc_requests_batch(data);
+               if (rq) {
+                       blk_mq_rq_time_init(rq, alloc_time_ns);
                        return rq;
+               }
                data->nr_tags = 1;
        }
 
@@ -499,8 +509,9 @@ retry:
                goto retry;
        }
 
-       return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
-                                       alloc_time_ns);
+       rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
+       blk_mq_rq_time_init(rq, alloc_time_ns);
+       return rq;
 }
 
 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
@@ -555,6 +566,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
                        return NULL;
 
                plug->cached_rq = rq_list_next(rq);
+               blk_mq_rq_time_init(rq, 0);
        }
 
        rq->cmd_flags = opf;
@@ -656,8 +668,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        tag = blk_mq_get_tag(&data);
        if (tag == BLK_MQ_NO_TAG)
                goto out_queue_exit;
-       rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
-                                       alloc_time_ns);
+       rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
+       blk_mq_rq_time_init(rq, alloc_time_ns);
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
@@ -2896,6 +2908,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
        plug->cached_rq = rq_list_next(rq);
        rq_qos_throttle(q, *bio);
 
+       blk_mq_rq_time_init(rq, 0);
        rq->cmd_flags = (*bio)->bi_opf;
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
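The upshot of blk_mq_rq_time_init() is that timestamps are taken when a request is first used: cached requests pass alloc_time_ns == 0, and the GNU ?: fallback then reuses the fresh start time. A simplified standalone model (the blk_mq_need_time_stamp() check is omitted):

#include <stdio.h>

typedef unsigned long long u64;

static u64 now = 1000;                /* stand-in for ktime_get_ns() */

static void rq_time_init(u64 *start_ns, u64 *alloc_ns, u64 alloc_time_ns)
{
	*start_ns = now;
	*alloc_ns = alloc_time_ns ?: *start_ns; /* GNU C: a ?: b */
}

int main(void)
{
	u64 s, a;

	rq_time_init(&s, &a, 0);   /* cached rq: alloc time = first use */
	printf("%llu %llu\n", s, a);         /* 1000 1000 */
	rq_time_init(&s, &a, 400); /* freshly allocated rq keeps its time */
	printf("%llu %llu\n", s, a);         /* 1000 400 */
	return 0;
}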
index 0f9f97cdddd99c797906c58a232a9f137f5ce138..619ee41a51cc8c81b0333f4f95a70e3b3ed7c442 100644 (file)
@@ -442,7 +442,6 @@ struct blk_revalidate_zone_args {
        unsigned long   *conv_zones_bitmap;
        unsigned long   *seq_zones_wlock;
        unsigned int    nr_zones;
-       sector_t        zone_sectors;
        sector_t        sector;
 };
 
@@ -456,38 +455,34 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
        struct gendisk *disk = args->disk;
        struct request_queue *q = disk->queue;
        sector_t capacity = get_capacity(disk);
+       sector_t zone_sectors = q->limits.chunk_sectors;
+
+       /* Check for bad zones and holes in the zone report */
+       if (zone->start != args->sector) {
+               pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+                       disk->disk_name, args->sector, zone->start);
+               return -ENODEV;
+       }
+
+       if (zone->start >= capacity || !zone->len) {
+               pr_warn("%s: Invalid zone start %llu, length %llu\n",
+                       disk->disk_name, zone->start, zone->len);
+               return -ENODEV;
+       }
 
        /*
         * All zones must have the same size, with the possible exception of
         * a smaller last zone.
         */
-       if (zone->start == 0) {
-               if (zone->len == 0 || !is_power_of_2(zone->len)) {
-                       pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
-                               disk->disk_name, zone->len);
-                       return -ENODEV;
-               }
-
-               args->zone_sectors = zone->len;
-               args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
-       } else if (zone->start + args->zone_sectors < capacity) {
-               if (zone->len != args->zone_sectors) {
+       if (zone->start + zone->len < capacity) {
+               if (zone->len != zone_sectors) {
                        pr_warn("%s: Invalid zoned device with non constant zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
-       } else {
-               if (zone->len > args->zone_sectors) {
-                       pr_warn("%s: Invalid zoned device with larger last zone size\n",
-                               disk->disk_name);
-                       return -ENODEV;
-               }
-       }
-
-       /* Check for holes in the zone report */
-       if (zone->start != args->sector) {
-               pr_warn("%s: Zone gap at sectors %llu..%llu\n",
-                       disk->disk_name, args->sector, zone->start);
+       } else if (zone->len > zone_sectors) {
+               pr_warn("%s: Invalid zoned device with larger last zone size\n",
+                       disk->disk_name);
                return -ENODEV;
        }
 
@@ -526,11 +521,13 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
  * @disk:      Target disk
  * @update_driver_data:        Callback to update driver data on the frozen disk
  *
- * Helper function for low-level device drivers to (re) allocate and initialize
- * a disk request queue zone bitmaps. This functions should normally be called
- * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
- * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
- * is correct.
+ * Helper function for low-level device drivers to check and (re)allocate and
+ * initialize a disk request queue's zone bitmaps. This function should normally
+ * be called within the disk ->revalidate method for blk-mq based drivers.
+ * Before calling this function, the device driver must already have set the
+ * device zone size (chunk_sector limit) and the max zone append limit.
+ * For BIO based drivers, this function cannot be used. BIO based device drivers
+ * only need to set disk->nr_zones so that the sysfs exposed value is correct.
  * If the @update_driver_data callback function is not NULL, the callback is
  * executed with the device request queue frozen after all zones have been
  * checked.
@@ -539,9 +536,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
                              void (*update_driver_data)(struct gendisk *disk))
 {
        struct request_queue *q = disk->queue;
-       struct blk_revalidate_zone_args args = {
-               .disk           = disk,
-       };
+       sector_t zone_sectors = q->limits.chunk_sectors;
+       sector_t capacity = get_capacity(disk);
+       struct blk_revalidate_zone_args args = { };
        unsigned int noio_flag;
        int ret;
 
@@ -550,13 +547,31 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
        if (WARN_ON_ONCE(!queue_is_mq(q)))
                return -EIO;
 
-       if (!get_capacity(disk))
-               return -EIO;
+       if (!capacity)
+               return -ENODEV;
+
+       /*
+        * Checks that the device driver indicated a valid zone size and that
+        * the max zone append limit is set.
+        */
+       if (!zone_sectors || !is_power_of_2(zone_sectors)) {
+               pr_warn("%s: Invalid non power of two zone size (%llu)\n",
+                       disk->disk_name, zone_sectors);
+               return -ENODEV;
+       }
+
+       if (!q->limits.max_zone_append_sectors) {
+               pr_warn("%s: Invalid 0 maximum zone append limit\n",
+                       disk->disk_name);
+               return -ENODEV;
+       }
 
        /*
         * Ensure that all memory allocations in this context are done as if
         * GFP_NOIO was specified.
         */
+       args.disk = disk;
+       args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
        noio_flag = memalloc_noio_save();
        ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                                       blk_revalidate_zone_cb, &args);
@@ -570,7 +585,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
         * If zones were reported, make sure that the entire disk capacity
         * has been checked.
         */
-       if (ret > 0 && args.sector != get_capacity(disk)) {
+       if (ret > 0 && args.sector != capacity) {
                pr_warn("%s: Missing zones from sector %llu\n",
                        disk->disk_name, args.sector);
                ret = -ENODEV;
@@ -583,7 +598,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
         */
        blk_mq_freeze_queue(q);
        if (ret > 0) {
-               blk_queue_chunk_sectors(q, args.zone_sectors);
                disk->nr_zones = args.nr_zones;
                swap(disk->seq_zones_wlock, args.seq_zones_wlock);
                swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
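Under the new contract spelled out in the kernel-doc, zone size and the zone-append limit are driver-set preconditions rather than values derived from the zone report. A hypothetical blk-mq driver revalidate path under that contract (the mydrv/MYDRV names are made up; the block-layer calls are real):

#include <linux/blkdev.h>

#define MYDRV_ZONE_SECTORS    (1 << 19) /* 256 MiB zones, power of two */
#define MYDRV_ZONE_APPEND_MAX (1 << 10) /* non-zero append limit */

static int mydrv_revalidate_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	/* both limits must be in place before revalidation */
	blk_queue_chunk_sectors(q, MYDRV_ZONE_SECTORS);
	blk_queue_max_zone_append_sectors(q, MYDRV_ZONE_APPEND_MAX);

	return blk_revalidate_disk_zones(disk, NULL);
}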
index 6aa5daf7ae32f702ab80dacb16014f2a9c2c0edc..02a916ba62ee750d4ad29127604b7d4a0cb474d7 100644 (file)
@@ -176,7 +176,7 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
         * zoned writes, start searching from the start of a zone.
         */
        if (blk_rq_is_seq_zoned_write(rq))
-               pos -= round_down(pos, rq->q->limits.chunk_sectors);
+               pos = round_down(pos, rq->q->limits.chunk_sectors);
 
        while (node) {
                rq = rb_entry_rq(node);
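The one-character fix matters because pos -= round_down(pos, chunk) leaves the in-zone offset (pos % chunk) rather than the zone start. A worked example with a 256-sector zone:

#include <stdio.h>

static unsigned long round_down_ul(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);   /* a is a power of two, as chunk_sectors is */
}

int main(void)
{
	unsigned long pos = 1000, chunk = 256;

	printf("buggy: %lu\n", pos - round_down_ul(pos, chunk)); /* 232 */
	printf("fixed: %lu\n", round_down_ul(pos, chunk));       /* 768 */
	return 0;
}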
index ed222b9c901bbfae96b252cceb08a38505f062a4..506921095412958132a4f946d63e3bff52bc15dd 100644 (file)
@@ -90,7 +90,7 @@ int amiga_partition(struct parsed_partitions *state)
        }
        blk = be32_to_cpu(rdb->rdb_PartitionList);
        put_dev_sector(sect);
-       for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+       for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
                /* Read in terms partition table understands */
                if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
                        pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
index 6218c773d71c5906c6d2c6a63232502b21b25cd0..06b15b9f661ca0f97b8d1b569becbbeec857eb43 100644 (file)
@@ -992,7 +992,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                ssize_t plen;
 
                /* use the existing memory in an allocated page */
-               if (ctx->merge) {
+               if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
                        sgl = list_entry(ctx->tsgl_list.prev,
                                         struct af_alg_tsgl, list);
                        sg = sgl->sg + sgl->cur - 1;
@@ -1054,6 +1054,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        ctx->used += plen;
                        copied += plen;
                        size -= plen;
+                       ctx->merge = 0;
                } else {
                        do {
                                struct page *pg;
@@ -1085,12 +1086,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                                size -= plen;
                                sgl->cur++;
                        } while (len && sgl->cur < MAX_SGL_ENTS);
+
+                       ctx->merge = plen & (PAGE_SIZE - 1);
                }
 
                if (!size)
                        sg_mark_end(sg + sgl->cur - 1);
-
-               ctx->merge = plen & (PAGE_SIZE - 1);
        }
 
        err = 0;
index 0ab43e149f0e4a5155b8654bae0916baea152966..82c44d4899b9676d4d43c2f2af7fd9f95758b894 100644 (file)
@@ -68,13 +68,15 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
        struct hash_ctx *ctx = ask->private;
        ssize_t copied = 0;
        size_t len, max_pages, npages;
-       bool continuing = ctx->more, need_init = false;
+       bool continuing, need_init = false;
        int err;
 
        max_pages = min_t(size_t, ALG_MAX_PAGES,
                          DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
 
        lock_sock(sk);
+       continuing = ctx->more;
+
        if (!continuing) {
                /* Discard a previous request that wasn't marked MSG_MORE. */
                hash_free_result(sk, ctx);
index e787598cb3f7eec356b3fd5a27457700be28e1b8..773e159dbbcb83963716338198b26def04bc959d 100644 (file)
@@ -185,8 +185,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
 
        if (issig) {
                sig = crypto_alloc_sig(alg_name, 0, 0);
-               if (IS_ERR(sig))
+               if (IS_ERR(sig)) {
+                       ret = PTR_ERR(sig);
                        goto error_free_key;
+               }
 
                if (pkey->key_is_private)
                        ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -208,8 +210,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
                }
        } else {
                tfm = crypto_alloc_akcipher(alg_name, 0, 0);
-               if (IS_ERR(tfm))
+               if (IS_ERR(tfm)) {
+                       ret = PTR_ERR(tfm);
                        goto error_free_key;
+               }
 
                if (pkey->key_is_private)
                        ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -300,8 +304,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
 
        if (issig) {
                sig = crypto_alloc_sig(alg_name, 0, 0);
-               if (IS_ERR(sig))
+               if (IS_ERR(sig)) {
+                       ret = PTR_ERR(sig);
                        goto error_free_key;
+               }
 
                if (pkey->key_is_private)
                        ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -313,8 +319,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
                ksz = crypto_sig_maxsize(sig);
        } else {
                tfm = crypto_alloc_akcipher(alg_name, 0, 0);
-               if (IS_ERR(tfm))
+               if (IS_ERR(tfm)) {
+                       ret = PTR_ERR(tfm);
                        goto error_free_key;
+               }
 
                if (pkey->key_is_private)
                        ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -411,8 +419,10 @@ int public_key_verify_signature(const struct public_key *pkey,
 
        key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
                      GFP_KERNEL);
-       if (!key)
+       if (!key) {
+               ret = -ENOMEM;
                goto error_free_tfm;
+       }
 
        memcpy(key, pkey->key, pkey->keylen);
        ptr = key + pkey->keylen;
index 514ae6b24cb23bdfb699a20435146042d760b706..496ca02ee18f4d8b09d32deab3c8fc81842d9da4 100644 (file)
@@ -129,8 +129,6 @@ source "drivers/dma-buf/Kconfig"
 
 source "drivers/dca/Kconfig"
 
-source "drivers/auxdisplay/Kconfig"
-
 source "drivers/uio/Kconfig"
 
 source "drivers/vfio/Kconfig"
index 80f1fb3548ae826d45fa7eb902ac5b62076520d6..e4328b4305640355dc82f9b663270f0f98573d76 100644 (file)
@@ -2,10 +2,13 @@
 # Copyright (C) 2023 Intel Corporation
 
 intel_vpu-y := \
+       ivpu_debugfs.o \
        ivpu_drv.o \
        ivpu_fw.o \
+       ivpu_fw_log.o \
        ivpu_gem.o \
-       ivpu_hw_mtl.o \
+       ivpu_hw_37xx.o \
+       ivpu_hw_40xx.o \
        ivpu_ipc.o \
        ivpu_job.o \
        ivpu_jsm_msg.o \
@@ -13,4 +16,4 @@ intel_vpu-y := \
        ivpu_mmu_context.o \
        ivpu_pm.o
 
-obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
\ No newline at end of file
+obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
new file mode 100644 (file)
index 0000000..5e5996f
--- /dev/null
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_debugfs.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
+#include "ivpu_gem.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+static int bo_list_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct drm_printer p = drm_seq_file_printer(s);
+
+       ivpu_bo_list(node->minor->dev, &p);
+
+       return 0;
+}
+
+static int fw_name_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+       seq_printf(s, "%s\n", vdev->fw->name);
+       return 0;
+}
+
+static int fw_trace_capability_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+       u64 trace_hw_component_mask;
+       u32 trace_destination_mask;
+       int ret;
+
+       ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
+                                           &trace_hw_component_mask);
+       if (!ret) {
+               seq_printf(s,
+                          "trace_destination_mask:  %#18x\n"
+                          "trace_hw_component_mask: %#18llx\n",
+                          trace_destination_mask, trace_hw_component_mask);
+       }
+       return 0;
+}
+
+static int fw_trace_config_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+       /*
+        * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
+        * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
+        */
+       u32 trace_level = vdev->fw->trace_level;
+       u32 trace_destination_mask = vdev->fw->trace_destination_mask;
+       u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
+
+       seq_printf(s,
+                  "trace_level:             %#18x\n"
+                  "trace_destination_mask:  %#18x\n"
+                  "trace_hw_component_mask: %#18llx\n",
+                  trace_level, trace_destination_mask, trace_hw_component_mask);
+
+       return 0;
+}
+
+static int last_bootmode_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+       seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");
+
+       return 0;
+}
+
+static int reset_counter_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+       seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
+       return 0;
+}
+
+static int reset_pending_show(struct seq_file *s, void *v)
+{
+       struct drm_info_node *node = (struct drm_info_node *)s->private;
+       struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
+
+       seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+       return 0;
+}
+
+static const struct drm_info_list vdev_debugfs_list[] = {
+       {"bo_list", bo_list_show, 0},
+       {"fw_name", fw_name_show, 0},
+       {"fw_trace_capability", fw_trace_capability_show, 0},
+       {"fw_trace_config", fw_trace_config_show, 0},
+       {"last_bootmode", last_bootmode_show, 0},
+       {"reset_counter", reset_counter_show, 0},
+       {"reset_pending", reset_pending_show, 0},
+};
+
+static int fw_log_show(struct seq_file *s, void *v)
+{
+       struct ivpu_device *vdev = s->private;
+       struct drm_printer p = drm_seq_file_printer(s);
+
+       ivpu_fw_log_print(vdev, true, &p);
+       return 0;
+}
+
+static int fw_log_fops_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, fw_log_show, inode->i_private);
+}
+
+static ssize_t
+fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+       struct seq_file *s = file->private_data;
+       struct ivpu_device *vdev = s->private;
+
+       if (!size)
+               return -EINVAL;
+
+       ivpu_fw_log_clear(vdev);
+       return size;
+}
+
+static const struct file_operations fw_log_fops = {
+       .owner = THIS_MODULE,
+       .open = fw_log_fops_open,
+       .write = fw_log_fops_write,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static ssize_t
+fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
+                                    size_t size, loff_t *pos)
+{
+       struct ivpu_device *vdev = file->private_data;
+       struct ivpu_fw_info *fw = vdev->fw;
+       u32 trace_destination_mask;
+       int ret;
+
+       ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
+       if (ret < 0)
+               return ret;
+
+       fw->trace_destination_mask = trace_destination_mask;
+
+       ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
+                                 fw->trace_hw_component_mask);
+
+       return size;
+}
+
+static const struct file_operations fw_trace_destination_mask_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = fw_trace_destination_mask_fops_write,
+};
+
+static ssize_t
+fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
+                                size_t size, loff_t *pos)
+{
+       struct ivpu_device *vdev = file->private_data;
+       struct ivpu_fw_info *fw = vdev->fw;
+       u64 trace_hw_component_mask;
+       int ret;
+
+       ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
+       if (ret < 0)
+               return ret;
+
+       fw->trace_hw_component_mask = trace_hw_component_mask;
+
+       ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
+                                 trace_hw_component_mask);
+
+       return size;
+}
+
+static const struct file_operations fw_trace_hw_comp_mask_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = fw_trace_hw_comp_mask_fops_write,
+};
+
+static ssize_t
+fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+       struct ivpu_device *vdev = file->private_data;
+       struct ivpu_fw_info *fw = vdev->fw;
+       u32 trace_level;
+       int ret;
+
+       ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
+       if (ret < 0)
+               return ret;
+
+       fw->trace_level = trace_level;
+
+       ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
+                                 fw->trace_hw_component_mask);
+
+       return size;
+}
+
+static const struct file_operations fw_trace_level_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = fw_trace_level_fops_write,
+};
+
+static ssize_t
+ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+       struct ivpu_device *vdev = file->private_data;
+
+       if (!size)
+               return -EINVAL;
+
+       if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+               return -ENODEV;
+       if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+               return -ENODEV;
+
+       return size;
+}
+
+static ssize_t
+ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+       struct ivpu_device *vdev = file->private_data;
+
+       if (!size)
+               return -EINVAL;
+
+       ivpu_pm_schedule_recovery(vdev);
+       return size;
+}
+
+static const struct file_operations ivpu_force_recovery_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = ivpu_force_recovery_fn,
+};
+
+static const struct file_operations ivpu_reset_engine_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = ivpu_reset_engine_fn,
+};
+
+void ivpu_debugfs_init(struct drm_minor *minor)
+{
+       struct ivpu_device *vdev = to_ivpu_device(minor->dev);
+
+       drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list),
+                                minor->debugfs_root, minor);
+
+       debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev,
+                           &ivpu_force_recovery_fops);
+
+       debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev,
+                           &fw_log_fops);
+       debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, vdev,
+                           &fw_trace_destination_mask_fops);
+       debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev,
+                           &fw_trace_hw_comp_mask_fops);
+       debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev,
+                           &fw_trace_level_fops);
+
+       debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev,
+                           &ivpu_reset_engine_fops);
+}
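These debugfs nodes are consumed from userspace. A sketch of typical interaction (illustrative only; assumes debugfs is mounted at /sys/kernel/debug, and the device directory name below is hypothetical; the exact path depends on the accel minor):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DBG "/sys/kernel/debug/accel/0"	/* hypothetical device dir */

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Raise the firmware trace level (2 == info, per ivpu_fw_log.h). */
	fd = open(DBG "/fw_trace_level", O_WRONLY);
	if (fd >= 0) {
		write(fd, "2", 1);
		close(fd);
	}

	/* Dump the firmware log, then clear it: any non-empty write clears. */
	fd = open(DBG "/fw_log", O_RDWR);
	if (fd >= 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);
		write(fd, "1", 1);
		close(fd);
	}
	return 0;
}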
diff --git a/drivers/accel/ivpu/ivpu_debugfs.h b/drivers/accel/ivpu/ivpu_debugfs.h
new file mode 100644 (file)
index 0000000..78f80c1
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_DEBUGFS_H__
+#define __IVPU_DEBUGFS_H__
+
+struct drm_minor;
+
+void ivpu_debugfs_init(struct drm_minor *minor);
+
+#endif /* __IVPU_DEBUGFS_H__ */
index 8396db2b5203082c20d56817ea01f765b36d825e..ba79f397c9e8ed92ba5240dac46737be75f00c18 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/drm_prime.h>
 
 #include "vpu_boot_api.h"
+#include "ivpu_debugfs.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
 #include "ivpu_gem.h"
@@ -50,6 +51,10 @@ u8 ivpu_pll_max_ratio = U8_MAX;
 module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
 
+bool ivpu_disable_mmu_cont_pages;
+module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
+MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
+
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 {
        struct ivpu_device *vdev = file_priv->vdev;
@@ -110,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
        kref_put(&file_priv->ref, file_priv_release);
 }
 
+static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+{
+       switch (args->index) {
+       case DRM_IVPU_CAP_METRIC_STREAMER:
+               args->value = 0;
+               break;
+       case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
+               args->value = 1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
        struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -139,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                args->value = ivpu_get_context_count(vdev);
                break;
        case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
-               args->value = vdev->hw->ranges.user_low.start;
+               args->value = vdev->hw->ranges.user.start;
                break;
        case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
                args->value = file_priv->priority;
@@ -169,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        case DRM_IVPU_PARAM_SKU:
                args->value = vdev->hw->sku;
                break;
+       case DRM_IVPU_PARAM_CAPABILITIES:
+               ret = ivpu_get_capabilities(vdev, args);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -369,10 +393,11 @@ static const struct drm_driver driver = {
 
        .open = ivpu_open,
        .postclose = ivpu_postclose,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = ivpu_gem_prime_import,
-       .gem_prime_mmap = drm_gem_prime_mmap,
+
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = ivpu_debugfs_init,
+#endif
 
        .ioctls = ivpu_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -427,7 +452,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
                return PTR_ERR(vdev->regb);
        }
 
-       ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38));
+       ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
        if (ret) {
                ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
                return ret;
@@ -437,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
        /* Clear any pending errors */
        pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
 
-       /* VPU MTL does not require PCI spec 10m D3hot delay */
-       if (ivpu_is_mtl(vdev))
+       /* VPU 37XX does not require the 10 ms D3hot delay */
+       if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
                pdev->d3hot_delay = 0;
 
        ret = pcim_enable_device(pdev);
@@ -476,7 +501,14 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        if (!vdev->pm)
                return -ENOMEM;
 
-       vdev->hw->ops = &ivpu_hw_mtl_ops;
+       if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
+               vdev->hw->ops = &ivpu_hw_40xx_ops;
+               vdev->hw->dma_bits = 48;
+       } else {
+               vdev->hw->ops = &ivpu_hw_37xx_ops;
+               vdev->hw->dma_bits = 38;
+       }
+
        vdev->platform = IVPU_PLATFORM_INVALID;
        vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
        vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
@@ -602,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
 
 static struct pci_device_id ivpu_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
        { }
 };
 MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
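The new DRM_IVPU_PARAM_CAPABILITIES parameter reuses the existing GET_PARAM ioctl, with args->index selecting the capability. A userspace sketch (illustrative; assumes the uapi header from this series installed as <drm/ivpu_accel.h> and a device node at /dev/accel/accel0):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	struct drm_ivpu_param args;
	int fd = open("/dev/accel/accel0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.param = DRM_IVPU_PARAM_CAPABILITIES;
	args.index = DRM_IVPU_CAP_DMA_MEMORY_RANGE;

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
		printf("DMA memory range cap: %llu\n",
		       (unsigned long long)args.value);

	close(fd);
	return 0;
}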
index d3013fbd13b32fbaaffd2886b27eaaca6f9bf011..9e8c075fe9ef15c447a6705ae5845d9a7792bd2f 100644 (file)
 #define DRIVER_DATE "20230117"
 
 #define PCI_DEVICE_ID_MTL   0x7d1d
+#define PCI_DEVICE_ID_LNL   0x643e
+
+#define IVPU_HW_37XX   37
+#define IVPU_HW_40XX   40
 
 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
 /* SSID 1 is used by the VPU to represent invalid context */
@@ -75,6 +79,8 @@ struct ivpu_wa_table {
        bool punit_disabled;
        bool clear_runtime_mem;
        bool d3hot_after_power_off;
+       bool interrupt_clear_with_0;
+       bool disable_clock_relinquish;
 };
 
 struct ivpu_hw_info;
@@ -131,6 +137,7 @@ struct ivpu_file_priv {
 extern int ivpu_dbg_mask;
 extern u8 ivpu_pll_min_ratio;
 extern u8 ivpu_pll_max_ratio;
+extern bool ivpu_disable_mmu_cont_pages;
 
 #define IVPU_TEST_MODE_DISABLED  0
 #define IVPU_TEST_MODE_FW_TEST   1
@@ -144,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
 int ivpu_boot(struct ivpu_device *vdev);
 int ivpu_shutdown(struct ivpu_device *vdev);
 
-static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
-{
-       return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
-}
-
 static inline u8 ivpu_revision(struct ivpu_device *vdev)
 {
        return to_pci_dev(vdev->drm.dev)->revision;
@@ -159,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
        return to_pci_dev(vdev->drm.dev)->device;
 }
 
+static inline int ivpu_hw_gen(struct ivpu_device *vdev)
+{
+       switch (ivpu_device_id(vdev)) {
+       case PCI_DEVICE_ID_MTL:
+               return IVPU_HW_37XX;
+       case PCI_DEVICE_ID_LNL:
+               return IVPU_HW_40XX;
+       default:
+               ivpu_err(vdev, "Unknown VPU device\n");
+               return 0;
+       }
+}
+
 static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
 {
        return container_of(dev, struct ivpu_device, drm);
index f58951a0d81b10ffb3e2518945f11094e0fa04b0..9827ea4d7b83b46483a26fd2d9a0e622bc4fa9cf 100644 (file)
@@ -11,6 +11,7 @@
 #include "vpu_boot_api.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
@@ -42,22 +43,39 @@ static char *ivpu_firmware;
 module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
 MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
 
+/* TODO: Remove mtl_vpu.bin from names after transition to generation-based FW names */
+static struct {
+       int gen;
+       const char *name;
+} fw_names[] = {
+       { IVPU_HW_37XX, "vpu_37xx.bin" },
+       { IVPU_HW_37XX, "mtl_vpu.bin" },
+       { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
+       { IVPU_HW_40XX, "vpu_40xx.bin" },
+       { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+};
+
 static int ivpu_fw_request(struct ivpu_device *vdev)
 {
-       static const char * const fw_names[] = {
-               "mtl_vpu.bin",
-               "intel/vpu/mtl_vpu_v0.0.bin"
-       };
        int ret = -ENOENT;
        int i;
 
-       if (ivpu_firmware)
-               return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+       if (ivpu_firmware) {
+               ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+               if (!ret)
+                       vdev->fw->name = ivpu_firmware;
+               return ret;
+       }
 
        for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
-               ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
-               if (!ret)
+               if (fw_names[i].gen != ivpu_hw_gen(vdev))
+                       continue;
+
+               ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
+               if (!ret) {
+                       vdev->fw->name = fw_names[i].name;
                        return 0;
+               }
        }
 
        ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
@@ -142,7 +160,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
        }
        ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
                 fw_hdr->header_version, fw_hdr->image_format);
-       ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
+
+       ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name,
+                 (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
 
        if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
                return -EINVAL;
@@ -158,6 +178,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
        fw->cold_boot_entry_point = fw_hdr->entry_point;
        fw->entry_point = fw->cold_boot_entry_point;
 
+       fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+       fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
+       fw->trace_hw_component_mask = -1;
+
        ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
                 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
        ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
@@ -182,13 +206,14 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
                return -EINVAL;
        }
 
-       ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+       ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
        return 0;
 }
 
 static int ivpu_fw_mem_init(struct ivpu_device *vdev)
 {
        struct ivpu_fw_info *fw = vdev->fw;
+       int log_verb_size;
        int ret;
 
        ret = ivpu_fw_update_global_range(vdev);
@@ -201,17 +226,45 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
                return -ENOMEM;
        }
 
+       fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
+                                                 DRM_IVPU_BO_CACHED);
+       if (!fw->mem_log_crit) {
+               ivpu_err(vdev, "Failed to allocate critical log buffer\n");
+               ret = -ENOMEM;
+               goto err_free_fw_mem;
+       }
+
+       if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+               log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
+       else
+               log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
+
+       fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
+       if (!fw->mem_log_verb) {
+               ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
+               ret = -ENOMEM;
+               goto err_free_log_crit;
+       }
+
        if (fw->shave_nn_size) {
-               fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+               fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
                                                          fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
                if (!fw->mem_shave_nn) {
                        ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
-                       ivpu_bo_free_internal(fw->mem);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_free_log_verb;
                }
        }
 
        return 0;
+
+err_free_log_verb:
+       ivpu_bo_free_internal(fw->mem_log_verb);
+err_free_log_crit:
+       ivpu_bo_free_internal(fw->mem_log_crit);
+err_free_fw_mem:
+       ivpu_bo_free_internal(fw->mem);
+       return ret;
 }
 
 static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
@@ -223,7 +276,12 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
                fw->mem_shave_nn = NULL;
        }
 
+       ivpu_bo_free_internal(fw->mem_log_verb);
+       ivpu_bo_free_internal(fw->mem_log_crit);
        ivpu_bo_free_internal(fw->mem);
+
+       fw->mem_log_verb = NULL;
+       fw->mem_log_crit = NULL;
        fw->mem = NULL;
 }
 
@@ -387,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
         * Uncached region of VPU address space, covers IPC buffers, job queues
         * and log buffers, programmable to L2$ Uncached by VPU MTRR
         */
-       boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
-       boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
-                                         vdev->hw->ranges.global_low.start;
+       boot_params->shared_region_base = vdev->hw->ranges.global.start;
+       boot_params->shared_region_size = vdev->hw->ranges.global.end -
+                                         vdev->hw->ranges.global.start;
 
        boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
        boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@@ -397,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
        boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
        boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
 
-       boot_params->global_aliased_pio_base =
-               vdev->hw->ranges.global_aliased_pio.start;
-       boot_params->global_aliased_pio_size =
-               ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+       boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+       boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
 
        /* Allow configuration for L2C_PAGE_TABLE with boot param value */
        boot_params->autoconfig = 1;
@@ -408,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
        /* Enable L2 cache for first 2GB of high memory */
        boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
        boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
-               ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+               ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
 
        if (vdev->fw->mem_shave_nn)
                boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
@@ -424,6 +480,15 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
        boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
        boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
 
+       boot_params->default_trace_level = vdev->fw->trace_level;
+       boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
+       boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
+       boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
+       boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
+       boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
+       boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
+       boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;
+
        boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
        boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
        boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
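As a worked illustration of the IPC split above: headers take the first half of the RX buffer, payloads the second (the base and size here are made-up, not driver values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base = 0x84000000ULL;	/* hypothetical IPC RX base */
	uint64_t size = 16 * 1024;	/* hypothetical IPC RX size */

	printf("headers:  start 0x%llx, size %llu\n",
	       (unsigned long long)base, (unsigned long long)(size / 2));
	printf("payloads: start 0x%llx, size %llu\n",
	       (unsigned long long)(base + size / 2),
	       (unsigned long long)(size / 2));
	return 0;	/* headers at 0x84000000, payloads at 0x84002000 */
}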
index 8d275c802d1c8e35ad0f0850257a31912132eab1..8567fdf925fe4acb782c49d0d0a53106224e50b9 100644 (file)
@@ -12,6 +12,7 @@ struct vpu_boot_params;
 
 struct ivpu_fw_info {
        const struct firmware *file;
+       const char *name;
        struct ivpu_bo *mem;
        struct ivpu_bo *mem_shave_nn;
        struct ivpu_bo *mem_log_crit;
@@ -23,6 +24,9 @@ struct ivpu_fw_info {
        u32 shave_nn_size;
        u64 entry_point; /* Cold or warm boot entry point for next boot */
        u64 cold_boot_entry_point;
+       u32 trace_level;
+       u32 trace_destination_mask;
+       u64 trace_hw_component_mask;
 };
 
 int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
new file mode 100644 (file)
index 0000000..95065ca
--- /dev/null
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
+#include "ivpu_gem.h"
+
+#define IVPU_FW_LOG_LINE_LENGTH          256
+
+unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
+module_param(ivpu_log_level, uint, 0444);
+MODULE_PARM_DESC(ivpu_log_level,
+                "VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+                " info=" __stringify(IVPU_FW_LOG_INFO)
+                " warn=" __stringify(IVPU_FW_LOG_WARN)
+                " error=" __stringify(IVPU_FW_LOG_ERROR)
+                " fatal=" __stringify(IVPU_FW_LOG_FATAL));
+
+static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+                     struct vpu_tracing_buffer_header **log_header)
+{
+       struct vpu_tracing_buffer_header *log;
+
+       if ((*offset + sizeof(*log)) > bo->base.size)
+               return -EINVAL;
+
+       log = bo->kvaddr + *offset;
+
+       if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
+               return -EINVAL;
+
+       if (log->header_size < sizeof(*log) || log->header_size > 1024) {
+               ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
+               return -EINVAL;
+       }
+       if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
+               ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
+               return -EINVAL;
+       }
+
+       *log_header = log;
+       *offset += log->size;
+
+       ivpu_dbg(vdev, FW_BOOT,
+                "FW log name \"%s\", write offset 0x%x size 0x%x, wrap count %d, hdr version %d size %d format %d, alignment %d",
+                log->name, log->write_index, log->size, log->wrap_count, log->header_version,
+                log->header_size, log->format, log->alignment);
+
+       return 0;
+}
+
+static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+{
+       char line[IVPU_FW_LOG_LINE_LENGTH];
+       u32 index = 0;
+
+       if (!size || !buffer)
+               return;
+
+       while (size--) {
+               if (*buffer == '\n' || *buffer == 0) {
+                       line[index] = 0;
+                       if (index != 0)
+                               drm_printf(p, "%s\n", line);
+                       index = 0;
+                       buffer++;
+                       continue;
+               }
+               if (index == IVPU_FW_LOG_LINE_LENGTH - 1) {
+                       line[index] = 0;
+                       index = 0;
+                       drm_printf(p, "%s\n", line);
+               }
+               if (*buffer != '\r' && (isprint(*buffer) || iscntrl(*buffer)))
+                       line[index++] = *buffer;
+               buffer++;
+       }
+       line[index] = 0;
+       if (index != 0)
+               drm_printf(p, "%s\n", line);
+}
+
+static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
+                               const char *prefix, bool only_new_msgs, struct drm_printer *p)
+{
+       char *log_buffer = (void *)log + log->header_size;
+       u32 log_size = log->size - log->header_size;
+       u32 log_start = log->read_index;
+       u32 log_end = log->write_index;
+
+       if (!(log->write_index || log->wrap_count) ||
+           (log->write_index == log->read_index && only_new_msgs)) {
+               drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+               return;
+       }
+
+       drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
+       if (log->write_index > log->read_index) {
+               buffer_print(log_buffer + log_start, log_end - log_start, p);
+       } else {
+               buffer_print(log_buffer + log_end, log_size - log_end, p);
+               buffer_print(log_buffer, log_end, p);
+       }
+       drm_printf(p, "\x1b[0m");
+       drm_printf(p, "==== %s \"%s\" log end   ====\n", prefix, log->name);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+       struct vpu_tracing_buffer_header *log_header;
+       u32 next = 0;
+
+       while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+               fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
+
+       next = 0;
+       while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+               fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
+}
+
+void ivpu_fw_log_clear(struct ivpu_device *vdev)
+{
+       struct vpu_tracing_buffer_header *log_header;
+       u32 next = 0;
+
+       while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+               log_header->read_index = log_header->write_index;
+
+       next = 0;
+       while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+               log_header->read_index = log_header->write_index;
+}
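The print/clear helpers treat each tracing buffer as a ring: once write_index has wrapped past read_index, the oldest data begins right after the write pointer, so the dump emits [write_index, size) followed by [0, write_index). A standalone toy model of that traversal (illustrative only, a simplification of fw_log_print_buffer()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void dump(const char *buf, uint32_t size, uint32_t rd, uint32_t wr,
		 int wrapped)
{
	if (!wrapped && wr > rd) {
		fwrite(buf + rd, 1, wr - rd, stdout);	/* one linear span */
	} else if (wrapped) {
		fwrite(buf + wr, 1, size - wr, stdout);	/* oldest data first */
		fwrite(buf, 1, wr, stdout);		/* then the newest */
	}
}

int main(void)
{
	char ring[8];

	/* After writing "abcdefgh" then "ij", the buffer holds "ijcdefgh". */
	memcpy(ring, "ijcdefgh", 8);
	dump(ring, 8, 0, 2, 1);	/* prints "cdefghij", the logical order */
	printf("\n");
	return 0;
}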
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
new file mode 100644 (file)
index 0000000..0b2573f
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_FW_LOG_H__
+#define __IVPU_FW_LOG_H__
+
+#include <linux/types.h>
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+
+#define IVPU_FW_LOG_DEFAULT 0
+#define IVPU_FW_LOG_DEBUG   1
+#define IVPU_FW_LOG_INFO    2
+#define IVPU_FW_LOG_WARN    3
+#define IVPU_FW_LOG_ERROR   4
+#define IVPU_FW_LOG_FATAL   5
+
+extern unsigned int ivpu_log_level;
+
+#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE      SZ_1M
+#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE      SZ_8M
+#define IVPU_FW_CRITICAL_BUFFER_SIZE           SZ_512K
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_clear(struct ivpu_device *vdev);
+
+static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
+{
+       struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+       ivpu_fw_log_print(vdev, false, &p);
+}
+
+#endif /* __IVPU_FW_LOG_H__ */
index 52b339aefadcae0dd01f5d0d5d1a20aec44c1412..2981bb32c75506d00065748a65a01f5757f85fe9 100644 (file)
@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
        int ret;
 
        if (!range) {
-               if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
-                       range = &vdev->hw->ranges.user_high;
+               if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+                       range = &vdev->hw->ranges.shave;
+               else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+                       range = &vdev->hw->ranges.dma;
                else
-                       range = &vdev->hw->ranges.user_low;
+                       range = &vdev->hw->ranges.user;
        }
 
        mutex_lock(&ctx->lock);
@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
                fixed_range.end = vpu_addr + size;
                range = &fixed_range;
        } else {
-               range = &vdev->hw->ranges.global_low;
+               range = &vdev->hw->ranges.global;
        }
 
        bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
index 50a9304ab09cf2f3f197448eda89ec0a6636872c..ab341237bcf97a45608d35f965e4d3e2895f91e5 100644 (file)
@@ -38,11 +38,10 @@ struct ivpu_addr_range {
 struct ivpu_hw_info {
        const struct ivpu_hw_ops *ops;
        struct {
-               struct ivpu_addr_range global_low;
-               struct ivpu_addr_range global_high;
-               struct ivpu_addr_range user_low;
-               struct ivpu_addr_range user_high;
-               struct ivpu_addr_range global_aliased_pio;
+               struct ivpu_addr_range global;
+               struct ivpu_addr_range user;
+               struct ivpu_addr_range shave;
+               struct ivpu_addr_range dma;
        } ranges;
        struct {
                u8 min_ratio;
@@ -57,9 +56,11 @@ struct ivpu_hw_info {
        u32 tile_fuse;
        u32 sku;
        u16 config;
+       int dma_bits;
 };
 
-extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
+extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
 
 static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
 {
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
new file mode 100644 (file)
index 0000000..9eae1c2
--- /dev/null
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw_37xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#define TILE_FUSE_ENABLE_BOTH        0x0
+#define TILE_SKU_BOTH_MTL            0x3630
+
+/* Work point configuration values */
+#define CONFIG_1_TILE                0x01
+#define CONFIG_2_TILE                0x02
+#define PLL_RATIO_5_3                0x01
+#define PLL_RATIO_4_3                0x02
+#define WP_CONFIG(tile, ratio)       (((tile) << 8) | (ratio))
+#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
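The workpoint encoding packs the tile configuration into bits 15:8 and the PLL ratio select into bits 7:0, so WP_CONFIG_2_TILE_4_3_RATIO evaluates to 0x0202. A compile-time sanity check one could place beside these defines (illustrative, not part of the driver):

static_assert(WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3) == 0x0202,
	      "tile config in bits 15:8, ratio select in bits 7:0");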
+
+#define PLL_REF_CLK_FREQ            (50 * 1000000)
+#define PLL_SIMULATION_FREQ         (10 * 1000000)
+#define PLL_DEFAULT_EPP_VALUE       0x80
+
+#define TIM_SAFE_ENABLE              0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE     0xffffffff
+
+#define TIMEOUT_US                   (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US               (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US              (500 * USEC_PER_MSEC)
+
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+                       (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+                          (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+                          (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+                                    (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static char *ivpu_platform_to_str(u32 platform)
+{
+       switch (platform) {
+       case IVPU_PLATFORM_SILICON:
+               return "IVPU_PLATFORM_SILICON";
+       case IVPU_PLATFORM_SIMICS:
+               return "IVPU_PLATFORM_SIMICS";
+       case IVPU_PLATFORM_FPGA:
+               return "IVPU_PLATFORM_FPGA";
+       default:
+               return "Invalid platform";
+       }
+}
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+       u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
+       u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+
+       if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
+               vdev->platform = platform;
+       else
+               vdev->platform = IVPU_PLATFORM_SILICON;
+
+       ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+                ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+       vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+       vdev->wa.clear_runtime_mem = false;
+       vdev->wa.d3hot_after_power_off = true;
+
+       if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
+               vdev->wa.interrupt_clear_with_0 = true;
+}
+
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+       if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
+               vdev->timeout.boot = 100000;
+               vdev->timeout.jsm = 50000;
+               vdev->timeout.tdr = 2000000;
+               vdev->timeout.reschedule_suspend = 1000;
+       } else {
+               vdev->timeout.boot = 1000;
+               vdev->timeout.jsm = 500;
+               vdev->timeout.tdr = 2000;
+               vdev->timeout.reschedule_suspend = 10;
+       }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+       return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
+/* Send KMD-initiated workpoint change */
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+                            u16 target_ratio, u16 config)
+{
+       int ret;
+       u32 val;
+
+       ret = ivpu_pll_wait_for_cmd_send(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
+       val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+       val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
+       val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+       val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
+       val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
+       val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
+
+       ret = ivpu_pll_wait_for_cmd_send(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
+{
+       u32 exp_val = enable ? 0x1 : 0x0;
+
+       if (IVPU_WA(punit_disabled))
+               return 0;
+
+       return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+       if (IVPU_WA(punit_disabled))
+               return 0;
+
+       return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+       struct ivpu_hw_info *hw = vdev->hw;
+       u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
+       u32 fmin_fuse, fmax_fuse;
+
+       fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
+       fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+       fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+       fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
+       fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
+       hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+       hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+       hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
+{
+       return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+       struct ivpu_hw_info *hw = vdev->hw;
+       u16 target_ratio;
+       u16 config;
+       int ret;
+
+       if (IVPU_WA(punit_disabled)) {
+               ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
+                        ivpu_platform_to_str(vdev->platform));
+               return 0;
+       }
+
+       if (enable) {
+               target_ratio = hw->pll.pn_ratio;
+               config = hw->config;
+       } else {
+               target_ratio = 0;
+               config = 0;
+       }
+
+       ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
+                config, target_ratio);
+
+       ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
+       if (ret) {
+               ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_pll_wait_for_lock(vdev, enable);
+       if (ret) {
+               ivpu_err(vdev, "Timed out waiting for PLL lock\n");
+               return ret;
+       }
+
+       if (enable) {
+               ret = ivpu_pll_wait_for_status_ready(vdev);
+               if (ret) {
+                       ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+                       return ret;
+               }
+
+               ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
+               if (ret) {
+                       ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+       return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+       return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
+{
+       u32 val = 0;
+
+       val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+
+       REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
+
+       if (enable) {
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+       } else {
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+       }
+
+       REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
+
+       if (enable) {
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+       } else {
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+       }
+
+       REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
+}
+
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
+
+       if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
+
+       if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
+
+       if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+
+       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
+
+       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
+
+       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
+{
+       ivpu_boot_host_ss_rst_clr_assert(vdev);
+
+       return ivpu_boot_noc_qreqn_check(vdev, 0x0);
+}
+
+static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
+{
+       REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+       REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
+
+       ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+       if (ret)
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+       return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+       if (enable) {
+               val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+               val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+       } else {
+               val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+               val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+       }
+       REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
+
+       ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+       if (ret)
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+       return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+
+       REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+
+       REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+       /* FPGA model (UPF) is not power aware, skip power island polling */
+       if (ivpu_is_fpga(vdev))
+               return 0;
+
+       return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+                            exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+
+       REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+
+       REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
+}
+
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ivpu_boot_pwr_island_trickle_drive(vdev, true);
+       ivpu_boot_pwr_island_drive(vdev, true);
+
+       ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+       if (ret) {
+               ivpu_err(vdev, "Timed out waiting for power island status\n");
+               return ret;
+       }
+
+       ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qrenqn check: %d\n", ret);
+               return ret;
+       }
+
+       ivpu_boot_host_ss_clk_drive(vdev, true);
+       ivpu_boot_pwr_island_isolation_drive(vdev, false);
+       ivpu_boot_host_ss_rst_drive(vdev, true);
+       ivpu_boot_dpu_active_drive(vdev, true);
+
+       return ret;
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+       REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+       u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
+
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+       REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+       u32 val;
+
+       val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
+       val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
+
+       val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
+       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+       val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+       val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+       val = vdev->fw->entry_point >> 9;
+       REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
+
+       val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+       REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
+
+       ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+                vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
+}
+
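+/*
+ * D0i3 handshake: wait for any in-flight transition to finish
+ * (INPROGRESS == 0), request the new state via the I3 bit, then poll
+ * INPROGRESS again until the buttress completes the request.
+ */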
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
+       if (enable)
+               val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+       else
+               val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
+
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+       if (ret)
+               ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+
+       return ret;
+}
+
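+/*
+ * Static address map for this generation: 512M of global space at
+ * 0x80000000, 255M of user space at 0xc0000000, 2G of SHAVE space at
+ * 0x180000000 and 8G of DMA space at 0x200000000.
+ */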
+static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
+{
+       struct ivpu_hw_info *hw = vdev->hw;
+
+       hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
+       hw->sku = TILE_SKU_BOTH_MTL;
+       hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
+
+       ivpu_pll_init_frequency_ratios(vdev);
+
+       ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+       ivpu_hw_init_range(&hw->ranges.user,   0xc0000000, 255 * SZ_1M);
+       ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+       ivpu_hw_init_range(&hw->ranges.dma,   0x200000000, SZ_8G);
+
+       return 0;
+}
+
+static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
+{
+       int ret;
+       u32 val;
+
+       if (IVPU_WA(punit_disabled))
+               return 0;
+
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+       val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+       REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+
+       ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret)
+               ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+       return ret;
+}
+
+static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ret = ivpu_boot_d0i3_drive(vdev, true);
+       if (ret)
+               ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+       udelay(5); /* VPU requires 5 us to complete the transition */
+
+       return ret;
+}
+
+static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ret = ivpu_boot_d0i3_drive(vdev, false);
+       if (ret)
+               ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ivpu_hw_read_platform(vdev);
+       ivpu_hw_wa_init(vdev);
+       ivpu_hw_timeouts_init(vdev);
+
+       ret = ivpu_hw_37xx_reset(vdev);
+       if (ret)
+               ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
+
+       ret = ivpu_hw_37xx_d0i3_disable(vdev);
+       if (ret)
+               ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+       ret = ivpu_pll_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_host_ss_configure(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * The control circuitry for the vpu_idle indication logic powers up active.
+        * To prevent an unnecessary low power mode signal from LRT during bring up,
+        * KMD disables the circuitry prior to bringing up the Main Power island.
+        */
+       ivpu_boot_vpu_idle_gen_disable(vdev);
+
+       ret = ivpu_boot_pwr_domain_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_host_ss_axi_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
+{
+       ivpu_boot_no_snoop_enable(vdev);
+       ivpu_boot_tbu_mmu_enable(vdev);
+       ivpu_boot_soc_cpu_boot(vdev);
+
+       return 0;
+}
+
+static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
+{
+       u32 val;
+
+       if (IVPU_WA(punit_disabled))
+               return true;
+
+       val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
+       return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
+              REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
+{
+       int ret = 0;
+
+       if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
+               ivpu_err(vdev, "Failed to reset the VPU\n");
+
+       if (ivpu_pll_disable(vdev)) {
+               ivpu_err(vdev, "Failed to disable PLL\n");
+               ret = -EIO;
+       }
+
+       if (ivpu_hw_37xx_d0i3_enable(vdev)) {
+               ivpu_err(vdev, "Failed to enter D0I3\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
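+/*
+ * The watchdog registers are write-protected: each update below is
+ * preceded by writing the TIM_SAFE_ENABLE magic value to TIM_SAFE to
+ * unlock the register for the write that follows.
+ */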
+static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
+{
+       u32 val;
+
+       /* Enable writing and set non-zero WDT value */
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+       /* Enable writing and disable watchdog timer */
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
+
+       /* Now clear the timeout interrupt */
+       val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
+       val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
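+/*
+ * Worked example, assuming the 50 MHz PLL_REF_CLK_FREQ defined earlier in
+ * this file: a ratio of 32 gives pll_clock = 1600 MHz, which a 4/3-ratio
+ * workpoint divides down to 1600 * 2 / 4 = 800 MHz and any other
+ * workpoint to 1600 * 2 / 5 = 640 MHz.
+ */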
+static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+{
+       u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+       u32 cpu_clock;
+
+       if ((config & 0xff) == PLL_RATIO_4_3)
+               cpu_clock = pll_clock * 2 / 4;
+       else
+               cpu_clock = pll_clock * 2 / 5;
+
+       return cpu_clock;
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+       u32 pll_curr_ratio;
+
+       pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
+       pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+
+       if (!ivpu_is_silicon(vdev))
+               return PLL_SIMULATION_FREQ;
+
+       return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+}
+
+static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
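+/*
+ * Doorbell registers are laid out as an array: db_id selects the register
+ * at DOORBELL_0 + db_id * reg_stride (0x1000 per the register map), which
+ * REGV_WR32I() presumably resolves from the base, stride and index.
+ */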
+static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+       u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
+       u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
+
+       REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+       return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+       u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+       return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+       REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
+{
+       REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
+{
+       REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+       REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+       REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
+{
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+       REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+       REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+       REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+}
+
+static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+       ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
+
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+       ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
+
+       ivpu_hw_wdt_disable(vdev);
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+       ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
+
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+       u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+
+       REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+               ivpu_mmu_irq_evtq_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+               ivpu_ipc_irq_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+               ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+               ivpu_mmu_irq_gerr_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+               ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+               ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+               ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
+
+       return status;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+       u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+       bool schedule_recovery = false;
+
+       if (status == 0)
+               return 0;
+
+       /* Disable global interrupt before handling local buttress interrupts */
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
+       if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+               ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
+                        REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
+
+       if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+               ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
+               REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+               u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
+
+               ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+                        ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+                        REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+                        REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+               REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       /* This must be done after interrupts are cleared at the source. */
+       if (IVPU_WA(interrupt_clear_with_0))
+               /*
+                * Writing 1 triggers an interrupt, so we can't do a read-modify-write.
+                * Clear local interrupt status by writing 0 to all bits.
+                */
+               REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
+       else
+               REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
+
+       /* Re-enable global interrupt */
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
+       if (schedule_recovery)
+               ivpu_pm_schedule_recovery(vdev);
+
+       return status;
+}
+
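+/*
+ * Top-level ISR: both IRQ sources share a single interrupt line.
+ * IRQ_RETVAL() maps a non-zero combined status to IRQ_HANDLED and zero to
+ * IRQ_NONE, so the line is only reported as handled when at least one
+ * source actually fired.
+ */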
+static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
+{
+       struct ivpu_device *vdev = ptr;
+       u32 ret_irqv, ret_irqb;
+
+       ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
+       ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+
+       return IRQ_RETVAL(ret_irqb | ret_irqv);
+}
+
+static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
+{
+       u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+       u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+       if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
+               ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+               ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+               ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+       if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+               ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+       if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+               ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
+
+       if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+               u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
+
+               ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+                        ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+                        REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+                        REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+       }
+}
+
+const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
+       .info_init = ivpu_hw_37xx_info_init,
+       .power_up = ivpu_hw_37xx_power_up,
+       .is_idle = ivpu_hw_37xx_is_idle,
+       .power_down = ivpu_hw_37xx_power_down,
+       .boot_fw = ivpu_hw_37xx_boot_fw,
+       .wdt_disable = ivpu_hw_37xx_wdt_disable,
+       .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+       .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+       .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
+       .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
+       .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
+       .reg_db_set = ivpu_hw_37xx_reg_db_set,
+       .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
+       .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
+       .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
+       .irq_clear = ivpu_hw_37xx_irq_clear,
+       .irq_enable = ivpu_hw_37xx_irq_enable,
+       .irq_disable = ivpu_hw_37xx_irq_disable,
+       .irq_handler = ivpu_hw_37xx_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
new file mode 100644
index 0000000..6e4e915
--- /dev/null
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_37XX_REG_H__
+#define __IVPU_HW_37XX_REG_H__
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE                                       0x00000000u
+
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT                                       0x00000004u
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK                      BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK                  BIT_MASK(1)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK                  BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0                                      0x00000008u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK                       GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK                       GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1                                      0x0000000cu
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK                    GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK                             GENMASK(31, 16)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2                                      0x00000010u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK                  GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD                                           0x00000014u
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK                         BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD                                  0x00000018u
+#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK                        GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_CURRENT_PLL                                  0x0000001cu
+#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK                               GENMASK(15, 0)
+
+#define VPU_37XX_BUTTRESS_PLL_ENABLE                                           0x00000020u
+
+#define VPU_37XX_BUTTRESS_FMIN_FUSE                                            0x00000024u
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK                             GENMASK(7, 0)
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK                              GENMASK(15, 8)
+
+#define VPU_37XX_BUTTRESS_FMAX_FUSE                                            0x00000028u
+#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK                             GENMASK(7, 0)
+
+#define VPU_37XX_BUTTRESS_TILE_FUSE                                            0x0000002cu
+#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK                         BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK                                   GENMASK(3, 2)
+
+#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK                                       0x00000030u
+#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK                                      0x00000034u
+
+#define VPU_37XX_BUTTRESS_PLL_STATUS                                           0x00000040u
+#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK                         BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_STATUS                                           0x00000044u
+#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK                                BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK                         BIT_MASK(1)
+
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL                                     0x00000060u
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK                     BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK                             BIT_MASK(2)
+
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET                                 0x00000050u
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK                            BIT_MASK(0)
+
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET                         0x00000080u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE                                   0x00000084u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE                         0x00000088u
+
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0                                        0x000000a0u
+#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1                                        0x000000a4u
+#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR                                        0x000000a8u
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG                                  0x000000b0u
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK                               GENMASK(11, 0)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK                              GENMASK(19, 12)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK                              GENMASK(24, 20)
+
+#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR                                        0x000000b4u
+
+#define VPU_37XX_HOST_SS_CPR_CLK_SET                                   0x00000084u
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_SET                                   0x00000094u
+#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_CPR_RST_CLR                                   0x00000098u
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK                          BIT_MASK(0)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_37XX_HOST_SS_HW_VERSION                                    0x00000108u
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK                  GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK                    GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK                        GENMASK(23, 16)
+
+#define VPU_37XX_HOST_SS_GEN_CTRL                                      0x00000118u
+#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK                              GENMASK(31, 29)
+
+#define VPU_37XX_HOST_SS_NOC_QREQN                                     0x00000154u
+#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK                    BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN                                  0x00000158u
+#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK                 BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_NOC_QDENY                                     0x0000015cu
+#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK                    BIT_MASK(0)
+
+#define MTL_VPU_TOP_NOC_QREQN                                          0x00000160u
+#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK                            BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK                      BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QACCEPTN                                       0x00000164u
+#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK                         BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK                   BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QDENY                                          0x00000168u
+#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK                            BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK                      BIT_MASK(1)
+
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN                                 0x00000170u
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK                        BIT_MASK(0)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK                    BIT_MASK(1)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK                   BIT_MASK(2)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK                     BIT_MASK(3)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK                    BIT_MASK(4)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK                    BIT_MASK(5)
+#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK                        BIT_MASK(6)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_0                                  0x00010210u
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK                 BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK                 BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK                 BIT_MASK(2)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK                 BIT_MASK(3)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK           BIT_MASK(4)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK                       BIT_MASK(5)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK                       BIT_MASK(6)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK                       BIT_MASK(7)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK            BIT_MASK(8)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK      BIT_MASK(30)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK      BIT_MASK(31)
+
+#define VPU_37XX_HOST_SS_ICB_STATUS_1                                  0x00010214u
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK      BIT_MASK(0)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK      BIT_MASK(1)
+#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK      BIT_MASK(2)
+
+#define VPU_37XX_HOST_SS_ICB_CLEAR_0                                   0x00010220u
+#define VPU_37XX_HOST_SS_ICB_CLEAR_1                                   0x00010224u
+#define VPU_37XX_HOST_SS_ICB_ENABLE_0                                  0x00010240u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM                              0x000200f4u
+
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT                             0x000200fcu
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK           GENMASK(7, 0)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK          GENMASK(15, 8)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK             GENMASK(23, 16)
+#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK                  GENMASK(31, 24)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0                                       0x00030020u
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK                  BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0                            0x00030024u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK                       BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0                    0x00030028u
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK               BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0                                0x0003002cu
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK           BIT_MASK(3)
+
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN                              0x00030200u
+#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK                      BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE                                        0x00030204u
+#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK                        BIT_MASK(0)
+
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO                            0x00041040u
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK                  BIT_MASK(0)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK            GENMASK(2, 1)
+#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK                GENMASK(31, 3)
+
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR                               0x00082020u
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK   GENMASK(15, 0)
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK                GENMASK(31, 16)
+
+#define VPU_37XX_HOST_MMU_IDR0                                         0x00200000u
+#define VPU_37XX_HOST_MMU_IDR1                                         0x00200004u
+#define VPU_37XX_HOST_MMU_IDR3                                         0x0020000cu
+#define VPU_37XX_HOST_MMU_IDR5                                         0x00200014u
+#define VPU_37XX_HOST_MMU_CR0                                          0x00200020u
+#define VPU_37XX_HOST_MMU_CR0ACK                                               0x00200024u
+#define VPU_37XX_HOST_MMU_CR1                                          0x00200028u
+#define VPU_37XX_HOST_MMU_CR2                                          0x0020002cu
+#define VPU_37XX_HOST_MMU_IRQ_CTRL                                     0x00200050u
+#define VPU_37XX_HOST_MMU_IRQ_CTRLACK                                  0x00200054u
+
+#define VPU_37XX_HOST_MMU_GERROR                                               0x00200060u
+#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK                             BIT_MASK(0)
+#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK                         BIT_MASK(2)
+#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK                         BIT_MASK(3)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK                     BIT_MASK(4)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK                     BIT_MASK(5)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK                     BIT_MASK(6)
+#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK                          BIT_MASK(7)
+
+#define VPU_37XX_HOST_MMU_GERRORN                                      0x00200064u
+
+#define VPU_37XX_HOST_MMU_STRTAB_BASE                                  0x00200080u
+#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG                              0x00200088u
+#define VPU_37XX_HOST_MMU_CMDQ_BASE                                    0x00200090u
+#define VPU_37XX_HOST_MMU_CMDQ_PROD                                    0x00200098u
+#define VPU_37XX_HOST_MMU_CMDQ_CONS                                    0x0020009cu
+#define VPU_37XX_HOST_MMU_EVTQ_BASE                                    0x002000a0u
+#define VPU_37XX_HOST_MMU_EVTQ_PROD                                    0x002000a8u
+#define VPU_37XX_HOST_MMU_EVTQ_CONS                                    0x002000acu
+#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC                                        (0x002000a8u + SZ_64K)
+#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC                                        (0x002000acu + SZ_64K)
+
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES                             0x00360000u
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK      BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK               BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK               BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK    BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK    BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK    BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK    GENMASK(10, 6)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK    GENMASK(15, 11)
+
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV                                  0x00360004u
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK             BIT_MASK(0)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK             BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK             BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK             BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK             BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK             BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK             BIT_MASK(6)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK             BIT_MASK(7)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK             BIT_MASK(8)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK             BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE                                        0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL                            0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG                              0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG                             0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG                                0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET                              0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK                 BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR                              0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK                 BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC                          0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK                BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK                BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK                BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK                BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK         GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG                                    0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN                                     0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE                                                0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO                                    0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG                                  0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK             BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0                                      0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK                             BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1                                      0x06301000u
+
+#endif /* __IVPU_HW_37XX_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
new file mode 100644
index 0000000..34626d6
--- /dev/null
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#include <linux/dmi.h>
+
+#define TILE_MAX_NUM                 6
+#define TILE_MAX_MASK                0x3f
+
+#define LNL_HW_ID                    0x4040
+
+#define SKU_TILE_SHIFT               0u
+#define SKU_TILE_MASK                0x0000ffffu
+#define SKU_HW_ID_SHIFT              16u
+#define SKU_HW_ID_MASK               0xffff0000u
+
+#define PLL_CONFIG_DEFAULT           0x1
+#define PLL_CDYN_DEFAULT             0x80
+#define PLL_EPP_DEFAULT              0x80
+#define PLL_REF_CLK_FREQ            (50 * 1000000)
+#define PLL_RATIO_TO_FREQ(x)        ((x) * PLL_REF_CLK_FREQ)
+
+#define PLL_PROFILING_FREQ_DEFAULT   38400000
+#define PLL_PROFILING_FREQ_HIGH      400000000
+
+#define TIM_SAFE_ENABLE                     0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE     0xffffffff
+
+#define TIMEOUT_US                  (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US              (1500 * USEC_PER_MSEC)
+
+#define WEIGHTS_DEFAULT              0xf711f711u
+#define WEIGHTS_ATS_DEFAULT          0x0000f711u
+
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+                       (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
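+
+/*
+ * The ICB status/clear/enable 0 and 1 registers are adjacent 32-bit
+ * registers, so the combined mask can be written in a single 64-bit
+ * access with the ICB_1 bits in the upper word.
+ */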
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
+                          (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
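+
+/*
+ * The buttress local interrupt mask register uses 1 = masked: the enable
+ * mask is the complement of the serviced interrupt bits and the disable
+ * mask is all-ones, masking every buttress interrupt at once.
+ */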
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+                                    (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static const char *ivpu_platform_to_str(u32 platform)
+{
+       switch (platform) {
+       case IVPU_PLATFORM_SILICON:
+               return "IVPU_PLATFORM_SILICON";
+       case IVPU_PLATFORM_SIMICS:
+               return "IVPU_PLATFORM_SIMICS";
+       case IVPU_PLATFORM_FPGA:
+               return "IVPU_PLATFORM_FPGA";
+       default:
+               return "Invalid platform";
+       }
+}
+
+static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
+       {
+               .ident = "Intel Simics",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
+                       DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+                       DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
+               },
+       },
+       {
+               .ident = "Intel Simics",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "Simics"),
+               },
+       },
+       { }
+};
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+       if (dmi_check_system(ivpu_dmi_platform_simulation))
+               vdev->platform = IVPU_PLATFORM_SIMICS;
+       else
+               vdev->platform = IVPU_PLATFORM_SILICON;
+
+       ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+                ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+       vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+       vdev->wa.clear_runtime_mem = false;
+
+       if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
+               vdev->wa.disable_clock_relinquish = true;
+}
+
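+/*
+ * Pre-silicon targets run much slower than real hardware, hence the far
+ * larger timeout budgets on FPGA.
+ */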
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+       if (ivpu_is_fpga(vdev)) {
+               vdev->timeout.boot = 100000;
+               vdev->timeout.jsm = 50000;
+               vdev->timeout.tdr = 2000000;
+               vdev->timeout.reschedule_suspend = 1000;
+       } else if (ivpu_is_simics(vdev)) {
+               vdev->timeout.boot = 50;
+               vdev->timeout.jsm = 500;
+               vdev->timeout.tdr = 10000;
+               vdev->timeout.reschedule_suspend = 10;
+       } else {
+               vdev->timeout.boot = 1000;
+               vdev->timeout.jsm = 500;
+               vdev->timeout.tdr = 2000;
+               vdev->timeout.reschedule_suspend = 10;
+       }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+       return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
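+/*
+ * Workpoint requests use a mailbox-style handshake: wait until the
+ * previous request has been consumed (SEND == 0), program the ratio
+ * limits, target ratio/EPP and config/CDYN payloads, set SEND, then poll
+ * until the new request is consumed as well.
+ */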
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+                            u16 target_ratio, u16 epp, u16 config, u16 cdyn)
+{
+       int ret;
+       u32 val;
+
+       ret = ivpu_pll_wait_for_cmd_send(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+       val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
+       val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);
+
+       ret = ivpu_pll_wait_for_cmd_send(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+       return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
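+/*
+ * The requested ivpu_pll_min_ratio/ivpu_pll_max_ratio values are clamped
+ * into the fused min/max window, and the P(n) ratio is in turn clamped
+ * into the resulting [min, max] range.
+ */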
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+       struct ivpu_hw_info *hw = vdev->hw;
+       u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
+       u32 fmin_fuse, fmax_fuse;
+
+       fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
+       fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+       fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+       fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
+       fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
+       hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+       hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+       hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+       u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
+       u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
+       u16 epp = enable ? PLL_EPP_DEFAULT : 0;
+       struct ivpu_hw_info *hw = vdev->hw;
+       u16 target_ratio = hw->pll.pn_ratio;
+       int ret;
+
+       ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
+                PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
+
+       ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
+                               target_ratio, epp, config, cdyn);
+       if (ret) {
+               ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+               return ret;
+       }
+
+       if (enable) {
+               ret = ivpu_pll_wait_for_status_ready(vdev);
+               if (ret) {
+                       ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+       return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+       return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
+
+       if (enable) {
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+       } else {
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+       }
+
+       REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
+
+       if (enable) {
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+       } else {
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+       }
+
+       REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
+}
+
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+           !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+
+       REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
+}
+
+static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+       if (ret)
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+       REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
+
+       ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+               return ret;
+       }
+
+       if (enable) {
+               REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
+               REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
+       }
+
+       return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+       return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+       if (enable) {
+               val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+               val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+       } else {
+               val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+               val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+       }
+       REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
+
+       ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+       if (ret)
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+       return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
+
+       REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+
+       if (enable)
+               ndelay(500);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
+
+       REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
+
+       if (!enable)
+               ndelay(500);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+       if (ivpu_is_fpga(vdev))
+               return 0;
+
+       return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
+                            exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
+
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
+
+       REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+       val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+       val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+       REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+       u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
+
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+
+       REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+       u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
+
+       if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
+               return -EIO;
+
+       return 0;
+}
+
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ivpu_boot_pwr_island_trickle_drive(vdev, true);
+       ivpu_boot_pwr_island_drive(vdev, true);
+
+       ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+       if (ret) {
+               ivpu_err(vdev, "Timed out waiting for power island status\n");
+               return ret;
+       }
+
+       ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qrenqn check: %d\n", ret);
+               return ret;
+       }
+
+       ivpu_boot_host_ss_clk_drive(vdev, true);
+       ivpu_boot_host_ss_rst_drive(vdev, true);
+       ivpu_boot_pwr_island_isolation_drive(vdev, false);
+
+       return ret;
+}
+
+static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
+       val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+       REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
+
+       ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+       if (ret) {
+               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
+       if (ret)
+               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
+{
+       return ivpu_boot_soc_cpu_drive(vdev, true);
+}
+
+static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+       int ret;
+       u32 val;
+       u64 val64;
+
+       ret = ivpu_boot_soc_cpu_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
+               return ret;
+       }
+
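+       /* Position the FW entry point inside the IMAGE_LOCATION field, which starts at bit 3 */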
+       val64 = vdev->fw->entry_point;
+       val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
+       REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
+
+       val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
+       val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
+       REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
+
+       ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+                ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
+
+       return 0;
+}
+
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+       int ret;
+       u32 val;
+
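+       /* Wait for any in-flight D0i3 transition to complete before toggling the I3 bit */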
+       ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
+       if (enable)
+               val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+       else
+               val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);
+
+       ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static bool ivpu_tile_disable_check(u32 config)
+{
+       /* Allowed values: 0, or exactly one bit set in the range 0-5 (6 tiles) */
+       if (config == 0)
+               return true;
+
+       if (config > BIT(TILE_MAX_NUM - 1))
+               return false;
+
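+       /* A non-zero config is valid only when exactly one bit is set (power-of-two test) */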
+       if ((config & (config - 1)) == 0)
+               return true;
+
+       return false;
+}
+
+static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+{
+       struct ivpu_hw_info *hw = vdev->hw;
+       u32 tile_disable;
+       u32 tile_enable;
+       u32 fuse;
+
+       fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
+       if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
+               ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
+               return -EIO;
+       }
+
+       tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
+       if (!ivpu_tile_disable_check(tile_disable)) {
+               ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
+               return -EIO;
+       }
+
+       if (tile_disable)
+               ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
+                        TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
+       else
+               ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
+
+       tile_enable = (~tile_disable) & TILE_MAX_MASK;
+
+       hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
+       hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
+       hw->tile_fuse = tile_disable;
+       hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+       ivpu_pll_init_frequency_ratios(vdev);
+
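+       /* Fixed address map: the global, user and shave ranges share the 2 GiB-4 GiB window, DMA starts at 8 GiB */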
+       ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
+       ivpu_hw_init_range(&vdev->hw->ranges.user,   0x80000000, SZ_256M);
+       ivpu_hw_init_range(&vdev->hw->ranges.shave,  0x80000000 + SZ_256M, SZ_2G - SZ_256M);
+       ivpu_hw_init_range(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);
+
+       return 0;
+}
+
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+       int ret;
+       u32 val;
+
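+       /* Wait for any pending reset, set TRIGGER, then poll for the bit to clear again */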
+       ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret) {
+               ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
+               return ret;
+       }
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
+       val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);
+
+       ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+       if (ret)
+               ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+       return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       if (IVPU_WA(punit_disabled))
+               return 0;
+
+       ret = ivpu_boot_d0i3_drive(vdev, true);
+       if (ret)
+               ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+       udelay(5); /* VPU requires 5 us to complete the transition */
+
+       return ret;
+}
+
+static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
+{
+       int ret;
+
+       if (IVPU_WA(punit_disabled))
+               return 0;
+
+       ret = ivpu_boot_d0i3_drive(vdev, false);
+       if (ret)
+               ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+       return ret;
+}
+
+static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
+{
+       u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+       if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
+               val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+       else
+               val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
+
+       REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
+{
+       ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
+                REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable");
+}
+
+static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
+{
+       u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+
+       val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
+       REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
+}
+
+static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ret = ivpu_hw_40xx_reset(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
+               return ret;
+       }
+
+       ivpu_hw_read_platform(vdev);
+       ivpu_hw_wa_init(vdev);
+       ivpu_hw_timeouts_init(vdev);
+
+       ret = ivpu_hw_40xx_d0i3_disable(vdev);
+       if (ret)
+               ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+       ret = ivpu_pll_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+               return ret;
+       }
+
+       if (IVPU_WA(disable_clock_relinquish))
+               ivpu_hw_40xx_clock_relinquish_disable(vdev);
+       ivpu_hw_40xx_profiling_freq_reg_set(vdev);
+       ivpu_hw_40xx_ats_print(vdev);
+
+       ret = ivpu_boot_host_ss_check(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+               return ret;
+       }
+
+       ivpu_boot_idle_gen_drive(vdev, false);
+
+       ret = ivpu_boot_pwr_domain_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_host_ss_axi_enable(vdev);
+       if (ret) {
+               ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+               return ret;
+       }
+
+       ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+       return ret;
+}
+
+static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ivpu_boot_no_snoop_enable(vdev);
+       ivpu_boot_tbu_mmu_enable(vdev);
+
+       ret = ivpu_boot_soc_cpu_boot(vdev);
+       if (ret)
+               ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
+
+       return ret;
+}
+
+static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
+{
+       u32 val;
+
+       if (IVPU_WA(punit_disabled))
+               return true;
+
+       val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
+       return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
+              REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
+{
+       int ret = 0;
+
+       if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+               ivpu_warn(vdev, "Failed to reset the VPU\n");
+
+       if (ivpu_pll_disable(vdev)) {
+               ivpu_err(vdev, "Failed to disable PLL\n");
+               ret = -EIO;
+       }
+
+       if (ivpu_hw_40xx_d0i3_enable(vdev)) {
+               ivpu_err(vdev, "Failed to enter D0I3\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
+{
+       u32 val;
+
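+       /* Write the TIM_SAFE unlock value before each watchdog register update */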
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
+
+       val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
+       val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+       u32 pll_curr_ratio;
+
+       pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
+       pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;
+
+       return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+       return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
+static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
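+       /* Doorbell registers are evenly strided; derive the stride and index from DOORBELL_0 */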
+       u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
+       u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
+
+       REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+       return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+       u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+
+       return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+       REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
+{
+       REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
+{
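+       /* Unmask VPU-side sources first; opening the Buttress global mask (write 0x0) comes last */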
+       REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+       REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+       REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+       REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
+{
+       REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+       REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+       REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+       REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
+}
+
+static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+       /* TODO: For LNL hang consider engine reset instead of full recovery */
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+       ivpu_hw_wdt_disable(vdev);
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+       ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+       u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+       irqreturn_t ret = IRQ_NONE;
+
+       if (!status)
+               return IRQ_NONE;
+
+       REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+               ivpu_mmu_irq_evtq_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+               ret |= ivpu_ipc_irq_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+               ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+               ivpu_mmu_irq_gerr_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+               ivpu_hw_40xx_irq_wdt_mss_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+               ivpu_hw_40xx_irq_wdt_nce_handler(vdev);
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+               ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
+
+       return ret;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+       bool schedule_recovery = false;
+       u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+       if (status == 0)
+               return IRQ_NONE;
+
+       REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+               ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+               ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+                        REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+                        REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+               REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
+               ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+               REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
+               ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+               REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
+               ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+               REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
+               ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+               REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
+               schedule_recovery = true;
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
+               ivpu_err(vdev, "Survivability error detected\n");
+               schedule_recovery = true;
+       }
+
+       if (schedule_recovery)
+               ivpu_pm_schedule_recovery(vdev);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
+{
+       struct ivpu_device *vdev = ptr;
+       irqreturn_t ret = IRQ_NONE;
+
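+       /* Dispatch both VPU and Buttress sources; propagate IRQ_WAKE_THREAD if either handler requests it */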
+       ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
+       ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+
+       if (ret & IRQ_WAKE_THREAD)
+               return IRQ_WAKE_THREAD;
+
+       return ret;
+}
+
+static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
+{
+       u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+       u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+       if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
+               ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+               ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+               ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+       if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+               ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
+               ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+                        REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+                        REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+       }
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
+               ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
+               ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
+               ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
+               ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+                        REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+
+       if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
+               ivpu_err(vdev, "Survivability error detected\n");
+}
+
+const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
+       .info_init = ivpu_hw_40xx_info_init,
+       .power_up = ivpu_hw_40xx_power_up,
+       .is_idle = ivpu_hw_40xx_is_idle,
+       .power_down = ivpu_hw_40xx_power_down,
+       .boot_fw = ivpu_hw_40xx_boot_fw,
+       .wdt_disable = ivpu_hw_40xx_wdt_disable,
+       .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+       .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+       .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
+       .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
+       .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
+       .reg_db_set = ivpu_hw_40xx_reg_db_set,
+       .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
+       .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
+       .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
+       .irq_clear = ivpu_hw_40xx_irq_clear,
+       .irq_enable = ivpu_hw_40xx_irq_enable,
+       .irq_disable = ivpu_hw_40xx_irq_disable,
+       .irq_handler = ivpu_hw_40xx_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
new file mode 100644 (file)
index 0000000..5139cfe
--- /dev/null
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_40XX_REG_H__
+#define __IVPU_HW_40XX_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT                               0x00000000u
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK              BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK                  BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK                 BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK                 BIT_MASK(3)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK                 BIT_MASK(4)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK                 BIT_MASK(5)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK                 BIT_MASK(6)
+
+#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK                               0x00000004u
+#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK                              0x00000008u
+
+#define VPU_40XX_BUTTRESS_HM_ATS                                       0x0000000cu
+
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1                                 0x00000010u
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2                                 0x00000014u
+#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR                                        0x00000018u
+
+#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG                                 0x0000001cu
+#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR                               0x00000020u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS                 0x00000024u
+
+#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG                                 0x00000040u
+#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR                               0x00000044u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW                             0x00000048u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH                            0x0000004cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR                           0x00000050u
+
+#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS                     0x00000054u
+
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW                             0x00000058u
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH                            0x0000005cu
+#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR                           0x00000060u
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0                              0x00000130u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK               GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK               GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1                              0x00000134u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK            GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK                     GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2                              0x00000138u
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK                  GENMASK(15, 0)
+#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK                    GENMASK(31, 16)
+
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD                                   0x0000013cu
+#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK                         BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_PLL_FREQ                                     0x00000148u
+#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK                          GENMASK(15, 0)
+
+#define VPU_40XX_BUTTRESS_TILE_FUSE                                    0x00000150u
+#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK                         BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK                                GENMASK(6, 1)
+
+#define VPU_40XX_BUTTRESS_VPU_STATUS                                   0x00000154u
+#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK                                BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK                         BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK                     BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK                     BIT_MASK(11)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK        BIT_MASK(12)
+
+#define VPU_40XX_BUTTRESS_IP_RESET                                     0x00000160u
+#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK                                BIT_MASK(0)
+
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL                                 0x00000164u
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK                 BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK                         BIT_MASK(2)
+
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET                         0x00000168u
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE                           0x0000016cu
+#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE                         0x00000170u
+
+#define VPU_40XX_BUTTRESS_FMIN_FUSE                                    0x00000174u
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK                     GENMASK(7, 0)
+#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK                      GENMASK(15, 8)
+
+#define VPU_40XX_BUTTRESS_FMAX_FUSE                                    0x00000178u
+#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK                     GENMASK(7, 0)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_EN                                    0x00000080u
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK                       BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK                       BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK                       BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_CLK_SET                                   0x00000084u
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_EN                                    0x00000090u
+#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK                       BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK                       BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK                       BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_SET                                   0x00000094u
+#define VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_CPR_RST_CLR                                   0x00000098u
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK                      BIT_MASK(1)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK                      BIT_MASK(10)
+#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK                      BIT_MASK(11)
+
+#define VPU_40XX_HOST_SS_HW_VERSION                                    0x00000108u
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK                  GENMASK(7, 0)
+#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK                    GENMASK(15, 8)
+#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK                        GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_SW_VERSION                                    0x0000010cu
+
+#define VPU_40XX_HOST_SS_GEN_CTRL                                      0x00000118u
+#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK                              GENMASK(31, 29)
+
+#define VPU_40XX_HOST_SS_NOC_QREQN                                     0x00000154u
+#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK                    BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN                                  0x00000158u
+#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK                 BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_NOC_QDENY                                     0x0000015cu
+#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK                    BIT_MASK(0)
+
+#define VPU_40XX_TOP_NOC_QREQN                                         0x00000160u
+#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK                           BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK                     BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QACCEPTN                                      0x00000164u
+#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK                                BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK                  BIT_MASK(2)
+
+#define VPU_40XX_TOP_NOC_QDENY                                         0x00000168u
+#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK                           BIT_MASK(0)
+#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK                     BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN                                 0x00000170u
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK                        BIT_MASK(0)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK                    BIT_MASK(1)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK                   BIT_MASK(2)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK                     BIT_MASK(3)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK                    BIT_MASK(4)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK                    BIT_MASK(5)
+#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK                        BIT_MASK(6)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_0                                  0x00010210u
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK                 BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK                 BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK                 BIT_MASK(2)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK                 BIT_MASK(3)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK           BIT_MASK(4)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK               BIT_MASK(5)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK               BIT_MASK(6)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK               BIT_MASK(7)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK            BIT_MASK(8)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK      BIT_MASK(30)
+#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK      BIT_MASK(31)
+
+#define VPU_40XX_HOST_SS_ICB_STATUS_1                                  0x00010214u
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK      BIT_MASK(0)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK      BIT_MASK(1)
+#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK      BIT_MASK(2)
+
+#define VPU_40XX_HOST_SS_ICB_CLEAR_0                                   0x00010220u
+#define VPU_40XX_HOST_SS_ICB_CLEAR_1                                   0x00010224u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_0                                  0x00010240u
+#define VPU_40XX_HOST_SS_ICB_ENABLE_1                                  0x00010244u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM                              0x000200f4u
+
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT                             0x000200fcu
+#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK             GENMASK(23, 16)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0                               0x00030020u
+#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK                  BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0                            0x00030024u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK               BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0                    0x00030028u
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK       BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0                                0x0003002cu
+#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK           BIT_MASK(3)
+
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN                                  0x00030200u
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK                          BIT_MASK(0)
+#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK                    BIT_MASK(1)
+
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE                                        0x00030204u
+#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK                        BIT_MASK(0)
+
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO                       0x00040040u
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK             BIT_MASK(0)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK       GENMASK(2, 1)
+#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK   GENMASK(31, 3)
+
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR                       0x00082020u
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK   GENMASK(15, 0)
+#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK                GENMASK(31, 16)
+
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES                             0x00360000u
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK      BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK       BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK       BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK      BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK      BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK      BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK    GENMASK(10, 6)
+#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK    GENMASK(15, 11)
+
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV                                  0x00360004u
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK             BIT_MASK(0)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK             BIT_MASK(1)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK             BIT_MASK(2)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK             BIT_MASK(3)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK             BIT_MASK(4)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK             BIT_MASK(5)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK             BIT_MASK(6)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK             BIT_MASK(7)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK             BIT_MASK(8)
+#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK             BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE                               0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL                           0x04000000u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG                             0x04400010u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG                            0x04400014u
+#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG                       0x04400020u
+
+#define VPU_40XX_CPU_SS_TIM_WATCHDOG                                   0x0102009cu
+#define VPU_40XX_CPU_SS_TIM_WDOG_EN                                    0x010200a4u
+#define VPU_40XX_CPU_SS_TIM_SAFE                                       0x010200a8u
+
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG                                 0x01021008u
+#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK            BIT_MASK(9)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN                                  0x01010030u
+#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK                    BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN                               0x01010034u
+#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK                 BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY                                  0x01010038u
+#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK                    BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_TIM_IPC_FIFO                                   0x010200f0u
+#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT                          0x01029008u
+
+#define VPU_40XX_CPU_SS_DOORBELL_0                                     0x01300000u
+#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK                            BIT_MASK(0)
+
+#define VPU_40XX_CPU_SS_DOORBELL_1                                     0x01301000u
+
+#endif /* __IVPU_HW_40XX_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
deleted file mode 100644 (file)
index fef3542..0000000
+++ /dev/null
@@ -1,1053 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-
-#include "ivpu_drv.h"
-#include "ivpu_fw.h"
-#include "ivpu_hw_mtl_reg.h"
-#include "ivpu_hw_reg_io.h"
-#include "ivpu_hw.h"
-#include "ivpu_ipc.h"
-#include "ivpu_mmu.h"
-#include "ivpu_pm.h"
-
-#define TILE_FUSE_ENABLE_BOTH        0x0
-#define TILE_SKU_BOTH_MTL            0x3630
-
-/* Work point configuration values */
-#define CONFIG_1_TILE                0x01
-#define CONFIG_2_TILE                0x02
-#define PLL_RATIO_5_3                0x01
-#define PLL_RATIO_4_3                0x02
-#define WP_CONFIG(tile, ratio)       (((tile) << 8) | (ratio))
-#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
-#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
-#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
-#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
-#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
-
-#define PLL_REF_CLK_FREQ            (50 * 1000000)
-#define PLL_SIMULATION_FREQ         (10 * 1000000)
-#define PLL_DEFAULT_EPP_VALUE       0x80
-
-#define TIM_SAFE_ENABLE                     0xf1d0dead
-#define TIM_WATCHDOG_RESET_VALUE     0xffffffff
-
-#define TIMEOUT_US                  (150 * USEC_PER_MSEC)
-#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
-#define PLL_TIMEOUT_US              (1500 * USEC_PER_MSEC)
-#define IDLE_TIMEOUT_US                     (500 * USEC_PER_MSEC)
-
-#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
-
-#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
-                       (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
-
-#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-
-#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
-                          (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
-                          (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
-
-#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
-#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
-
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
-                                    (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
-
-static char *ivpu_platform_to_str(u32 platform)
-{
-       switch (platform) {
-       case IVPU_PLATFORM_SILICON:
-               return "IVPU_PLATFORM_SILICON";
-       case IVPU_PLATFORM_SIMICS:
-               return "IVPU_PLATFORM_SIMICS";
-       case IVPU_PLATFORM_FPGA:
-               return "IVPU_PLATFORM_FPGA";
-       default:
-               return "Invalid platform";
-       }
-}
-
-static void ivpu_hw_read_platform(struct ivpu_device *vdev)
-{
-       u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
-       u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
-
-       if  (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
-               vdev->platform = platform;
-       else
-               vdev->platform = IVPU_PLATFORM_SILICON;
-
-       ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
-                ivpu_platform_to_str(vdev->platform), vdev->platform);
-}
-
-static void ivpu_hw_wa_init(struct ivpu_device *vdev)
-{
-       vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
-       vdev->wa.clear_runtime_mem = false;
-       vdev->wa.d3hot_after_power_off = true;
-}
-
-static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
-{
-       if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
-               vdev->timeout.boot = 100000;
-               vdev->timeout.jsm = 50000;
-               vdev->timeout.tdr = 2000000;
-               vdev->timeout.reschedule_suspend = 1000;
-       } else {
-               vdev->timeout.boot = 1000;
-               vdev->timeout.jsm = 500;
-               vdev->timeout.tdr = 2000;
-               vdev->timeout.reschedule_suspend = 10;
-       }
-}
-
-static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
-{
-       return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
-}
-
-/* Send KMD-initiated workpoint change */
-static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
-                            u16 target_ratio, u16 config)
-{
-       int ret;
-       u32 val;
-
-       ret = ivpu_pll_wait_for_cmd_send(vdev);
-       if (ret) {
-               ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
-               return ret;
-       }
-
-       val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
-       val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
-       val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
-       REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
-
-       val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
-       val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
-       val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
-       REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
-
-       val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
-       val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
-       REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
-
-       val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
-       val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
-       REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
-
-       ret = ivpu_pll_wait_for_cmd_send(vdev);
-       if (ret)
-               ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
-{
-       u32 exp_val = enable ? 0x1 : 0x0;
-
-       if (IVPU_WA(punit_disabled))
-               return 0;
-
-       return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
-}
-
-static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
-{
-       if (IVPU_WA(punit_disabled))
-               return 0;
-
-       return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
-}
-
-static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
-{
-       struct ivpu_hw_info *hw = vdev->hw;
-       u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
-       u32 fmin_fuse, fmax_fuse;
-
-       fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
-       fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
-       fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
-
-       fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
-       fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
-
-       hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
-       hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
-       hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
-}
-
-static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
-{
-       return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
-}
-
-static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
-{
-       struct ivpu_hw_info *hw = vdev->hw;
-       u16 target_ratio;
-       u16 config;
-       int ret;
-
-       if (IVPU_WA(punit_disabled)) {
-               ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
-                        ivpu_platform_to_str(vdev->platform));
-               return 0;
-       }
-
-       if (enable) {
-               target_ratio = hw->pll.pn_ratio;
-               config = hw->config;
-       } else {
-               target_ratio = 0;
-               config = 0;
-       }
-
-       ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
-                config, target_ratio);
-
-       ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
-       if (ret) {
-               ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_pll_wait_for_lock(vdev, enable);
-       if (ret) {
-               ivpu_err(vdev, "Timed out waiting for PLL lock\n");
-               return ret;
-       }
-
-       if (enable) {
-               ret = ivpu_pll_wait_for_status_ready(vdev);
-               if (ret) {
-                       ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
-                       return ret;
-               }
-
-               ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
-               if (ret) {
-                       ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int ivpu_pll_enable(struct ivpu_device *vdev)
-{
-       return ivpu_pll_drive(vdev, true);
-}
-
-static int ivpu_pll_disable(struct ivpu_device *vdev)
-{
-       return ivpu_pll_drive(vdev, false);
-}
-
-static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
-{
-       u32 val = 0;
-
-       val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
-       val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
-       val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
-
-       REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
-}
-
-static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
-
-       if (enable) {
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
-       } else {
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
-       }
-
-       REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
-}
-
-static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
-
-       if (enable) {
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
-       } else {
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
-       }
-
-       REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
-}
-
-static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
-           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
-           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
-{
-       u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
-
-       if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
-           !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
-               return -EIO;
-
-       return 0;
-}
-
-static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
-{
-       ivpu_boot_host_ss_rst_clr_assert(vdev);
-
-       return ivpu_boot_noc_qreqn_check(vdev, 0x0);
-}
-
-static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
-{
-       REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
-}
-
-static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
-{
-       int ret;
-       u32 val;
-
-       val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
-       if (enable)
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
-       else
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
-       REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
-
-       ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
-       if (ret) {
-               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
-       if (ret)
-               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
-{
-       return ivpu_boot_host_ss_axi_drive(vdev, true);
-}
-
-static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
-{
-       int ret;
-       u32 val;
-
-       val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
-       if (enable) {
-               val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
-               val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
-       } else {
-               val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
-               val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
-       }
-       REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
-
-       ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
-       if (ret) {
-               ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
-       if (ret)
-               ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
-{
-       return ivpu_boot_host_ss_top_noc_drive(vdev, true);
-}
-
-static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
-
-       if (enable)
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
-       else
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
-
-       REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-}
-
-static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
-
-       if (enable)
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
-       else
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
-
-       REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
-}
-
-static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
-{
-       /* FPGA model (UPF) is not power aware, so Power Island polling is skipped */
-       if (ivpu_is_fpga(vdev))
-               return 0;
-
-       return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
-                            exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
-}
-
-static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
-
-       if (enable)
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
-       else
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
-
-       REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
-}
-
-static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
-
-       if (enable)
-               val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
-       else
-               val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
-
-       REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
-}
-
-static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
-{
-       int ret;
-
-       ivpu_boot_pwr_island_trickle_drive(vdev, true);
-       ivpu_boot_pwr_island_drive(vdev, true);
-
-       ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
-       if (ret) {
-               ivpu_err(vdev, "Timed out waiting for power island status\n");
-               return ret;
-       }
-
-       ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
-       if (ret) {
-               ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
-               return ret;
-       }
-
-       ivpu_boot_host_ss_clk_drive(vdev, true);
-       ivpu_boot_pwr_island_isolation_drive(vdev, false);
-       ivpu_boot_host_ss_rst_drive(vdev, true);
-       ivpu_boot_dpu_active_drive(vdev, true);
-
-       return ret;
-}
-
-static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
-
-       val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
-       val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
-       val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
-
-       REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
-}
-
-static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
-{
-       u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
-
-       if (ivpu_is_fpga(vdev)) {
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-       } else {
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
-               val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
-       }
-
-       REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
-}
-
-static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
-{
-       u32 val;
-
-       val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
-       val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
-
-       val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
-       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
-       val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
-       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
-       val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
-       REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
-
-       val = vdev->fw->entry_point >> 9;
-       REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
-
-       val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
-       REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
-
-       ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
-                vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
-}
-
-static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
-{
-       int ret;
-       u32 val;
-
-       ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
-       if (ret) {
-               ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
-               return ret;
-       }
-
-       val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
-       if (enable)
-               val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
-       else
-               val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
-       REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
-
-       ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
-       if (ret)
-               ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
-{
-       struct ivpu_hw_info *hw = vdev->hw;
-
-       hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
-       hw->sku = TILE_SKU_BOTH_MTL;
-       hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
-
-       ivpu_pll_init_frequency_ratios(vdev);
-
-       ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
-       ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
-       ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
-       ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
-       hw->ranges.global_aliased_pio = hw->ranges.user_low;
-
-       return 0;
-}
-
-static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
-{
-       int ret;
-       u32 val;
-
-       if (IVPU_WA(punit_disabled))
-               return 0;
-
-       ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-       if (ret) {
-               ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
-               return ret;
-       }
-
-       val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
-       val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
-       REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
-
-       ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-       if (ret)
-               ivpu_err(vdev, "Timed out waiting for RESET completion\n");
-
-       return ret;
-}
-
-static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
-{
-       int ret;
-
-       ret = ivpu_boot_d0i3_drive(vdev, true);
-       if (ret)
-               ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
-
-       udelay(5); /* VPU requires 5 us to complete the transition */
-
-       return ret;
-}
-
-static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
-{
-       int ret;
-
-       ret = ivpu_boot_d0i3_drive(vdev, false);
-       if (ret)
-               ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
-{
-       int ret;
-
-       ivpu_hw_read_platform(vdev);
-       ivpu_hw_wa_init(vdev);
-       ivpu_hw_timeouts_init(vdev);
-
-       ret = ivpu_hw_mtl_reset(vdev);
-       if (ret)
-               ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
-
-       ret = ivpu_hw_mtl_d0i3_disable(vdev);
-       if (ret)
-               ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
-
-       ret = ivpu_pll_enable(vdev);
-       if (ret) {
-               ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_boot_host_ss_configure(vdev);
-       if (ret) {
-               ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
-               return ret;
-       }
-
-       /*
-        * The control circuitry for vpu_idle indication logic powers up active.
-        * To avoid an unnecessary low power mode signal from LRT during bring up,
-        * KMD disables the circuitry prior to bringing up the Main Power island.
-        */
-       ivpu_boot_vpu_idle_gen_disable(vdev);
-
-       ret = ivpu_boot_pwr_domain_enable(vdev);
-       if (ret) {
-               ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_boot_host_ss_axi_enable(vdev);
-       if (ret) {
-               ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
-               return ret;
-       }
-
-       ret = ivpu_boot_host_ss_top_noc_enable(vdev);
-       if (ret)
-               ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
-
-       return ret;
-}
-
-static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
-{
-       ivpu_boot_no_snoop_enable(vdev);
-       ivpu_boot_tbu_mmu_enable(vdev);
-       ivpu_boot_soc_cpu_boot(vdev);
-
-       return 0;
-}
-
-static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
-{
-       u32 val;
-
-       if (IVPU_WA(punit_disabled))
-               return true;
-
-       val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
-       return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
-              REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
-}
-
-static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
-{
-       int ret = 0;
-
-       if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
-               ivpu_err(vdev, "Failed to reset the VPU\n");
-       }
-
-       if (ivpu_pll_disable(vdev)) {
-               ivpu_err(vdev, "Failed to disable PLL\n");
-               ret = -EIO;
-       }
-
-       if (ivpu_hw_mtl_d0i3_enable(vdev)) {
-               ivpu_err(vdev, "Failed to enter D0I3\n");
-               ret = -EIO;
-       }
-
-       return ret;
-}
-
-static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
-{
-       u32 val;
-
-       /* Enable writing and set non-zero WDT value */
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
-
-       /* Enable writing and disable watchdog timer */
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
-
-       /* Now clear the timeout interrupt */
-       val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
-       val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
-}
-
-static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
-{
-       u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
-       u32 cpu_clock;
-
-       if ((config & 0xff) == PLL_RATIO_4_3)
-               cpu_clock = pll_clock * 2 / 4;
-       else
-               cpu_clock = pll_clock * 2 / 5;
-
-       return cpu_clock;
-}
-
-/* Register indirect accesses */
-static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
-{
-       u32 pll_curr_ratio;
-
-       pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
-       pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
-
-       if (!ivpu_is_silicon(vdev))
-               return PLL_SIMULATION_FREQ;
-
-       return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
-}
-
-static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
-{
-       return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
-}
-
-static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
-{
-       return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
-}
-
-static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
-{
-       return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
-}
-
-static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
-{
-       u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
-       u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
-
-       REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
-}
-
-static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
-{
-       return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
-}
-
-static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
-{
-       u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
-
-       return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
-}
-
-static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
-{
-       REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
-}
-
-static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
-{
-       REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
-}
-
-static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
-{
-       REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
-       REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
-       REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
-       REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-}
-
-static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
-{
-       REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-       REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
-       REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
-       REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
-}
-
-static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
-{
-       ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
-}
-
-static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
-{
-       ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
-       ivpu_hw_wdt_disable(vdev);
-       ivpu_pm_schedule_recovery(vdev);
-}
-
-static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
-{
-       ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
-}
-
-/* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
-{
-       u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-
-       REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
-               ivpu_mmu_irq_evtq_handler(vdev);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
-               ivpu_ipc_irq_handler(vdev);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
-               ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
-               ivpu_mmu_irq_gerr_handler(vdev);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
-               ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
-               ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
-               ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
-
-       return status;
-}
-
-/* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
-{
-       u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
-       bool schedule_recovery = false;
-
-       if (status == 0)
-               return 0;
-
-       /* Disable global interrupt before handling local buttress interrupts */
-       REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
-       if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
-               ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
-
-       if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
-               ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
-               REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
-               schedule_recovery = true;
-       }
-
-       if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
-               u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
-
-               ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
-                        ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
-                        REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
-                        REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
-               REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
-               schedule_recovery = true;
-       }
-
-       /*
-        * Clear local interrupt status by writing 0 to all bits.
-        * This must be done after interrupts are cleared at the source.
-        * Writing 1 triggers an interrupt, so we can't perform a read-modify-write.
-        */
-       REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
-
-       /* Re-enable global interrupt */
-       REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
-       if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
-
-       return status;
-}
-
-static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
-{
-       struct ivpu_device *vdev = ptr;
-       u32 ret_irqv, ret_irqb;
-
-       ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
-       ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
-
-       return IRQ_RETVAL(ret_irqb | ret_irqv);
-}
-
-static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
-{
-       u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-       u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
-
-       if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
-               ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
-               ivpu_err(vdev, "WDT MSS timeout detected\n");
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
-               ivpu_err(vdev, "WDT NCE timeout detected\n");
-
-       if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
-               ivpu_err(vdev, "NOC Firewall irq detected\n");
-
-       if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
-               ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
-
-       if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
-               u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
-
-               ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
-                        ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
-                        REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
-                        REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
-       }
-}
-
-const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
-       .info_init = ivpu_hw_mtl_info_init,
-       .power_up = ivpu_hw_mtl_power_up,
-       .is_idle = ivpu_hw_mtl_is_idle,
-       .power_down = ivpu_hw_mtl_power_down,
-       .boot_fw = ivpu_hw_mtl_boot_fw,
-       .wdt_disable = ivpu_hw_mtl_wdt_disable,
-       .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
-       .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
-       .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
-       .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
-       .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
-       .reg_db_set = ivpu_hw_mtl_reg_db_set,
-       .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
-       .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
-       .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
-       .irq_clear = ivpu_hw_mtl_irq_clear,
-       .irq_enable = ivpu_hw_mtl_irq_enable,
-       .irq_disable = ivpu_hw_mtl_irq_disable,
-       .irq_handler = ivpu_hw_mtl_irq_handler,
-};
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
deleted file mode 100644 (file)
index 593b8ff..0000000
--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-
-#ifndef __IVPU_HW_MTL_REG_H__
-#define __IVPU_HW_MTL_REG_H__
-
-#include <linux/bits.h>
-
-#define MTL_BUTTRESS_INTERRUPT_TYPE                                    0x00000000u
-
-#define MTL_BUTTRESS_INTERRUPT_STAT                                    0x00000004u
-#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK                   BIT_MASK(0)
-#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK                       BIT_MASK(1)
-#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK                       BIT_MASK(2)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0                                   0x00000008u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK                    GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK                    GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1                                   0x0000000cu
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK                 GENMASK(15, 0)
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK                          GENMASK(31, 16)
-
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2                                   0x00000010u
-#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK                       GENMASK(15, 0)
-
-#define MTL_BUTTRESS_WP_REQ_CMD                                                0x00000014u
-#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK                              BIT_MASK(0)
-
-#define MTL_BUTTRESS_WP_DOWNLOAD                                       0x00000018u
-#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK                     GENMASK(15, 0)
-
-#define MTL_BUTTRESS_CURRENT_PLL                                       0x0000001cu
-#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK                            GENMASK(15, 0)
-
-#define MTL_BUTTRESS_PLL_ENABLE                                                0x00000020u
-
-#define MTL_BUTTRESS_FMIN_FUSE                                         0x00000024u
-#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK                          GENMASK(7, 0)
-#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK                           GENMASK(15, 8)
-
-#define MTL_BUTTRESS_FMAX_FUSE                                         0x00000028u
-#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK                          GENMASK(7, 0)
-
-#define MTL_BUTTRESS_TILE_FUSE                                         0x0000002cu
-#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK                              BIT_MASK(0)
-#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK                                        GENMASK(3, 2)
-
-#define MTL_BUTTRESS_LOCAL_INT_MASK                                    0x00000030u
-#define MTL_BUTTRESS_GLOBAL_INT_MASK                                   0x00000034u
-
-#define MTL_BUTTRESS_PLL_STATUS                                                0x00000040u
-#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK                              BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_STATUS                                                0x00000044u
-#define MTL_BUTTRESS_VPU_STATUS_READY_MASK                             BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK                              BIT_MASK(1)
-
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL                                  0x00000060u
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK                  BIT_MASK(0)
-#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK                          BIT_MASK(2)
-
-#define MTL_BUTTRESS_VPU_IP_RESET                                      0x00000050u
-#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK                         BIT_MASK(0)
-
-#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET                              0x00000080u
-#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE                                        0x00000084u
-#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE                              0x00000088u
-
-#define MTL_BUTTRESS_ATS_ERR_LOG_0                                     0x000000a0u
-#define MTL_BUTTRESS_ATS_ERR_LOG_1                                     0x000000a4u
-#define MTL_BUTTRESS_ATS_ERR_CLEAR                                     0x000000a8u
-
-#define MTL_BUTTRESS_UFI_ERR_LOG                                       0x000000b0u
-#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK                            GENMASK(11, 0)
-#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK                           GENMASK(19, 12)
-#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK                           GENMASK(24, 20)
-
-#define MTL_BUTTRESS_UFI_ERR_CLEAR                                     0x000000b4u
-
-#define MTL_VPU_HOST_SS_CPR_CLK_SET                                    0x00000084u
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK                       BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK                       BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK                       BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_SET                                    0x00000094u
-#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK                       BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK                       BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK                       BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_CPR_RST_CLR                                    0x00000098u
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK                           BIT_MASK(0)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK                       BIT_MASK(1)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK                       BIT_MASK(10)
-#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK                       BIT_MASK(11)
-
-#define MTL_VPU_HOST_SS_HW_VERSION                                     0x00000108u
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK                   GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK                     GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK                 GENMASK(23, 16)
-
-#define MTL_VPU_HOST_SS_GEN_CTRL                                       0x00000118u
-#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK                               GENMASK(31, 29)
-
-#define MTL_VPU_HOST_SS_NOC_QREQN                                      0x00000154u
-#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK                     BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN                                   0x00000158u
-#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK                  BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_NOC_QDENY                                      0x0000015cu
-#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK                     BIT_MASK(0)
-
-#define MTL_VPU_TOP_NOC_QREQN                                          0x00000160u
-#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK                            BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK                      BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QACCEPTN                                       0x00000164u
-#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK                         BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK                   BIT_MASK(1)
-
-#define MTL_VPU_TOP_NOC_QDENY                                          0x00000168u
-#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK                            BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK                      BIT_MASK(1)
-
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN                                  0x00000170u
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK                 BIT_MASK(0)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK                     BIT_MASK(1)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK                    BIT_MASK(2)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK                      BIT_MASK(3)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK                     BIT_MASK(4)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK                     BIT_MASK(5)
-#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK                 BIT_MASK(6)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_0                                   0x00010210u
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK                  BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK                  BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK                  BIT_MASK(2)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK                  BIT_MASK(3)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK            BIT_MASK(4)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK                        BIT_MASK(5)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK                        BIT_MASK(6)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK                        BIT_MASK(7)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK             BIT_MASK(8)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK       BIT_MASK(30)
-#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK       BIT_MASK(31)
-
-#define MTL_VPU_HOST_SS_ICB_STATUS_1                                   0x00010214u
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK       BIT_MASK(0)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK       BIT_MASK(1)
-#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK       BIT_MASK(2)
-
-#define MTL_VPU_HOST_SS_ICB_CLEAR_0                                    0x00010220u
-#define MTL_VPU_HOST_SS_ICB_CLEAR_1                                    0x00010224u
-#define MTL_VPU_HOST_SS_ICB_ENABLE_0                                   0x00010240u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM                               0x000200f4u
-
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT                              0x000200fcu
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK            GENMASK(7, 0)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK           GENMASK(15, 8)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK              GENMASK(23, 16)
-#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK                   GENMASK(31, 24)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0                                        0x00030020u
-#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK                   BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0                             0x00030024u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK                        BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0                     0x00030028u
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK                BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0                         0x0003002cu
-#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK            BIT_MASK(3)
-
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN                               0x00030200u
-#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK                       BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE                                 0x00030204u
-#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK                 BIT_MASK(0)
-
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO                             0x00041040u
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK                   BIT_MASK(0)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK             GENMASK(2, 1)
-#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK         GENMASK(31, 3)
-
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR                                0x00082020u
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK    GENMASK(15, 0)
-#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK         GENMASK(31, 16)
-
-#define MTL_VPU_HOST_MMU_IDR0                                          0x00200000u
-#define MTL_VPU_HOST_MMU_IDR1                                          0x00200004u
-#define MTL_VPU_HOST_MMU_IDR3                                          0x0020000cu
-#define MTL_VPU_HOST_MMU_IDR5                                          0x00200014u
-#define MTL_VPU_HOST_MMU_CR0                                           0x00200020u
-#define MTL_VPU_HOST_MMU_CR0ACK                                                0x00200024u
-#define MTL_VPU_HOST_MMU_CR1                                           0x00200028u
-#define MTL_VPU_HOST_MMU_CR2                                           0x0020002cu
-#define MTL_VPU_HOST_MMU_IRQ_CTRL                                      0x00200050u
-#define MTL_VPU_HOST_MMU_IRQ_CTRLACK                                   0x00200054u
-
-#define MTL_VPU_HOST_MMU_GERROR                                                0x00200060u
-#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK                              BIT_MASK(0)
-#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK                          BIT_MASK(2)
-#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK                          BIT_MASK(3)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK                      BIT_MASK(4)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK                      BIT_MASK(5)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK                      BIT_MASK(6)
-#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK                           BIT_MASK(7)
-
-#define MTL_VPU_HOST_MMU_GERRORN                                       0x00200064u
-
-#define MTL_VPU_HOST_MMU_STRTAB_BASE                                   0x00200080u
-#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG                               0x00200088u
-#define MTL_VPU_HOST_MMU_CMDQ_BASE                                     0x00200090u
-#define MTL_VPU_HOST_MMU_CMDQ_PROD                                     0x00200098u
-#define MTL_VPU_HOST_MMU_CMDQ_CONS                                     0x0020009cu
-#define MTL_VPU_HOST_MMU_EVTQ_BASE                                     0x002000a0u
-#define MTL_VPU_HOST_MMU_EVTQ_PROD                                     0x002000a8u
-#define MTL_VPU_HOST_MMU_EVTQ_CONS                                     0x002000acu
-#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC                                 (0x002000a8u + SZ_64K)
-#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC                                 (0x002000acu + SZ_64K)
-
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES                              0x00360000u
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK       BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK                BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK                BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK     BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK     BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK     BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK     GENMASK(10, 6)
-#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK     GENMASK(15, 11)
-
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV                                   0x00360004u
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK              BIT_MASK(0)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK              BIT_MASK(1)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK              BIT_MASK(2)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK              BIT_MASK(3)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK              BIT_MASK(4)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK              BIT_MASK(5)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK              BIT_MASK(6)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK              BIT_MASK(7)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK              BIT_MASK(8)
-#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK              BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE                                        0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL                            0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG                              0x04400010u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG                             0x04400014u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG                                0x04400020u
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET                              0x06010004u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK                 BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR                              0x06010018u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK                 BIT_MASK(1)
-
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC                          0x06010040u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK                BIT_MASK(0)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK                BIT_MASK(1)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK                BIT_MASK(2)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK                BIT_MASK(3)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK         GENMASK(31, 4)
-
-#define MTL_VPU_CPU_SS_TIM_WATCHDOG                                    0x0602009cu
-#define MTL_VPU_CPU_SS_TIM_WDOG_EN                                     0x060200a4u
-#define MTL_VPU_CPU_SS_TIM_SAFE                                                0x060200a8u
-#define MTL_VPU_CPU_SS_TIM_IPC_FIFO                                    0x060200f0u
-
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG                                  0x06021008u
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK             BIT_MASK(9)
-
-#define MTL_VPU_CPU_SS_DOORBELL_0                                      0x06300000u
-#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK                             BIT_MASK(0)
-
-#define MTL_VPU_CPU_SS_DOORBELL_1                                      0x06301000u
-
-#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index d45be0615b47621a2913be826c6cd6905f5250af..de9e69f70af7e215ff6f85812473158890ccef68 100644 (file)
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -289,15 +289,13 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 {
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_job *job;
-       size_t buf_size;
        int ret;
 
        ret = ivpu_rpm_get(vdev);
        if (ret < 0)
                return NULL;
 
-       buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
-       job = kzalloc(buf_size, GFP_KERNEL);
+       job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
        if (!job)
                goto err_rpm_put;
 
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index b8b259b3aa635d02e53e5a127d05f9ab093fa5fa..baefaf7bb3cbb97c50551fab28655b9cbb1c866c 100644 (file)
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,7 +7,7 @@
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 #define IVPU_MMU_CD_0_ASET             BIT(47)
 #define IVPU_MMU_CD_0_ASID             GENMASK_ULL(63, 48)
 
+#define IVPU_MMU_T0SZ_48BIT             16
+#define IVPU_MMU_T0SZ_38BIT             26
+
+#define IVPU_MMU_IPS_48BIT             5
+#define IVPU_MMU_IPS_44BIT             4
+#define IVPU_MMU_IPS_42BIT             3
+#define IVPU_MMU_IPS_40BIT             2
+#define IVPU_MMU_IPS_36BIT             1
+#define IVPU_MMU_IPS_32BIT             0
+
 #define IVPU_MMU_CD_1_TTB0_MASK                GENMASK_ULL(51, 4)
 
 #define IVPU_MMU_STE_0_S1CDMAX         GENMASK_ULL(63, 59)
 #define IVPU_MMU_REG_TIMEOUT_US                (10 * USEC_PER_MSEC)
 #define IVPU_MMU_QUEUE_TIMEOUT_US      (100 * USEC_PER_MSEC)
 
-#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
-                                 (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
+                                 (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
 
 static char *ivpu_mmu_event_to_str(u32 cmd)
 {
@@ -240,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
        else
                val_ref = IVPU_MMU_IDR0_REF;
 
-       val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
+       val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
        if (val != val_ref)
                ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
 
-       val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
+       val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
        if (val != IVPU_MMU_IDR1_REF)
                ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
 
-       val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
+       val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
        if (val != IVPU_MMU_IDR3_REF)
                ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
 
@@ -259,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
        else
                val_ref = IVPU_MMU_IDR5_REF;
 
-       val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
+       val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
        if (val != val_ref)
                ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
 }
@@ -386,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
        u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
        int ret;
 
-       ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
+       ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
        if (ret)
                return ret;
 
-       return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
+       return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
 }
 
 static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
 {
        struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
 
-       return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+       return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
                         IVPU_MMU_QUEUE_TIMEOUT_US);
 }
 
@@ -437,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
                return ret;
 
        clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
-       REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
+       REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
 
        ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
        if (ret)
@@ -485,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
        mmu->evtq.prod = 0;
        mmu->evtq.cons = 0;
 
-       ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
+       ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
        if (ret)
                return ret;
 
@@ -495,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
-       REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
+       REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
 
-       REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
-       REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+       REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
+       REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
 
-       REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
-       REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
-       REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
+       REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
+       REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
+       REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
 
        val = IVPU_MMU_CR0_CMDQEN;
-       ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+       ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;
 
@@ -521,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
        if (ret)
                return ret;
 
-       REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
-       REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
-       REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
+       REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
+       REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
+       REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
 
        val |= IVPU_MMU_CR0_EVTQEN;
-       ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+       ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;
 
        val |= IVPU_MMU_CR0_ATSCHK;
-       ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+       ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;
 
@@ -540,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
                return ret;
 
        val |= IVPU_MMU_CR0_SMMUEN;
-       return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+       return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
 }
 
 static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -617,12 +627,12 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
        entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
 
        if (cd_dma != 0) {
-               cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
+               cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
-                       FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
+                       FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
                        FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
                        IVPU_MMU_CD_0_TCR_EPD1 |
                        IVPU_MMU_CD_0_AA64 |
@@ -791,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
        u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
        u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
 
-       evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
+       evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
        if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
                return NULL;
 
        clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
 
        evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
-       REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+       REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
 
        return evt;
 }
@@ -831,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
 
        ivpu_dbg(vdev, IRQ, "MMU error\n");
 
-       gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
-       gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
+       gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
+       gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
 
        active = gerror_val ^ gerrorn_val;
        if (!(active & IVPU_MMU_GERROR_ERR_MASK))
                return;
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
                ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
                ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
 
-       if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
+       if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
                ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
 
-       REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
+       REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
 }
 
 int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 8ce9b12ac356680c645bfbefe95fa553d59068ee..1d2e554e2c4a071d6b6974051252762c9ee2e790 100644 (file)
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 
-#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(38, 30)
+#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
+#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
 #define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
 #define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
-#define IVPU_MMU_ENTRY_FLAGS_MASK        GENMASK(11, 0)
+#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
+#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
 #define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
 #define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
 #define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
 #define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
 #define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)
 
-#define IVPU_MMU_PAGE_SIZE    SZ_4K
-#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
-#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
-#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
+#define IVPU_MMU_PAGE_SIZE       SZ_4K
+#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
+#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
+#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
+#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
+#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
+#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
 
 #define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
 #define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
 static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
 {
        dma_addr_t pgd_dma;
-       u64 *pgd;
 
-       pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
-       if (!pgd)
+       pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
+                                                 GFP_KERNEL);
+       if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;
 
-       pgtable->pgd = pgd;
        pgtable->pgd_dma = pgd_dma;
 
        return 0;
 }
 
-static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
 {
-       int pgd_index, pmd_index;
+       if (cpu_addr)
+               dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
+                                 dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+}
+
+static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+       int pgd_idx, pud_idx, pmd_idx;
+       dma_addr_t pud_dma, pmd_dma, pte_dma;
+       u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;
 
-       for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
-               u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
-               u64 *pmd = pgtable->pgd_entries[pgd_index];
+       for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
+               pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
+               pud_dma = pgtable->pgd_dma_ptr[pgd_idx];
 
-               if (!pmd_entries)
+               if (!pud_dma_ptr)
                        continue;
 
-               for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
-                       if (pmd_entries[pmd_index])
-                               dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
-                                           pmd_entries[pmd_index],
-                                           pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+               for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
+                       pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
+                       pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];
+
+                       if (!pmd_dma_ptr)
+                               continue;
+
+                       for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
+                               pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
+                               pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
+
+                               ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
+                       }
+
+                       kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
+                       ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
                }
 
-               kfree(pmd_entries);
-               dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
-                           pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+               kfree(pgtable->pmd_ptrs[pgd_idx]);
+               kfree(pgtable->pte_ptrs[pgd_idx]);
+               ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        }
 
-       dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
-                   pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+       ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+}
+
+static u64*
+ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
+{
+       u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
+       dma_addr_t pud_dma;
+
+       if (pud_dma_ptr)
+               return pud_dma_ptr;
+
+       pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
+       if (!pud_dma_ptr)
+               return NULL;
+
+       drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
+       pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+       if (!pgtable->pmd_ptrs[pgd_idx])
+               goto err_free_pud_dma_ptr;
+
+       drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
+       pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+       if (!pgtable->pte_ptrs[pgd_idx])
+               goto err_free_pmd_ptrs;
+
+       pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
+       pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
+
+       return pud_dma_ptr;
+
+err_free_pmd_ptrs:
+       kfree(pgtable->pmd_ptrs[pgd_idx]);
+
+err_free_pud_dma_ptr:
+       ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+       return NULL;
 }
 
 static u64*
-ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
+ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
+                   int pud_idx)
 {
-       u64 **pmd_entries;
+       u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;
-       u64 *pmd;
 
-       if (pgtable->pgd_entries[pgd_index])
-               return pgtable->pgd_entries[pgd_index];
+       if (pmd_dma_ptr)
+               return pmd_dma_ptr;
 
-       pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
-       if (!pmd)
+       pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+       if (!pmd_dma_ptr)
                return NULL;
 
-       pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
-       if (!pmd_entries)
-               goto err_free_pgd;
+       drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
+       pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+       if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
+               goto err_free_pmd_dma_ptr;
 
-       pgtable->pgd_entries[pgd_index] = pmd;
-       pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
-       pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
+       pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
+       pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
 
-       return pmd;
+       return pmd_dma_ptr;
 
-err_free_pgd:
-       dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
+err_free_pmd_dma_ptr:
+       ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
 }
 
 static u64*
 ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
-                   int pgd_index, int pmd_index)
+                   int pgd_idx, int pud_idx, int pmd_idx)
 {
+       u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;
-       u64 *pte;
 
-       if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
-               return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
+       if (pte_dma_ptr)
+               return pte_dma_ptr;
 
-       pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
-       if (!pte)
+       pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+       if (!pte_dma_ptr)
                return NULL;
 
-       pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
-       pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
+       pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
+       pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
 
-       return pte;
+       return pte_dma_ptr;
 }
 
 static int
 ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
-                         u64 vpu_addr, dma_addr_t dma_addr, int prot)
+                         u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
 {
        u64 *pte;
-       int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
-       int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
-       int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+       int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+       int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+       int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+       int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+       /* Allocate PUD - second level page table if needed */
+       if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
+               return -ENOMEM;
 
-       /* Allocate PMD - second level page table if needed */
-       if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
+       /* Allocate PMD - third level page table if needed */
+       if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;
 
-       /* Allocate PTE - third level page table if needed */
-       pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
+       /* Allocate PTE - fourth level page table if needed */
+       pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;
 
-       /* Update PTE - third level page table with DMA address */
-       pte[pte_index] = dma_addr | prot;
+       /* Update PTE */
+       pte[pte_idx] = dma_addr | prot;
+
+       return 0;
+}
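
The map path now splits the VPU address across four 512-entry levels with FIELD_GET(). A worked sketch, assuming 4 KiB pages and 9 index bits per level (the real *_INDEX_MASK definitions live in the driver header and are assumed to match this layout):

    #include <linux/bitfield.h>

    #define PGD_MASK GENMASK_ULL(47, 39)    /* assumed mask values */
    #define PUD_MASK GENMASK_ULL(38, 30)
    #define PMD_MASK GENMASK_ULL(29, 21)
    #define PTE_MASK GENMASK_ULL(20, 12)

    u64 vpu_addr = 0x8040201000ULL;              /* bits 39, 30, 21, 12 set */
    int pgd_idx = FIELD_GET(PGD_MASK, vpu_addr); /* 1 */
    int pud_idx = FIELD_GET(PUD_MASK, vpu_addr); /* 1 */
    int pmd_idx = FIELD_GET(PMD_MASK, vpu_addr); /* 1 */
    int pte_idx = FIELD_GET(PTE_MASK, vpu_addr); /* 1 */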
+
+static int
+ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+                             dma_addr_t dma_addr, u64 prot)
+{
+       size_t size = IVPU_MMU_CONT_PAGES_SIZE;
+
+       drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
+       drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
+
+       prot |= IVPU_MMU_ENTRY_FLAG_CONT;
+
+       while (size) {
+               int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+
+               if (ret)
+                       return ret;
+
+               size -= IVPU_MMU_PAGE_SIZE;
+               vpu_addr += IVPU_MMU_PAGE_SIZE;
+               dma_addr += IVPU_MMU_PAGE_SIZE;
+       }
 
        return 0;
 }
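
The contiguous helper depends on a size relationship defined elsewhere in the driver; assuming the usual 4 KiB granule, a 64 KiB block is written as 16 ordinary PTEs that all carry IVPU_MMU_ENTRY_FLAG_CONT, letting the MMU cache the run as a single TLB entry:

    /* Assumed constants, consistent with the loop above: */
    #define IVPU_MMU_PAGE_SIZE       SZ_4K
    #define IVPU_MMU_CONT_PAGES_SIZE (16 * IVPU_MMU_PAGE_SIZE)  /* 64 KiB */

    /* => one ivpu_mmu_context_map_cont_64k() call writes
     *    64 KiB / 4 KiB = 16 PTEs, each flagged CONT. */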
 
 static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
 {
-       int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
-       int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
-       int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+       int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+       int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+       int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+       int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
 
        /* Update PTE with dummy physical address and clear flags */
-       ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
+       ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
 }
 
 static void
 ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
 {
+       struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
        u64 end_addr = vpu_addr + size;
-       u64 *pgd = ctx->pgtable.pgd;
 
        /* Align to PMD entry (2 MB) */
        vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
 
        while (vpu_addr < end_addr) {
-               int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
-               u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
-               u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
-
-               while (vpu_addr < end_addr && vpu_addr < pmd_end) {
-                       int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
-                       u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
-
-                       clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
-                       vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+               int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+               u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
+
+               while (vpu_addr < end_addr && vpu_addr < pud_end) {
+                       int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+                       u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+
+                       while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+                               int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+
+                               clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
+                                                   IVPU_MMU_PGTABLE_SIZE);
+                               vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+                       }
+                       clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
+                                           IVPU_MMU_PGTABLE_SIZE);
                }
-               clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
+               clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
        }
-       clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
+       clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
 }
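
The flush walk mirrors the hierarchy: every leaf PTE table touched by the range is flushed before the PMD table that points at it, then the PUD, then the root, so the device never fetches a pointer into a cache line that is still dirty on the CPU. Schematically:

    /* Flush order per 2 MB of mapped range (innermost first):
     *   1. pte_ptrs[pgd][pud][pmd]  - leaf table holding the new PTEs
     *   2. pmd_ptrs[pgd][pud]       - table whose entry points at (1)
     *   3. pud_ptrs[pgd]            - table whose entry points at (2)
     *   4. pgd_dma_ptr              - root table
     * Each clflush_cache_range() covers one full IVPU_MMU_PGTABLE_SIZE table.
     */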
 
 static int
 ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
-                          u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+                          u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
 {
+       int map_size;
+       int ret;
+
        while (size) {
-               int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+               if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
+                   IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
+                       ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
+                       map_size = IVPU_MMU_CONT_PAGES_SIZE;
+               } else {
+                       ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+                       map_size = IVPU_MMU_PAGE_SIZE;
+               }
 
                if (ret)
                        return ret;
 
-               vpu_addr += IVPU_MMU_PAGE_SIZE;
-               dma_addr += IVPU_MMU_PAGE_SIZE;
-               size -= IVPU_MMU_PAGE_SIZE;
+               vpu_addr += map_size;
+               dma_addr += map_size;
+               size -= map_size;
        }
 
        return 0;
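
The fast-path test uses a compact idiom: OR-ing the two addresses before IS_ALIGNED() checks both alignments in one shot, since the OR has a low bit set exactly when either operand does. For example:

    u64 vpu = 0x10000, dma = 0x24000;

    /* vpu is 64 KiB aligned, dma is not (low 16 bits are 0x4000), so the
     * combined test correctly rejects the contiguous fast path: */
    bool cont_ok = IS_ALIGNED(vpu | dma, SZ_64K);   /* false */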
@@ -216,8 +322,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
 {
        struct scatterlist *sg;
-       int prot;
        int ret;
+       u64 prot;
        u64 i;
 
        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
@@ -237,7 +343,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
        mutex_lock(&ctx->lock);
 
        for_each_sgtable_dma_sg(sgt, sg, i) {
-               u64 dma_addr = sg_dma_address(sg) - sg->offset;
+               dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;
 
                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
@@ -293,8 +399,14 @@ ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
 {
        lockdep_assert_held(&ctx->lock);
 
-       return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
-                                         0, range->start, range->end, DRM_MM_INSERT_BEST);
+       if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
+               if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
+                                                range->start, range->end, DRM_MM_INSERT_BEST))
+                       return 0;
+       }
+
+       return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
+                                          range->start, range->end, DRM_MM_INSERT_BEST);
 }
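
The allocator now follows a try-stricter-then-relax pattern: first ask drm_mm for a placement aligned to 64 KiB so the whole buffer can use contiguous mappings, and only if that fails (say, in a fragmented range) retry at plain page alignment. Reduced to its shape, with hypothetical names:

    if (want_cont && !insert(mm, node, size, SZ_64K))
            return 0;                       /* got the stricter alignment */
    return insert(mm, node, size, SZ_4K);   /* relax; may still fail */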
 
 void
@@ -319,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
                return ret;
 
        if (!context_id) {
-               start = vdev->hw->ranges.global_low.start;
-               end = vdev->hw->ranges.global_high.end;
+               start = vdev->hw->ranges.global.start;
+               end = vdev->hw->ranges.shave.end;
        } else {
-               start = vdev->hw->ranges.user_low.start;
-               end = vdev->hw->ranges.user_high.end;
+               start = vdev->hw->ranges.user.start;
+               end = vdev->hw->ranges.dma.end;
        }
 
        drm_mm_init(&ctx->mm, start, end - start);
@@ -334,11 +446,15 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
 
 static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
 {
-       drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+       if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
+               return;
 
        mutex_destroy(&ctx->lock);
-       ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+       ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);
+
+       ctx->pgtable.pgd_dma_ptr = NULL;
+       ctx->pgtable.pgd_dma = 0;
 }
 
 int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
index ddf11b95023a0eb4416572e3363d9d56a2a6ffc6..961a0d6a6c7ff4942c80171aa760e863ee736707 100644 (file)
@@ -12,12 +12,13 @@ struct ivpu_device;
 struct ivpu_file_priv;
 struct ivpu_addr_range;
 
-#define IVPU_MMU_PGTABLE_ENTRIES       512
+#define IVPU_MMU_PGTABLE_ENTRIES       512ull
 
 struct ivpu_mmu_pgtable {
-       u64             **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
-       u64             *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
-       u64             *pgd;
+       u64             ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+       u64             **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+       u64             *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+       u64             *pgd_dma_ptr;
        dma_addr_t      pgd_dma;
 };
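
The renamed fields keep a CPU-side shadow of the device's page-table tree: only the u64 tables are dma_alloc_wc() memory, while the arrays that locate them are plain kzalloc() memory. The correspondence established by the ensure_*() helpers:

    /* pgd_dma_ptr[i]       == DMA address of PUD table i     | VALID
     * pud_ptrs[i][j]       == DMA address of PMD table i,j   | VALID
     * pmd_ptrs[i][j][k]    == DMA address of PTE table i,j,k | VALID
     * pte_ptrs[i][j][k][l] == DMA address of the mapped page | prot
     *
     * pud_ptrs[i], pmd_ptrs[i][j] and pte_ptrs[i][j][k] are the CPU
     * mappings of the respective DMA tables; pmd_ptrs[i], pte_ptrs[i]
     * and pte_ptrs[i][j] are kzalloc()'d pointer arrays.
     */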
 
index aa4d56dc52b39266173d0ec9f55c6e072d03af8b..e6f27daf5560b691a8483ff7963f89aa69dbea65 100644 (file)
@@ -259,6 +259,7 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
        pm_runtime_get_sync(vdev->drm.dev);
 
        ivpu_dbg(vdev, PM, "Pre-reset..\n");
+       atomic_inc(&vdev->pm->reset_counter);
        atomic_set(&vdev->pm->in_reset, 1);
        ivpu_shutdown(vdev);
        ivpu_pm_prepare_cold_boot(vdev);
index baca981872551cc6e9a1380075c4fc749a735515..fd4eada1290fb4bc2f1e8e8c4a0c852d1b796e07 100644 (file)
@@ -14,6 +14,7 @@ struct ivpu_pm_info {
        struct ivpu_device *vdev;
        struct work_struct recovery_work;
        atomic_t in_reset;
+       atomic_t reset_counter;
        bool is_warmboot;
        u32 suspend_reschedule_counter;
 };
index e9a1cb779b3056aedb3cb561926d5472aab56d11..a90b64b325b4559f444bab8d5065f641a1f5f032 100644 (file)
@@ -1292,7 +1292,6 @@ static void update_profiling_data(struct drm_file *file_priv,
 static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
                                   bool is_partial)
 {
-       struct qaic_partial_execute_entry *pexec;
        struct qaic_execute *args = data;
        struct qaic_execute_entry *exec;
        struct dma_bridge_chan *dbc;
@@ -1312,7 +1311,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
 
        received_ts = ktime_get_ns();
 
-       size = is_partial ? sizeof(*pexec) : sizeof(*exec);
+       size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
        n = (unsigned long)size * args->hdr.count;
        if (args->hdr.count == 0 || n / args->hdr.count != size)
                return -EINVAL;
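
With pexec no longer needed as a variable, size is taken from sizeof(struct qaic_partial_execute_entry) directly; the guard that follows is the classic divide-back overflow check. Isolated as a sketch (the kernel's check_mul_overflow() in <linux/overflow.h> expresses the same test):

    /* True when count is zero or size * count wrapped around: */
    static bool bad_count(unsigned long size, unsigned long count)
    {
            unsigned long n = size * count;

            return count == 0 || n / count != size;
    }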
@@ -1320,7 +1319,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
        user_data = u64_to_user_ptr(args->data);
 
        exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
-       pexec = (struct qaic_partial_execute_entry *)exec;
        if (!exec)
                return -ENOMEM;
 
index b5ba550a0c0400e03d3875bcbc5095c73582c06d..b5de82e6eb4d56a41b1acabe03a95413522ba3d3 100644 (file)
@@ -165,7 +165,6 @@ static const struct drm_driver qaic_accel_driver = {
 
        .ioctls                 = qaic_drm_ioctls,
        .num_ioctls             = ARRAY_SIZE(qaic_drm_ioctls),
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import       = qaic_gem_prime_import,
 };
 
index c2cab7e2b12684b807035ad5d005a2b0ce293fb2..729845bcc803afebb3ffec6bdab24b6f77f0e6cd 100644 (file)
@@ -79,7 +79,6 @@ static int cfag12864bfb_probe(struct platform_device *device)
        info->var = cfag12864bfb_var;
        info->pseudo_palette = NULL;
        info->par = NULL;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        if (register_framebuffer(info) < 0)
                goto fballoced;
index 0c5cd5193fbfbc54130798df4d6488c9bf059658..3a2d883872249e98713b1b813be84b0657388ca9 100644 (file)
@@ -646,7 +646,6 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
        fbdev->info->var = ht16k33_fb_var;
        fbdev->info->bl_dev = bl;
        fbdev->info->pseudo_palette = NULL;
-       fbdev->info->flags = FBINFO_FLAG_DEFAULT;
        fbdev->info->par = priv;
 
        err = register_framebuffer(fbdev->info);
index ced0dcf86e0bf8318457ca837ef522f028aff43f..45fd13ef13fc645ef7b31233308e6b35f6ef1222 100644 (file)
@@ -717,7 +717,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                if (!d->config_buf)
                        goto err_alloc;
 
-               for (i = 0; i < chip->num_config_regs; i++) {
+               for (i = 0; i < chip->num_config_bases; i++) {
                        d->config_buf[i] = kcalloc(chip->num_config_regs,
                                                   sizeof(**d->config_buf),
                                                   GFP_KERNEL);
index 635ce0648133dd28932ff5bab9447cecbf4af6ca..55c5b48bc276fe74b538d469f0db241632aee824 100644 (file)
@@ -162,21 +162,15 @@ int null_register_zoned_dev(struct nullb *nullb)
        disk_set_zoned(nullb->disk, BLK_ZONED_HM);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
-       if (queue_is_mq(q)) {
-               int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
-
-               if (ret)
-                       return ret;
-       } else {
-               blk_queue_chunk_sectors(q, dev->zone_size_sects);
-               nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
-       }
-
+       blk_queue_chunk_sectors(q, dev->zone_size_sects);
+       nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
        blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
        disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
        disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
 
+       if (queue_is_mq(q))
+               return blk_revalidate_disk_zones(nullb->disk, NULL);
+
        return 0;
 }
 
index b47358da92a231ee15bf5452c236df93452fe15d..1fe011676d070ed275490e0f265ca958f8b080bf 100644 (file)
@@ -751,7 +751,6 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 {
        u32 v, wg;
        u8 model;
-       int ret;
 
        virtio_cread(vdev, struct virtio_blk_config,
                     zoned.model, &model);
@@ -806,6 +805,7 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                        vblk->zone_sectors);
                return -ENODEV;
        }
+       blk_queue_chunk_sectors(q, vblk->zone_sectors);
        dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
 
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
@@ -814,26 +814,22 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                blk_queue_max_discard_sectors(q, 0);
        }
 
-       ret = blk_revalidate_disk_zones(vblk->disk, NULL);
-       if (!ret) {
-               virtio_cread(vdev, struct virtio_blk_config,
-                            zoned.max_append_sectors, &v);
-               if (!v) {
-                       dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
-                       return -ENODEV;
-               }
-               if ((v << SECTOR_SHIFT) < wg) {
-                       dev_err(&vdev->dev,
-                               "write granularity %u exceeds max_append_sectors %u limit\n",
-                               wg, v);
-                       return -ENODEV;
-               }
-
-               blk_queue_max_zone_append_sectors(q, v);
-               dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+       virtio_cread(vdev, struct virtio_blk_config,
+                    zoned.max_append_sectors, &v);
+       if (!v) {
+               dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
+               return -ENODEV;
+       }
+       if ((v << SECTOR_SHIFT) < wg) {
+               dev_err(&vdev->dev,
+                       "write granularity %u exceeds max_append_sectors %u limit\n",
+                       wg, v);
+               return -ENODEV;
        }
+       blk_queue_max_zone_append_sectors(q, v);
+       dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
 
-       return ret;
+       return blk_revalidate_disk_zones(vblk->disk, NULL);
 }
 
 #else
index d3510cfdb3eb43e99d5f704feb7609d975bf99dd..2783d3d55fceeefeb80e3ec8bc5ccddaceff671a 100644 (file)
@@ -269,7 +269,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
        return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
 }
 
-static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
        unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
index 91d1ed5581366aef483e0385a26d07f3c72821ef..6c3657679a88089bad00319c8517e4524af45126 100644 (file)
@@ -117,7 +117,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
        return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
 }
 
-static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+static int us3_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
        unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
index 6cfbbf0720bdc634a6c645fa14700360fae5646d..b5b62e40ccc17ce773d73de71cc0db83e501777d 100644 (file)
@@ -33,7 +33,7 @@
  * into their address space. This necessitated the creation of the DMA-BUF sysfs
  * statistics interface to provide per-buffer information on production systems.
  *
- * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about
+ * The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about
  * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
  *
  * The following stats are exposed by the interface:
index aa4ea8530cb35ab08178e3aad7899af47f287be2..21916bba77d58be342d2d82dc654c7df330f01fb 100644 (file)
@@ -131,7 +131,6 @@ static struct file_system_type dma_buf_fs_type = {
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
        struct dma_buf *dmabuf;
-       int ret;
 
        if (!is_dma_buf_file(file))
                return -EINVAL;
@@ -147,11 +146,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
-       dma_resv_lock(dmabuf->resv, NULL);
-       ret = dmabuf->ops->mmap(dmabuf, vma);
-       dma_resv_unlock(dmabuf->resv);
-
-       return ret;
+       return dmabuf->ops->mmap(dmabuf, vma);
 }
 
 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -850,6 +845,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
  *     - &dma_buf_ops.release()
  *     - &dma_buf_ops.begin_cpu_access()
  *     - &dma_buf_ops.end_cpu_access()
+ *     - &dma_buf_ops.mmap()
  *
  * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
  *    reservation and exporter can't take the lock:
@@ -858,7 +854,6 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
  *     - &dma_buf_ops.unpin()
  *     - &dma_buf_ops.map_dma_buf()
  *     - &dma_buf_ops.unmap_dma_buf()
- *     - &dma_buf_ops.mmap()
  *     - &dma_buf_ops.vmap()
  *     - &dma_buf_ops.vunmap()
  *
@@ -1463,8 +1458,6 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
 {
-       int ret;
-
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;
 
@@ -1485,11 +1478,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
        vma_set_file(vma, dmabuf->file);
        vma->vm_pgoff = pgoff;
 
-       dma_resv_lock(dmabuf->resv, NULL);
-       ret = dmabuf->ops->mmap(dmabuf, vma);
-       dma_resv_unlock(dmabuf->resv);
-
-       return ret;
+       return dmabuf->ops->mmap(dmabuf, vma);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
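
Both mmap paths stop wrapping &dma_buf_ops.mmap in the reservation lock, matching the callback-list move in the documentation above; the dma_resv_assert_held() removals in the CMA heap, system heap and udmabuf below are the other half of the same change. An exporter that still wants the lock now takes it itself, roughly (map_buffer_pages() is a hypothetical helper):

    static int my_exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
    {
            int ret;

            dma_resv_lock(dmabuf->resv, NULL);  /* no longer held by the core */
            ret = map_buffer_pages(dmabuf->priv, vma);
            dma_resv_unlock(dmabuf->resv);
            return ret;
    }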
 
index 7002bca792ff0832cc626b5dcb8db659d428272e..c625bb2b5d563647f7a0ec40e0322a519056ea94 100644 (file)
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 {
        struct dma_fence_array *result;
        struct dma_fence *tmp, **array;
+       ktime_t timestamp;
        unsigned int i;
        size_t count;
 
        count = 0;
+       timestamp = ns_to_ktime(0);
        for (i = 0; i < num_fences; ++i) {
-               dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
-                       if (!dma_fence_is_signaled(tmp))
+               dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+                       if (!dma_fence_is_signaled(tmp)) {
                                ++count;
+                       } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+                                           &tmp->flags)) {
+                               if (ktime_after(tmp->timestamp, timestamp))
+                                       timestamp = tmp->timestamp;
+                       } else {
+                               /*
+                                * Use the current time if the fence is
+                                * currently signaling.
+                                */
+                               timestamp = ktime_get();
+                       }
+               }
        }
 
+       /*
+        * If we couldn't find a pending fence, just return a private signaled
+        * fence with the timestamp of the last signaled one.
+        */
        if (count == 0)
-               return dma_fence_get_stub();
+               return dma_fence_allocate_private_stub(timestamp);
 
        array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
        if (!array)
@@ -138,7 +156,7 @@ restart:
        } while (tmp);
 
        if (count == 0) {
-               tmp = dma_fence_get_stub();
+               tmp = dma_fence_allocate_private_stub(ktime_get());
                goto return_tmp;
        }
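
Both call sites now preserve signal-time information: when every input fence has already signaled, the merge returns a private stub carrying the best timestamp it can determine rather than the shared global stub. The selection logic, reduced:

    if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &tmp->flags)) {
            if (ktime_after(tmp->timestamp, timestamp))
                    timestamp = tmp->timestamp; /* latest recorded signal time */
    } else {
            timestamp = ktime_get();            /* mid-signaling: use "now" */
    }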
 
index f177c56269bb095cea113da495067cbca39bea79..8aa8f8cb7071efcce29b62cf5ac3bd96c7308056 100644 (file)
@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);
 
 /**
  * dma_fence_allocate_private_stub - return a private, signaled fence
+ * @timestamp: timestamp when the fence was signaled
  *
  * Return a newly allocated and signaled stub fence.
  */
-struct dma_fence *dma_fence_allocate_private_stub(void)
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
 {
        struct dma_fence *fence;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (fence == NULL)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        dma_fence_init(fence,
                       &dma_fence_stub_ops,
@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
        set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                &fence->flags);
 
-       dma_fence_signal(fence);
+       dma_fence_signal_timestamp(fence, timestamp);
 
        return fence;
 }
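
Note the contract change riding along: the stub allocator now reports failure as NULL rather than ERR_PTR(-ENOMEM), and signals at the caller-supplied time via dma_fence_signal_timestamp(). A caller therefore looks like:

    struct dma_fence *stub = dma_fence_allocate_private_stub(ktime_get());

    if (!stub)          /* was IS_ERR(stub) before this change */
            return -ENOMEM;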
index a7f048048864a20fefd8896ad9943ee179620969..ee899f8e67215f6036734795cb5b90ab77a293a3 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
 #include <linux/dma-map-ops.h>
-#include <linux/dma-resv.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -183,8 +182,6 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
        struct cma_heap_buffer *buffer = dmabuf->priv;
 
-       dma_resv_assert_held(dmabuf->resv);
-
        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;
 
index ee7059399e9c7a31b25551e93e5c5339b3454004..9076d47ed2ef46cdb2e300eca3303ee15eb14639 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-heap.h>
-#include <linux/dma-resv.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -201,8 +200,6 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        struct sg_page_iter piter;
        int ret;
 
-       dma_resv_assert_held(dmabuf->resv);
-
        for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
                struct page *page = sg_page_iter_page(&piter);
 
index 12cf6bb2e3ce370043968935728d25b371894862..c406459996489ce7c7844f5a2687ddd76d4c9113 100644 (file)
@@ -51,8 +51,6 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 {
        struct udmabuf *ubuf = buf->priv;
 
-       dma_resv_assert_held(buf->resv);
-
        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;
 
index cc4dcaea67fa67f4ae5ba312a6c3cdb575663a45..2f1902e5d407536bc4bd23a713fb12c77445d737 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
 #include <linux/efi.h>
+#include <linux/screen_info.h>
+
 #include <asm/efi.h>
 
 #include "efistub.h"
index 4be1c4d1f922becd08ddd13d542f4d2fbd6e1883..a51ec201ca3cbef97c1ef911102b179b9b0efb0f 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/efi.h>
+#include <linux/screen_info.h>
+
 #include <asm/efi.h>
 
 #include "efistub.h"
index afb3b2f5f42530138e71eb0006193e26f1d728c3..9d1f0e04fd565d9aeccbb953f6878d8a6feb222f 100644 (file)
@@ -9,6 +9,9 @@ menuconfig DRM
        tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
        depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
        select DRM_PANEL_ORIENTATION_QUIRKS
+       select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+       select FB_CORE if DRM_FBDEV_EMULATION
+       select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
        select HDMI
        select I2C
        select DMA_SHARED_BUFFER
@@ -80,6 +83,7 @@ config DRM_KUNIT_TEST
        select DRM_BUDDY
        select DRM_EXPORT_FOR_TESTS if m
        select DRM_KUNIT_TEST_HELPERS
+       select DRM_EXEC
        default KUNIT_ALL_TESTS
        help
          This builds unit tests for DRM. This option is not useful for
@@ -95,7 +99,6 @@ config DRM_KUNIT_TEST
 config DRM_KMS_HELPER
        tristate
        depends on DRM
-       select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
        help
          CRTC helpers for KMS drivers.
 
@@ -131,9 +134,7 @@ config DRM_DEBUG_MODESET_LOCK
 
 config DRM_FBDEV_EMULATION
        bool "Enable legacy fbdev support for your modesetting driver"
-       depends on DRM_KMS_HELPER
-       depends on FB=y || FB=DRM_KMS_HELPER
-       select FRAMEBUFFER_CONSOLE if !EXPERT
+       depends on DRM
        select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
        default y
        help
@@ -194,6 +195,27 @@ config DRM_TTM
          GPU memory types. Will be enabled automatically if a device driver
          uses it.
 
+config DRM_TTM_KUNIT_TEST
+        tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+        default n
+        depends on DRM && KUNIT
+        select DRM_TTM
+        select DRM_EXPORT_FOR_TESTS if m
+        select DRM_KUNIT_TEST_HELPERS
+        default KUNIT_ALL_TESTS
+        help
+          Enables unit tests for TTM, a GPU memory manager subsystem used
+          to manage memory buffers. This option is mostly useful for kernel
+          developers.
+
+          If in doubt, say "N".
+
+config DRM_EXEC
+       tristate
+       depends on DRM
+       help
+         Execution context for command submissions
+
 config DRM_BUDDY
        tristate
        depends on DRM
@@ -216,7 +238,7 @@ config DRM_TTM_HELPER
 config DRM_GEM_DMA_HELPER
        tristate
        depends on DRM
-       select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+       select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
        help
          Choose this if you need the GEM DMA helper functions
 
@@ -323,6 +345,8 @@ source "drivers/gpu/drm/v3d/Kconfig"
 
 source "drivers/gpu/drm/vc4/Kconfig"
 
+source "drivers/gpu/drm/loongson/Kconfig"
+
 source "drivers/gpu/drm/etnaviv/Kconfig"
 
 source "drivers/gpu/drm/hisilicon/Kconfig"
index 7a09a89b493befd9d93d65cb4b303f1b57261c1a..215e78e791250b8ad0d0a273e627b84296ecc4d9 100644 (file)
@@ -45,6 +45,7 @@ drm-y := \
        drm_vblank.o \
        drm_vblank_work.o \
        drm_vma_manager.o \
+       drm_gpuva_mgr.o \
        drm_writeback.o
 drm-$(CONFIG_DRM_LEGACY) += \
        drm_agpsupport.o \
@@ -78,6 +79,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
 #
 # Memory-management helpers
 #
+obj-$(CONFIG_DRM_EXEC) += drm_exec.o
 
 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
 
@@ -194,3 +197,4 @@ obj-y                       += gud/
 obj-$(CONFIG_DRM_HYPERV) += hyperv/
 obj-y                  += solomon/
 obj-$(CONFIG_DRM_SPRD) += sprd/
+obj-$(CONFIG_DRM_LOONGSON) += loongson/
index b91e79c721e2bc62793f32997364f51c9fe3ba3d..22d88f8ef5279a0f8ab1b1fc0f318a749b06ff46 100644 (file)
@@ -21,6 +21,7 @@ config DRM_AMDGPU
        select INTERVAL_TREE
        select DRM_BUDDY
        select DRM_SUBALLOC_HELPER
+       select DRM_EXEC
        # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
        # ACPI_VIDEO's dependencies must also be selected.
        select INPUT if ACPI
index 8d16f280b6950357090ac7a91f622381429d8e26..29325981778a0c7c0265b5c0ae2b49dce51cb06d 100644 (file)
@@ -62,7 +62,7 @@ subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
 amdgpu-y := amdgpu_drv.o
 
 # add KMS driver
-amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
        amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
        atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
        amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
@@ -98,7 +98,7 @@ amdgpu-y += \
        vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
        nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
        sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
-       nbio_v7_9.o aqua_vanjaram_reg_init.o
+       nbio_v7_9.o aqua_vanjaram.o
 
 # add DF block
 amdgpu-y += \
index 2f9c14aca73cfbc07b8ff38a1411e7b331dfd265..035437d7d73af991b8375ddbb3aa4610cb3294f8 100644 (file)
@@ -53,7 +53,6 @@
 
 #include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_gem.h>
@@ -1034,7 +1033,6 @@ struct amdgpu_device {
        bool                            has_pr3;
 
        bool                            ucode_sysfs_en;
-       bool                            psp_sysfs_en;
 
        /* Chip product information */
        char                            product_number[20];
@@ -1129,7 +1127,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
                                     u64 reg_addr, u32 reg_data);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
-                            uint32_t reg, uint32_t v);
+                            uint32_t reg, uint32_t v, uint32_t xcc_id);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
@@ -1296,6 +1294,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);
 
@@ -1506,4 +1505,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
 
 int amdgpu_in_reset(struct amdgpu_device *adev);
 
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+extern const struct attribute_group amdgpu_flash_attr_group;
+
 #endif
index 385c6acb5728b7406f6211b3dab9166f1e48914a..a5a2b06c6588cb24891ea01efd4750fc759b1c90 100644 (file)
@@ -868,7 +868,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
        if (!numa_info) {
                struct sysinfo info;
 
-               numa_info = kzalloc(sizeof *numa_info, GFP_KERNEL);
+               numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
                if (!numa_info)
                        return NULL;
 
index b4fcad0e62f7ec23b22a1f27b3a8b352a3e0b1aa..629ca1ad75a86a2cb90bd5c69eec9fb78f473da4 100644 (file)
@@ -830,3 +830,53 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
                return adev->gmc.real_vram_size;
        }
 }
+
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+                           u32 inst)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+       struct amdgpu_ring *kiq_ring = &kiq->ring;
+       struct amdgpu_ring_funcs *ring_funcs;
+       struct amdgpu_ring *ring;
+       int r = 0;
+
+       if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+               return -EINVAL;
+
+       ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
+       if (!ring_funcs)
+               return -ENOMEM;
+
+       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring) {
+               r = -ENOMEM;
+               goto free_ring_funcs;
+       }
+
+       ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
+       ring->doorbell_index = doorbell_off;
+       ring->funcs = ring_funcs;
+
+       spin_lock(&kiq->ring_lock);
+
+       if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+               spin_unlock(&kiq->ring_lock);
+               r = -ENOMEM;
+               goto free_ring;
+       }
+
+       kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
+
+       if (kiq_ring->sched.ready && !adev->job_hang)
+               r = amdgpu_ring_test_helper(kiq_ring);
+
+       spin_unlock(&kiq->ring_lock);
+
+free_ring:
+       kfree(ring);
+
+free_ring_funcs:
+       kfree(ring_funcs);
+
+       return r;
+}
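
amdgpu_amdkfd_unmap_hiq() never touches a live ring: it assembles a throwaway amdgpu_ring carrying only what the KIQ packet writer reads, and unwinds its two allocations in reverse order on failure. The assumed minimal contract:

    /* Fields kiq_unmap_queues() is assumed to consume from the dummy ring:
     *   ring->funcs->type    == AMDGPU_RING_TYPE_COMPUTE
     *   ring->doorbell_index == doorbell of the HIQ being unmapped
     * Everything else stays zeroed by kzalloc(). */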
index 2d0406bff84ecbeca3651d7e74fa3d588cfd161c..082c9f4cfd34416dcc7eb952bfa7abc06ce76a2d 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef AMDGPU_AMDKFD_H_INCLUDED
 #define AMDGPU_AMDKFD_H_INCLUDED
 
+#include <linux/list.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/kthread.h>
@@ -32,7 +33,6 @@
 #include <linux/mmu_notifier.h>
 #include <linux/memremap.h>
 #include <kgd_kfd_interface.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_xcp.h"
@@ -71,8 +71,7 @@ struct kgd_mem {
        struct hmm_range *range;
        struct list_head attachments;
        /* protected by amdkfd_process_info.lock */
-       struct ttm_validate_buffer validate_list;
-       struct ttm_validate_buffer resv_list;
+       struct list_head validate_list;
        uint32_t domain;
        unsigned int mapped_to_gpu_memory;
        uint64_t va;
@@ -252,6 +251,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
 int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
 int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
                                        uint32_t *payload);
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+                               u32 inst);
 
 /* Read user wptr from a specified user address space with page fault
  * disabled. The memory must be pinned and mapped to the hardware when
index 60f9e027fb6607bc1e444d0d03b0e9af10e63bee..e2fed6edbdd0e7d47f60ede939971741786eb5e0 100644 (file)
@@ -23,6 +23,7 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_amdkfd_arcturus.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
 #include "gc/gc_9_4_2_offset.h"
 #include "gc/gc_9_4_2_sh_mask.h"
 #include <uapi/linux/kfd_ioctl.h>
@@ -36,7 +37,7 @@
  * initialize the debug mode registers after it has disabled GFX off during the
  * debug session.
  */
-static uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
                                            bool restore_dbg_registers,
                                            uint32_t vmid)
 {
@@ -107,7 +108,7 @@ static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device
        return data;
 }
 
-static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
                                        uint8_t wave_launch_mode,
                                        uint32_t vmid)
 {
@@ -125,7 +126,8 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch(
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid)
+                                       uint32_t debug_vmid,
+                                       uint32_t inst)
 {
        uint32_t watch_address_high;
        uint32_t watch_address_low;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h
new file mode 100644 (file)
index 0000000..a7bdaf8
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+                                       bool restore_dbg_registers,
+                                       uint32_t vmid);
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+                                       uint8_t wave_launch_mode,
+                                       uint32_t vmid);
index 5b4b7f8b92a51c830a0b44cf72cdc7cd10d9b04a..490c8f5ddb602a71299deca0c95d0f53903c0e6e 100644 (file)
@@ -22,6 +22,7 @@
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
 #include "gc/gc_9_4_3_offset.h"
 #include "gc/gc_9_4_3_sh_mask.h"
 #include "athub/athub_1_8_0_offset.h"
@@ -32,6 +33,7 @@
 #include "soc15.h"
 #include "sdma/sdma_4_4_2_offset.h"
 #include "sdma/sdma_4_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
 
 static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
 {
@@ -361,6 +363,156 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
        return 0;
 }
 
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev,
+                                               bool keep_trap_enabled,
+                                               uint32_t vmid)
+{
+       uint32_t data = 0;
+
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+       return data;
+}
+
+static int kgd_gfx_v9_4_3_validate_trap_override_request(
+                               struct amdgpu_device *adev,
+                               uint32_t trap_override,
+                               uint32_t *trap_mask_supported)
+{
+       *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+                               KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+                               KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+                               KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+                               KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+                               KFD_DBG_TRAP_MASK_FP_INEXACT |
+                               KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+                               KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+                               KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
+                               KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+                               KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+       if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+                       trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+               return -EPERM;
+
+       return 0;
+}
+
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+       uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+       uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+       uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+                               KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+                               KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+                               KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+                               KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+                               KFD_DBG_TRAP_MASK_FP_INEXACT |
+                               KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+                               KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+                               KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+       uint32_t ret;
+
+       ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+       ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+       ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+       return ret;
+}
+
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+       uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+       if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+               ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+       if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+               ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+       return ret;
+}
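
trap_mask_map_sw_to_hw() and trap_mask_map_hw_to_sw() are inverses on the supported bits: the FP/INT/address-watch exceptions travel through the EXCP_EN field, the wave start/end bits through their own fields. A quick self-check (sketch):

    u32 sw = KFD_DBG_TRAP_MASK_FP_OVERFLOW | KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
    u32 hw = trap_mask_map_sw_to_hw(sw);

    WARN_ON(trap_mask_map_hw_to_sw(hw) != sw);  /* round-trips cleanly */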
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override(
+                               struct amdgpu_device *adev,
+                               uint32_t vmid,
+                               uint32_t trap_override,
+                               uint32_t trap_mask_bits,
+                               uint32_t trap_mask_request,
+                               uint32_t *trap_mask_prev,
+                               uint32_t kfd_dbg_trap_cntl_prev)
+{
+       uint32_t data = 0;
+
+       *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
+       data = (trap_mask_bits & trap_mask_request) |
+              (*trap_mask_prev & ~trap_mask_request);
+       data = trap_mask_map_sw_to_hw(data);
+
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+       return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v9_4_3_set_address_watch(
+                               struct amdgpu_device *adev,
+                               uint64_t watch_address,
+                               uint32_t watch_address_mask,
+                               uint32_t watch_id,
+                               uint32_t watch_mode,
+                               uint32_t debug_vmid,
+                               uint32_t inst)
+{
+       uint32_t watch_address_high;
+       uint32_t watch_address_low;
+       uint32_t watch_address_cntl;
+
+       watch_address_cntl = 0;
+       watch_address_low = lower_32_bits(watch_address);
+       watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+       watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+                       TCP_WATCH0_CNTL,
+                       MODE,
+                       watch_mode);
+
+       watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+                       TCP_WATCH0_CNTL,
+                       MASK,
+                       watch_address_mask >> 7);
+
+       watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+                       TCP_WATCH0_CNTL,
+                       VALID,
+                       1);
+
+       WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+                       regTCP_WATCH0_ADDR_H) +
+                       (watch_id * TCP_WATCH_STRIDE)),
+                       watch_address_high);
+
+       WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+                       regTCP_WATCH0_ADDR_L) +
+                       (watch_id * TCP_WATCH_STRIDE)),
+                       watch_address_low);
+
+       return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
+                               uint32_t watch_id)
+{
+       return 0;
+}
+
 const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
        .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@@ -379,6 +531,19 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base =
                                kgd_gfx_v9_set_vm_context_page_table_base,
+       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
        .program_trap_handler_settings =
-                               kgd_gfx_v9_program_trap_handler_settings
+                               kgd_gfx_v9_program_trap_handler_settings,
+       .build_grace_period_packet_info =
+                               kgd_gfx_v9_build_grace_period_packet_info,
+       .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+       .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
+       .disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
+       .validate_trap_override_request =
+                       kgd_gfx_v9_4_3_validate_trap_override_request,
+       .set_wave_launch_trap_override =
+                       kgd_gfx_v9_4_3_set_wave_launch_trap_override,
+       .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
+       .set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
+       .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
 };
index 8ad7a7779e147ea2ed5b55020b5acb2bb33ce282..f1f2c24de081eb1f300b2664a53f221591f50e06 100644 (file)
@@ -886,7 +886,8 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid)
+                                       uint32_t debug_vmid,
+                                       uint32_t inst)
 {
        uint32_t watch_address_high;
        uint32_t watch_address_low;
@@ -968,7 +969,8 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
  *     deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
  */
 void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
-                                       uint32_t *wait_times)
+                                       uint32_t *wait_times,
+                                       uint32_t inst)
 
 {
        *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
@@ -978,7 +980,8 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
                                                uint32_t wait_times,
                                                uint32_t grace_period,
                                                uint32_t *reg_offset,
-                                               uint32_t *reg_data)
+                                               uint32_t *reg_data,
+                                               uint32_t inst)
 {
        *reg_data = wait_times;
 
index e6b70196071ac114fe03ab497c108296fb29ab43..ecaead24e8c96bbe288fffcac96b067f33fb1589 100644 (file)
@@ -44,12 +44,16 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid);
+                                       uint32_t debug_vmid,
+                                       uint32_t inst);
 uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_id);
-void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times);
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+                               uint32_t *wait_times,
+                               uint32_t inst);
 void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
                                               uint32_t wait_times,
                                               uint32_t grace_period,
                                               uint32_t *reg_offset,
-                                              uint32_t *reg_data);
+                                              uint32_t *reg_data,
+                                              uint32_t inst);
index 91c3574ebed303871a372b6d0d9841c34a578d3c..d67d003bada2c4ce322f56adcfd4494fde768a3e 100644 (file)
@@ -637,7 +637,7 @@ static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
 {
        uint32_t data = 0;
 
-       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled);
+       data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
        data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
        data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
 
@@ -743,7 +743,8 @@ static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid)
+                                       uint32_t debug_vmid,
+                                       uint32_t inst)
 {
        uint32_t watch_address_high;
        uint32_t watch_address_low;
index 51d93fb13ea3bf95a4c3ab909d96fc5cc832a075..28963726bc976e905ed50fefe5bc31857eaf20a3 100644 (file)
@@ -822,7 +822,8 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid)
+                                       uint32_t debug_vmid,
+                                       uint32_t inst)
 {
        uint32_t watch_address_high;
        uint32_t watch_address_low;
@@ -903,10 +904,12 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
  *     deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
  */
 void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
-                                       uint32_t *wait_times)
+                                       uint32_t *wait_times,
+                                       uint32_t inst)
 
 {
-       *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+       *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+                       mmCP_IQ_WAIT_TIME2));
 }
 
 void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
@@ -1100,12 +1103,13 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
                uint32_t wait_times,
                uint32_t grace_period,
                uint32_t *reg_offset,
-               uint32_t *reg_data)
+               uint32_t *reg_data,
+               uint32_t inst)
 {
        *reg_data = wait_times;
 
        /*
-        * The CP cannont handle a 0 grace period input and will result in
+        * The CP cannot handle a 0 grace period input and will result in
         * an infinite grace period being set so set to 1 to prevent this.
         */
        if (grace_period == 0)
@@ -1116,7 +1120,8 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
                        SCH_WAVE,
                        grace_period);
 
-       *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+       *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+                       mmCP_IQ_WAIT_TIME2);
 }
 
 void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
index 5f54bff0db496c3fc75da01b8b5d76c9e241d78e..936e501908cef3760865ed6d888127a59bbffcbc 100644 (file)
@@ -89,12 +89,16 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid);
+                                       uint32_t debug_vmid,
+                                       uint32_t inst);
 uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
                                        uint32_t watch_id);
-void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times);
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+                               uint32_t *wait_times,
+                               uint32_t inst);
 void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
                                               uint32_t wait_times,
                                               uint32_t grace_period,
                                               uint32_t *reg_offset,
-                                              uint32_t *reg_data);
+                                              uint32_t *reg_data,
+                                              uint32_t inst);
index f61527b800e62970b03ea769c643486eefc6e7be..a136fba9f29ba454a10794076fa92bd64eafad63 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/sched/task.h>
 #include <drm/ttm/ttm_tt.h>
 
+#include <drm/drm_exec.h>
+
 #include "amdgpu_object.h"
 #include "amdgpu_gem.h"
 #include "amdgpu_vm.h"
@@ -964,28 +966,20 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
                                struct amdkfd_process_info *process_info,
                                bool userptr)
 {
-       struct ttm_validate_buffer *entry = &mem->validate_list;
-       struct amdgpu_bo *bo = mem->bo;
-
-       INIT_LIST_HEAD(&entry->head);
-       entry->num_shared = 1;
-       entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);
        if (userptr)
-               list_add_tail(&entry->head, &process_info->userptr_valid_list);
+               list_add_tail(&mem->validate_list,
+                             &process_info->userptr_valid_list);
        else
-               list_add_tail(&entry->head, &process_info->kfd_bo_list);
+               list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
        mutex_unlock(&process_info->lock);
 }
 
 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
                struct amdkfd_process_info *process_info)
 {
-       struct ttm_validate_buffer *bo_list_entry;
-
-       bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
-       list_del(&bo_list_entry->head);
+       list_del(&mem->validate_list);
        mutex_unlock(&process_info->lock);
 }
 
@@ -1072,13 +1066,12 @@ out:
  * object can track VM updates.
  */
 struct bo_vm_reservation_context {
-       struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
-       unsigned int n_vms;                 /* Number of VMs reserved       */
-       struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
-       struct ww_acquire_ctx ticket;       /* Reservation ticket           */
-       struct list_head list, duplicates;  /* BO lists                     */
-       struct amdgpu_sync *sync;           /* Pointer to sync object       */
-       bool reserved;                      /* Whether BOs are reserved     */
+       /* DRM execution context for the reservation */
+       struct drm_exec exec;
+       /* Number of VMs reserved */
+       unsigned int n_vms;
+       /* Pointer to sync object */
+       struct amdgpu_sync *sync;
 };
 
 enum bo_vm_match {
@@ -1102,35 +1095,26 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
        WARN_ON(!vm);
 
-       ctx->reserved = false;
        ctx->n_vms = 1;
        ctx->sync = &mem->sync;
-
-       INIT_LIST_HEAD(&ctx->list);
-       INIT_LIST_HEAD(&ctx->duplicates);
-
-       ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
-       if (!ctx->vm_pd)
-               return -ENOMEM;
-
-       ctx->kfd_bo.priority = 0;
-       ctx->kfd_bo.tv.bo = &bo->tbo;
-       ctx->kfd_bo.tv.num_shared = 1;
-       list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
-       amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
-
-       ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
-       if (ret) {
-               pr_err("Failed to reserve buffers in ttm.\n");
-               kfree(ctx->vm_pd);
-               ctx->vm_pd = NULL;
-               return ret;
+       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&ctx->exec) {
+               ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+               drm_exec_retry_on_contention(&ctx->exec);
+               if (unlikely(ret))
+                       goto error;
+
+               ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+               drm_exec_retry_on_contention(&ctx->exec);
+               if (unlikely(ret))
+                       goto error;
        }
-
-       ctx->reserved = true;
        return 0;
+
+error:
+       pr_err("Failed to reserve buffers in ttm.\n");
+       drm_exec_fini(&ctx->exec);
+       return ret;
 }
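This hunk is the template for the ttm_eu to drm_exec conversion that runs through the rest of the merge: instead of collecting ttm_validate_buffer entries and reserving them in one ttm_eu_reserve_buffers() call, each object is locked inside drm_exec_until_all_locked(), and drm_exec_retry_on_contention() drops every lock taken so far and restarts the block when a ww-mutex conflict is hit. A self-contained sketch, assuming only the drm_exec API visible above:

#include <drm/drm_exec.h>

static int example_lock_two(struct drm_gem_object *a,
			    struct drm_gem_object *b)
{
	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, a);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto out;

		ret = drm_exec_lock_obj(&exec, b);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto out;
	}

	/* Both reservations stay held until drm_exec_fini(). */
out:
	drm_exec_fini(&exec);
	return ret;
}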
 
 /**
@@ -1147,63 +1131,39 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                                struct amdgpu_vm *vm, enum bo_vm_match map_type,
                                struct bo_vm_reservation_context *ctx)
 {
-       struct amdgpu_bo *bo = mem->bo;
        struct kfd_mem_attachment *entry;
-       unsigned int i;
+       struct amdgpu_bo *bo = mem->bo;
        int ret;
 
-       ctx->reserved = false;
-       ctx->n_vms = 0;
-       ctx->vm_pd = NULL;
        ctx->sync = &mem->sync;
+       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&ctx->exec) {
+               ctx->n_vms = 0;
+               list_for_each_entry(entry, &mem->attachments, list) {
+                       if ((vm && vm != entry->bo_va->base.vm) ||
+                               (entry->is_mapped != map_type
+                               && map_type != BO_VM_ALL))
+                               continue;
 
-       INIT_LIST_HEAD(&ctx->list);
-       INIT_LIST_HEAD(&ctx->duplicates);
-
-       list_for_each_entry(entry, &mem->attachments, list) {
-               if ((vm && vm != entry->bo_va->base.vm) ||
-                       (entry->is_mapped != map_type
-                       && map_type != BO_VM_ALL))
-                       continue;
-
-               ctx->n_vms++;
-       }
-
-       if (ctx->n_vms != 0) {
-               ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
-                                    GFP_KERNEL);
-               if (!ctx->vm_pd)
-                       return -ENOMEM;
-       }
-
-       ctx->kfd_bo.priority = 0;
-       ctx->kfd_bo.tv.bo = &bo->tbo;
-       ctx->kfd_bo.tv.num_shared = 1;
-       list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
-       i = 0;
-       list_for_each_entry(entry, &mem->attachments, list) {
-               if ((vm && vm != entry->bo_va->base.vm) ||
-                       (entry->is_mapped != map_type
-                       && map_type != BO_VM_ALL))
-                       continue;
-
-               amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
-                               &ctx->vm_pd[i]);
-               i++;
-       }
+                       ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
+                                               &ctx->exec, 2);
+                       drm_exec_retry_on_contention(&ctx->exec);
+                       if (unlikely(ret))
+                               goto error;
+                       ++ctx->n_vms;
+               }
 
-       ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
-       if (ret) {
-               pr_err("Failed to reserve buffers in ttm.\n");
-               kfree(ctx->vm_pd);
-               ctx->vm_pd = NULL;
-               return ret;
+               ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
+               drm_exec_retry_on_contention(&ctx->exec);
+               if (unlikely(ret))
+                       goto error;
        }
-
-       ctx->reserved = true;
        return 0;
+
+error:
+       pr_err("Failed to reserve buffers in ttm.\n");
+       drm_exec_fini(&ctx->exec);
+       return ret;
 }
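One subtlety in the loop above: the drm_exec_until_all_locked() block may re-run from the top after contention, so anything it computes must be restart-safe, which is why ctx->n_vms is zeroed at the start of every pass. A sketch of that counter pattern, using a hypothetical list node type:

#include <drm/drm_exec.h>
#include <linux/list.h>

struct example_node {				/* hypothetical */
	struct list_head head;
	struct drm_gem_object *obj;
};

static int example_lock_all(struct drm_exec *exec, struct list_head *nodes,
			    unsigned int *n_locked)
{
	struct example_node *node;
	int ret = 0;

	drm_exec_until_all_locked(exec) {
		*n_locked = 0;			/* reset on every retry */
		list_for_each_entry(node, nodes, head) {
			ret = drm_exec_lock_obj(exec, node->obj);
			drm_exec_retry_on_contention(exec);
			if (unlikely(ret))
				return ret;	/* caller runs drm_exec_fini() */
			++(*n_locked);
		}
	}
	return ret;
}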
 
 /**
@@ -1224,15 +1184,8 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
        if (wait)
                ret = amdgpu_sync_wait(ctx->sync, intr);
 
-       if (ctx->reserved)
-               ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
-       kfree(ctx->vm_pd);
-
+       drm_exec_fini(&ctx->exec);
        ctx->sync = NULL;
-
-       ctx->reserved = false;
-       ctx->vm_pd = NULL;
-
        return ret;
 }
 
@@ -1709,7 +1662,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                        alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
                }
-               xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
+               xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+                                       0 : fpriv->xcp_id;
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_flags = 0;
@@ -1854,7 +1808,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        bool use_release_notifier = (mem->bo->kfd_bo == mem);
        struct kfd_mem_attachment *entry, *tmp;
        struct bo_vm_reservation_context ctx;
-       struct ttm_validate_buffer *bo_list_entry;
        unsigned int mapped_to_gpu_memory;
        int ret;
        bool is_imported = false;
@@ -1882,9 +1835,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        }
 
        /* Make sure restore workers don't access the BO any more */
-       bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
-       list_del(&bo_list_entry->head);
+       list_del(&mem->validate_list);
        mutex_unlock(&process_info->lock);
 
        /* Cleanup user pages and MMU notifiers */
@@ -2451,14 +2403,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
        /* Move all invalidated BOs to the userptr_inval_list */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
-                                validate_list.head)
+                                validate_list)
                if (mem->invalid)
-                       list_move_tail(&mem->validate_list.head,
+                       list_move_tail(&mem->validate_list,
                                       &process_info->userptr_inval_list);
 
        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
-                           validate_list.head) {
+                           validate_list) {
                invalid = mem->invalid;
                if (!invalid)
                        /* BO hasn't been invalidated since the last
@@ -2538,50 +2490,41 @@ unlock_out:
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 {
-       struct amdgpu_bo_list_entry *pd_bo_list_entries;
-       struct list_head resv_list, duplicates;
-       struct ww_acquire_ctx ticket;
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_sync sync;
+       struct drm_exec exec;
 
        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
-       struct ttm_operation_ctx ctx = { false, false };
-       int i, ret;
-
-       pd_bo_list_entries = kcalloc(process_info->n_vms,
-                                    sizeof(struct amdgpu_bo_list_entry),
-                                    GFP_KERNEL);
-       if (!pd_bo_list_entries) {
-               pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
-               ret = -ENOMEM;
-               goto out_no_mem;
-       }
-
-       INIT_LIST_HEAD(&resv_list);
-       INIT_LIST_HEAD(&duplicates);
+       int ret;
 
-       /* Get all the page directory BOs that need to be reserved */
-       i = 0;
-       list_for_each_entry(peer_vm, &process_info->vm_list_head,
-                           vm_list_node)
-               amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
-                                   &pd_bo_list_entries[i++]);
-       /* Add the userptr_inval_list entries to resv_list */
-       list_for_each_entry(mem, &process_info->userptr_inval_list,
-                           validate_list.head) {
-               list_add_tail(&mem->resv_list.head, &resv_list);
-               mem->resv_list.bo = mem->validate_list.bo;
-               mem->resv_list.num_shared = mem->validate_list.num_shared;
-       }
+       amdgpu_sync_create(&sync);
 
+       drm_exec_init(&exec, 0);
        /* Reserve all BOs and page tables for validation */
-       ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
-       WARN(!list_empty(&duplicates), "Duplicates should be empty");
-       if (ret)
-               goto out_free;
+       drm_exec_until_all_locked(&exec) {
+               /* Reserve all the page directories */
+               list_for_each_entry(peer_vm, &process_info->vm_list_head,
+                                   vm_list_node) {
+                       ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(ret))
+                               goto unreserve_out;
+               }
 
-       amdgpu_sync_create(&sync);
+               /* Reserve the userptr_inval_list entries */
+               list_for_each_entry(mem, &process_info->userptr_inval_list,
+                                   validate_list) {
+                       struct drm_gem_object *gobj;
+
+                       gobj = &mem->bo->tbo.base;
+                       ret = drm_exec_prepare_obj(&exec, gobj, 1);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(ret))
+                               goto unreserve_out;
+               }
+       }
 
        ret = process_validate_vms(process_info);
        if (ret)
@@ -2590,7 +2533,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        /* Validate BOs and update GPUVM page tables */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_inval_list,
-                                validate_list.head) {
+                                validate_list) {
                struct kfd_mem_attachment *attachment;
 
                bo = mem->bo;
@@ -2632,12 +2575,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        ret = process_update_pds(process_info, &sync);
 
 unreserve_out:
-       ttm_eu_backoff_reservation(&ticket, &resv_list);
+       drm_exec_fini(&exec);
        amdgpu_sync_wait(&sync, false);
        amdgpu_sync_free(&sync);
-out_free:
-       kfree(pd_bo_list_entries);
-out_no_mem:
 
        return ret;
 }
@@ -2653,7 +2593,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
 
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_inval_list,
-                                validate_list.head) {
+                                validate_list) {
                bool valid;
 
                /* keep mem without hmm range at userptr_inval_list */
@@ -2677,7 +2617,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
                        continue;
                }
 
-               list_move_tail(&mem->validate_list.head,
+               list_move_tail(&mem->validate_list,
                               &process_info->userptr_valid_list);
        }
 
@@ -2787,50 +2727,44 @@ unlock_out:
  */
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 {
-       struct amdgpu_bo_list_entry *pd_bo_list;
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem;
-       struct bo_vm_reservation_context ctx;
        struct amdgpu_amdkfd_fence *new_fence;
-       int ret = 0, i;
        struct list_head duplicate_save;
        struct amdgpu_sync sync_obj;
        unsigned long failed_size = 0;
        unsigned long total_size = 0;
+       struct drm_exec exec;
+       int ret;
 
        INIT_LIST_HEAD(&duplicate_save);
-       INIT_LIST_HEAD(&ctx.list);
-       INIT_LIST_HEAD(&ctx.duplicates);
 
-       pd_bo_list = kcalloc(process_info->n_vms,
-                            sizeof(struct amdgpu_bo_list_entry),
-                            GFP_KERNEL);
-       if (!pd_bo_list)
-               return -ENOMEM;
-
-       i = 0;
        mutex_lock(&process_info->lock);
-       list_for_each_entry(peer_vm, &process_info->vm_list_head,
-                       vm_list_node)
-               amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
 
-       /* Reserve all BOs and page tables/directory. Add all BOs from
-        * kfd_bo_list to ctx.list
-        */
-       list_for_each_entry(mem, &process_info->kfd_bo_list,
-                           validate_list.head) {
-
-               list_add_tail(&mem->resv_list.head, &ctx.list);
-               mem->resv_list.bo = mem->validate_list.bo;
-               mem->resv_list.num_shared = mem->validate_list.num_shared;
-       }
+       drm_exec_init(&exec, 0);
+       drm_exec_until_all_locked(&exec) {
+               list_for_each_entry(peer_vm, &process_info->vm_list_head,
+                                   vm_list_node) {
+                       ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(ret))
+                               goto ttm_reserve_fail;
+               }
 
-       ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-                                    false, &duplicate_save);
-       if (ret) {
-               pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
-               goto ttm_reserve_fail;
+               /* Reserve all BOs and page tables/directory. Add all BOs
+                * from kfd_bo_list to the drm_exec context.
+                */
+               list_for_each_entry(mem, &process_info->kfd_bo_list,
+                                   validate_list) {
+                       struct drm_gem_object *gobj;
+
+                       gobj = &mem->bo->tbo.base;
+                       ret = drm_exec_prepare_obj(&exec, gobj, 1);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(ret))
+                               goto ttm_reserve_fail;
+               }
        }
 
        amdgpu_sync_create(&sync_obj);
@@ -2848,7 +2782,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
        /* Validate BOs and map them to GPUVM (update VM page tables). */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
-                           validate_list.head) {
+                           validate_list) {
 
                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
@@ -2881,6 +2815,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                        if (!attachment->is_mapped)
                                continue;
 
+                       if (attachment->bo_va->base.bo->tbo.pin_count)
+                               continue;
+
                        kfd_mem_dmaunmap_attachment(mem, attachment);
                        ret = update_gpuvm_pte(mem, attachment, &sync_obj);
                        if (ret) {
@@ -2921,8 +2858,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        *ef = dma_fence_get(&new_fence->base);
 
        /* Attach new eviction fence to all BOs except pinned ones */
-       list_for_each_entry(mem, &process_info->kfd_bo_list,
-               validate_list.head) {
+       list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
                if (mem->bo->tbo.pin_count)
                        continue;
 
@@ -2941,11 +2877,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        }
 
 validate_map_fail:
-       ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
        amdgpu_sync_free(&sync_obj);
 ttm_reserve_fail:
+       drm_exec_fini(&exec);
        mutex_unlock(&process_info->lock);
-       kfree(pd_bo_list);
        return ret;
 }
 
index f4e3c133a16ca0c56dd4e3b8d893d877aaf0928c..dce9e7d5e4ec672827f574fb64816ca205ef96ee 100644 (file)
@@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct atom_context *ctx = adev->mode_info.atom_context;
 
-       return sysfs_emit(buf, "%s\n", ctx->vbios_version);
+       return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
 }
 
 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
index d6d986be906a963b3e95aba7f9033f6e02f1b6d7..6f241c5746658eb1fc11a5a8c1939f7441281312 100644 (file)
@@ -74,24 +74,29 @@ struct atpx_mux {
        u16 mux;
 } __packed;
 
-bool amdgpu_has_atpx(void) {
+bool amdgpu_has_atpx(void)
+{
        return amdgpu_atpx_priv.atpx_detected;
 }
 
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+bool amdgpu_has_atpx_dgpu_power_cntl(void)
+{
        return amdgpu_atpx_priv.atpx.functions.power_cntl;
 }
 
-bool amdgpu_is_atpx_hybrid(void) {
+bool amdgpu_is_atpx_hybrid(void)
+{
        return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
-bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+bool amdgpu_atpx_dgpu_req_power_for_displays(void)
+{
        return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
 
 #if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void) {
+void *amdgpu_atpx_get_dhandle(void)
+{
        return amdgpu_atpx_priv.dhandle;
 }
 #endif
@@ -113,6 +118,8 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
        union acpi_object atpx_arg_elements[2];
        struct acpi_object_list atpx_arg;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_device *adev = container_of(handle, struct acpi_device, handle);
+       struct device *dev = &adev->dev;
 
        atpx_arg.count = 2;
        atpx_arg.pointer = &atpx_arg_elements[0];
@@ -134,8 +141,8 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
 
        /* Fail only if calling the method fails and ATPX is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk("failed to evaluate ATPX got %s\n",
-                      acpi_format_exception(status));
+               dev_err(dev, "failed to evaluate ATPX got %s\n",
+                       acpi_format_exception(status));
                kfree(buffer.pointer);
                return NULL;
        }
@@ -176,6 +183,8 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
        u32 valid_bits = 0;
+       struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle);
+       struct device *dev = &adev->dev;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
@@ -190,7 +199,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 
                size = *(u16 *) info->buffer.pointer;
                if (size < 10) {
-                       printk("ATPX buffer is too small: %zu\n", size);
+                       dev_err(dev, "ATPX buffer is too small: %zu\n", size);
                        kfree(info);
                        return -EINVAL;
                }
@@ -223,11 +232,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
                if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
-                       printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+                       dev_info(dev, "ATPX Hybrid Graphics, forcing to ATPX\n");
                        atpx->functions.power_cntl = true;
                        atpx->is_hybrid = false;
                } else {
-                       printk("ATPX Hybrid Graphics\n");
+                       dev_info(dev, "ATPX Hybrid Graphics\n");
                        /*
                         * Disable legacy PM methods only when pcie port PM is usable,
                         * otherwise the device might fail to power off or power on.
@@ -260,6 +269,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
        struct atpx_verify_interface output;
        size_t size;
        int err = 0;
+       struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle);
+       struct device *dev = &adev->dev;
 
        info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
        if (!info)
@@ -278,8 +289,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
        memcpy(&output, info->buffer.pointer, size);
 
        /* TODO: check version? */
-       printk("ATPX version %u, functions 0x%08x\n",
-              output.version, output.function_bits);
+       dev_info(dev, "ATPX version %u, functions 0x%08x\n",
+                output.version, output.function_bits);
 
        amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
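The logging conversion in this file replaces bare printk() with dev_err()/dev_info() so messages carry the device prefix. The hunks obtain the struct device via a container_of() cast on the acpi_handle; a sketch of the same idea using the lookup helper acpi_fetch_acpi_dev() instead, which resolves the handle and may return NULL:

#include <linux/acpi.h>
#include <linux/device.h>

static void example_log_atpx_version(acpi_handle handle, u32 version)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (adev)
		dev_info(&adev->dev, "ATPX version %u\n", version);
	else
		pr_info("ATPX version %u\n", version);	/* no device found */
}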
 
index 252a876b072586a6cf2b8a01b529b1c7d5857718..b6298e901cbd4f44752105d47841db3bd61ed568 100644 (file)
@@ -28,6 +28,7 @@
  *    Christian König <deathsimple@vodafone.de>
  */
 
+#include <linux/sort.h>
 #include <linux/uaccess.h>
 
 #include "amdgpu.h"
@@ -50,13 +51,20 @@ static void amdgpu_bo_list_free(struct kref *ref)
                                                   refcount);
        struct amdgpu_bo_list_entry *e;
 
-       amdgpu_bo_list_for_each_entry(e, list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+       amdgpu_bo_list_for_each_entry(e, list)
+               amdgpu_bo_unref(&e->bo);
+       call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+}
 
-               amdgpu_bo_unref(&bo);
-       }
+static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b)
+{
+       const struct amdgpu_bo_list_entry *a = _a, *b = _b;
 
-       call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+       if (a->priority > b->priority)
+               return 1;
+       if (a->priority < b->priority)
+               return -1;
+       return 0;
 }
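This comparator feeds the library heapsort that replaces the per-submission bucket sort removed below (amdgpu_bo_list_get_list): entries are now ordered by priority once at list creation. Worth noting that sort() from <linux/sort.h> is in-place and O(n log n) but not stable, unlike the bucket sort it replaces. A self-contained sketch of the call, with a hypothetical entry type:

#include <linux/sort.h>

struct example_entry {				/* hypothetical */
	unsigned int priority;
};

static int example_cmp(const void *_a, const void *_b)
{
	const struct example_entry *a = _a, *b = _b;

	if (a->priority > b->priority)
		return 1;
	if (a->priority < b->priority)
		return -1;
	return 0;
}

static void example_sort_by_priority(struct example_entry *array, size_t n)
{
	/* NULL swap callback: sort() uses its generic swap. */
	sort(array, n, sizeof(*array), example_cmp, NULL);
}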
 
 int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
@@ -118,7 +126,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
                entry->priority = min(info[i].bo_priority,
                                      AMDGPU_BO_LIST_MAX_PRIORITY);
-               entry->tv.bo = &bo->tbo;
+               entry->bo = bo;
 
                if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
                        list->gds_obj = bo;
@@ -133,6 +141,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
        list->first_userptr = first_userptr;
        list->num_entries = num_entries;
+       sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
+            amdgpu_bo_list_entry_cmp, NULL);
 
        trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
@@ -141,16 +151,10 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
        return 0;
 
 error_free:
-       for (i = 0; i < last_entry; ++i) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
-               amdgpu_bo_unref(&bo);
-       }
-       for (i = first_userptr; i < num_entries; ++i) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
-               amdgpu_bo_unref(&bo);
-       }
+       for (i = 0; i < last_entry; ++i)
+               amdgpu_bo_unref(&array[i].bo);
+       for (i = first_userptr; i < num_entries; ++i)
+               amdgpu_bo_unref(&array[i].bo);
        kvfree(list);
        return r;
 
@@ -182,41 +186,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
        return -ENOENT;
 }
 
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-                            struct list_head *validated)
-{
-       /* This is based on the bucket sort with O(n) time complexity.
-        * An item with priority "i" is added to bucket[i]. The lists are then
-        * concatenated in descending order.
-        */
-       struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
-       struct amdgpu_bo_list_entry *e;
-       unsigned i;
-
-       for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
-               INIT_LIST_HEAD(&bucket[i]);
-
-       /* Since buffers which appear sooner in the relocation list are
-        * likely to be used more often than buffers which appear later
-        * in the list, the sort mustn't change the ordering of buffers
-        * with the same priority, i.e. it must be stable.
-        */
-       amdgpu_bo_list_for_each_entry(e, list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-               unsigned priority = e->priority;
-
-               if (!bo->parent)
-                       list_add_tail(&e->tv.head, &bucket[priority]);
-
-               e->user_pages = NULL;
-               e->range = NULL;
-       }
-
-       /* Connect the sorted buckets in the output list. */
-       for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
-               list_splice(&bucket[i], validated);
-}
-
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 {
        kref_put(&list->refcount, amdgpu_bo_list_free);
index ededdc01ca282b83f5d688afd6d0fa9d36b12946..26c01cb131f21b93076d2b0f2100311e14bc817a 100644 (file)
@@ -23,7 +23,6 @@
 #ifndef __AMDGPU_BO_LIST_H__
 #define __AMDGPU_BO_LIST_H__
 
-#include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/amdgpu_drm.h>
 
 struct hmm_range;
@@ -36,7 +35,7 @@ struct amdgpu_bo_va;
 struct amdgpu_fpriv;
 
 struct amdgpu_bo_list_entry {
-       struct ttm_validate_buffer      tv;
+       struct amdgpu_bo                *bo;
        struct amdgpu_bo_va             *bo_va;
        uint32_t                        priority;
        struct page                     **user_pages;
@@ -60,8 +59,6 @@ struct amdgpu_bo_list {
 
 int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
                       struct amdgpu_bo_list **result);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-                            struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
                                      struct drm_amdgpu_bo_list_entry **info_param);
index 040f4cb6ab2d0fb20ac7d97af1a7d6d10f246c03..977e1804718d0671e820a2dd6c22226a1b0f4264 100644 (file)
@@ -65,6 +65,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
        }
 
        amdgpu_sync_create(&p->sync);
+       drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
        return 0;
 }
 
@@ -125,7 +126,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
                                   uint32_t *offset)
 {
        struct drm_gem_object *gobj;
-       struct amdgpu_bo *bo;
        unsigned long size;
        int r;
 
@@ -133,18 +133,16 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
        if (gobj == NULL)
                return -EINVAL;
 
-       bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-       p->uf_entry.priority = 0;
-       p->uf_entry.tv.bo = &bo->tbo;
+       p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        drm_gem_object_put(gobj);
 
-       size = amdgpu_bo_size(bo);
+       size = amdgpu_bo_size(p->uf_bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }
 
-       if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+       if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }
@@ -154,7 +152,7 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
        return 0;
 
 error_unref:
-       amdgpu_bo_unref(&bo);
+       amdgpu_bo_unref(&p->uf_bo);
        return r;
 }
 
@@ -311,7 +309,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                goto free_all_kdata;
        }
 
-       if (p->uf_entry.tv.bo)
+       if (p->uf_bo)
                p->gang_leader->uf_addr = uf_offset;
        kvfree(chunk_array);
 
@@ -356,7 +354,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
        ib = &job->ibs[job->num_ibs++];
 
        /* MM engine doesn't support user fences */
-       if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+       if (p->uf_bo && ring->funcs->no_user_fence)
                return -EINVAL;
 
        if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
@@ -841,55 +839,18 @@ retry:
        return r;
 }
 
-static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
-                           struct list_head *validated)
-{
-       struct ttm_operation_ctx ctx = { true, false };
-       struct amdgpu_bo_list_entry *lobj;
-       int r;
-
-       list_for_each_entry(lobj, validated, tv.head) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-               struct mm_struct *usermm;
-
-               usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
-               if (usermm && usermm != current->mm)
-                       return -EPERM;
-
-               if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
-                   lobj->user_invalidated && lobj->user_pages) {
-                       amdgpu_bo_placement_from_domain(bo,
-                                                       AMDGPU_GEM_DOMAIN_CPU);
-                       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-                       if (r)
-                               return r;
-
-                       amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
-                                                    lobj->user_pages);
-               }
-
-               r = amdgpu_cs_bo_validate(p, bo);
-               if (r)
-                       return r;
-
-               kvfree(lobj->user_pages);
-               lobj->user_pages = NULL;
-       }
-       return 0;
-}
-
 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
-       struct list_head duplicates;
+       struct drm_gem_object *obj;
+       unsigned long index;
        unsigned int i;
        int r;
 
-       INIT_LIST_HEAD(&p->validated);
-
        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
@@ -909,29 +870,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
        mutex_lock(&p->bo_list->bo_list_mutex);
 
-       /* One for TTM and one for each CS job */
-       amdgpu_bo_list_for_each_entry(e, p->bo_list)
-               e->tv.num_shared = 1 + p->gang_size;
-       p->uf_entry.tv.num_shared = 1 + p->gang_size;
-
-       amdgpu_bo_list_get_list(p->bo_list, &p->validated);
-
-       INIT_LIST_HEAD(&duplicates);
-       amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-
-       /* Two for VM updates, one for TTM and one for each CS job */
-       p->vm_pd.tv.num_shared = 3 + p->gang_size;
-
-       if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
-               list_add(&p->uf_entry.tv.head, &p->validated);
-
        /* Get userptr backing pages. If pages are updated after registered
         * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
         * amdgpu_ttm_backend_bind() to flush and invalidate new pages
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                bool userpage_invalidated = false;
+               struct amdgpu_bo *bo = e->bo;
                int i;
 
                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
@@ -959,18 +904,56 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                e->user_invalidated = userpage_invalidated;
        }
 
-       r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-                                  &duplicates);
-       if (unlikely(r != 0)) {
-               if (r != -ERESTARTSYS)
-                       DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
-               goto out_free_user_pages;
+       drm_exec_until_all_locked(&p->exec) {
+               r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
+               drm_exec_retry_on_contention(&p->exec);
+               if (unlikely(r))
+                       goto out_free_user_pages;
+
+               amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+                       /* One fence for TTM and one for each CS job */
+                       r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
+                                                1 + p->gang_size);
+                       drm_exec_retry_on_contention(&p->exec);
+                       if (unlikely(r))
+                               goto out_free_user_pages;
+
+                       e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
+               }
+
+               if (p->uf_bo) {
+                       r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
+                                                1 + p->gang_size);
+                       drm_exec_retry_on_contention(&p->exec);
+                       if (unlikely(r))
+                               goto out_free_user_pages;
+               }
        }
 
-       amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+       amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+               struct mm_struct *usermm;
 
-               e->bo_va = amdgpu_vm_bo_find(vm, bo);
+               usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
+               if (usermm && usermm != current->mm) {
+                       r = -EPERM;
+                       goto out_free_user_pages;
+               }
+
+               if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
+                   e->user_invalidated && e->user_pages) {
+                       amdgpu_bo_placement_from_domain(e->bo,
+                                                       AMDGPU_GEM_DOMAIN_CPU);
+                       r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
+                                           &ctx);
+                       if (r)
+                               goto out_free_user_pages;
+
+                       amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
+                                                    e->user_pages);
+               }
+
+               kvfree(e->user_pages);
+               e->user_pages = NULL;
        }
 
        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -982,25 +965,21 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                      amdgpu_cs_bo_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
-               goto error_validate;
+               goto out_free_user_pages;
        }
 
-       r = amdgpu_cs_list_validate(p, &duplicates);
-       if (r)
-               goto error_validate;
-
-       r = amdgpu_cs_list_validate(p, &p->validated);
-       if (r)
-               goto error_validate;
-
-       if (p->uf_entry.tv.bo) {
-               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+       drm_exec_for_each_locked_object(&p->exec, index, obj) {
+               r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
+               if (unlikely(r))
+                       goto out_free_user_pages;
+       }
 
-               r = amdgpu_ttm_alloc_gart(&uf->tbo);
-               if (r)
-                       goto error_validate;
+       if (p->uf_bo) {
+               r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
+               if (unlikely(r))
+                       goto out_free_user_pages;
 
-               p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
+               p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
        }
 
        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
@@ -1012,12 +991,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                         p->bo_list->oa_obj);
        return 0;
 
-error_validate:
-       ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
 out_free_user_pages:
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+               struct amdgpu_bo *bo = e->bo;
 
                if (!e->user_pages)
                        continue;
@@ -1123,7 +1099,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
-       struct amdgpu_bo *bo;
        unsigned int i;
        int r;
 
@@ -1152,11 +1127,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        }
 
        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-               /* ignore duplicates */
-               bo = ttm_to_amdgpu_bo(e->tv.bo);
-               if (!bo)
-                       continue;
-
                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;
@@ -1194,7 +1164,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-                       struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+                       struct amdgpu_bo *bo = e->bo;
 
                        /* ignore duplicates */
                        if (!bo)
@@ -1211,8 +1181,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_gpu_scheduler *sched;
-       struct amdgpu_bo_list_entry *e;
+       struct drm_gem_object *obj;
        struct dma_fence *fence;
+       unsigned long index;
        unsigned int i;
        int r;
 
@@ -1223,8 +1194,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                return r;
        }
 
-       list_for_each_entry(e, &p->validated, tv.head) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+       drm_exec_for_each_locked_object(&p->exec, index, obj) {
+               struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
                struct dma_resv *resv = bo->tbo.base.resv;
                enum amdgpu_sync_mode sync_mode;
 
@@ -1288,6 +1260,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;
+       struct drm_gem_object *gobj;
+       unsigned long index;
        unsigned int i;
        uint64_t seq;
        int r;
@@ -1326,9 +1300,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
         */
        r = 0;
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
-               r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+               r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
+                                                       e->range);
                e->range = NULL;
        }
        if (r) {
@@ -1338,20 +1311,22 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        p->fence = dma_fence_get(&leader->base.s_fence->finished);
-       list_for_each_entry(e, &p->validated, tv.head) {
+       drm_exec_for_each_locked_object(&p->exec, index, gobj) {
+               ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
 
                /* Everybody except for the gang leader uses READ */
                for (i = 0; i < p->gang_size; ++i) {
                        if (p->jobs[i] == leader)
                                continue;
 
-                       dma_resv_add_fence(e->tv.bo->base.resv,
+                       dma_resv_add_fence(gobj->resv,
                                           &p->jobs[i]->base.s_fence->finished,
                                           DMA_RESV_USAGE_READ);
                }
 
-               /* The gang leader is remembered as writer */
-               e->tv.num_shared = 0;
+               /* The gang leader is remembered as writer */
+               dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
        }
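With drm_exec there is no ttm_eu_fence_buffer_objects() step at submit: fences are attached explicitly with the dma_resv usage they need, READ for gang members and WRITE for the gang leader, as the loop above does. A minimal sketch, assuming fence slots were already reserved (e.g. by drm_exec_prepare_obj()):

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

static void example_attach_fences(struct drm_gem_object *obj,
				  struct dma_fence *member_fence,
				  struct dma_fence *leader_fence)
{
	dma_resv_add_fence(obj->resv, member_fence, DMA_RESV_USAGE_READ);
	dma_resv_add_fence(obj->resv, leader_fence, DMA_RESV_USAGE_WRITE);
}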
 
        seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
@@ -1367,7 +1342,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        cs->out.handle = seq;
        leader->uf_sequence = seq;
 
-       amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+       amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
        for (i = 0; i < p->gang_size; ++i) {
                amdgpu_job_free_resources(p->jobs[i]);
                trace_amdgpu_cs_ioctl(p->jobs[i]);
@@ -1376,7 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-       ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 
        mutex_unlock(&p->adev->notifier_lock);
        mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1389,6 +1363,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
        unsigned int i;
 
        amdgpu_sync_free(&parser->sync);
+       drm_exec_fini(&parser->exec);
+
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
@@ -1409,11 +1385,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
                if (parser->jobs[i])
                        amdgpu_job_free(parser->jobs[i]);
        }
-       if (parser->uf_entry.tv.bo) {
-               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
-
-               amdgpu_bo_unref(&uf);
-       }
+       amdgpu_bo_unref(&parser->uf_bo);
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -1474,7 +1446,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        return 0;
 
 error_backoff:
-       ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
        mutex_unlock(&parser.bo_list->bo_list_mutex);
 
 error_fini:
@@ -1809,7 +1780,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
        *map = mapping;
 
        /* Double check that the BO is reserved by this CS */
-       if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+       if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
                return -EINVAL;
 
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
index fb3e3d56d427daaf404cd71ef73d6615320bbea9..39c33ad100cb7cc71be5eeef8b0f23b666e2a9ff 100644 (file)
@@ -24,6 +24,7 @@
 #define __AMDGPU_CS_H__
 
 #include <linux/ww_mutex.h>
+#include <drm/drm_exec.h>
 
 #include "amdgpu_job.h"
 #include "amdgpu_bo_list.h"
@@ -62,11 +63,9 @@ struct amdgpu_cs_parser {
        struct amdgpu_job       *gang_leader;
 
        /* buffer objects */
-       struct ww_acquire_ctx           ticket;
+       struct drm_exec                 exec;
        struct amdgpu_bo_list           *bo_list;
        struct amdgpu_mn                *mn;
-       struct amdgpu_bo_list_entry     vm_pd;
-       struct list_head                validated;
        struct dma_fence                *fence;
        uint64_t                        bytes_moved_threshold;
        uint64_t                        bytes_moved_vis_threshold;
@@ -74,7 +73,7 @@ struct amdgpu_cs_parser {
        uint64_t                        bytes_moved_vis;
 
        /* user fence */
-       struct amdgpu_bo_list_entry     uf_entry;
+       struct amdgpu_bo                *uf_bo;
 
        unsigned                        num_post_deps;
        struct amdgpu_cs_post_dep       *post_deps;
index 23d054526e7c7ba21673d0b9b64e4e3bef718bbc..7200110197415f530243907691526b14084086cf 100644 (file)
@@ -22,6 +22,8 @@
  * Author: Monk.liu@amd.com
  */
 
+#include <drm/drm_exec.h>
+
 #include "amdgpu.h"
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
@@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
                          uint64_t csa_addr, uint32_t size)
 {
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
+       struct drm_exec exec;
        int r;
 
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
-       csa_tv.bo = &bo->tbo;
-       csa_tv.num_shared = 1;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-               return r;
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               if (likely(!r))
+                       r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r)) {
+                       DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+                       goto error;
+               }
        }
 
        *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!*bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
-               DRM_ERROR("failed to create bo_va for static CSA\n");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto error;
        }
 
        r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
@@ -99,48 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_del(adev, *bo_va);
-               ttm_eu_backoff_reservation(&ticket, &list);
-               return r;
+               goto error;
        }
 
-       ttm_eu_backoff_reservation(&ticket, &list);
-       return 0;
+error:
+       drm_exec_fini(&exec);
+       return r;
 }
 
 int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                            struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
                            uint64_t csa_addr)
 {
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
+       struct drm_exec exec;
        int r;
 
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
-       csa_tv.bo = &bo->tbo;
-       csa_tv.num_shared = 1;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-               return r;
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               if (likely(!r))
+                       r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r)) {
+                       DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+                       goto error;
+               }
        }
 
        r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
        if (r) {
                DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
-               ttm_eu_backoff_reservation(&ticket, &list);
-               return r;
+               goto error;
        }
 
        amdgpu_vm_bo_del(adev, bo_va);
 
-       ttm_eu_backoff_reservation(&ticket, &list);
-
-       return 0;
+error:
+       drm_exec_fini(&exec);
+       return r;
 }
index 56e89e76ff179a6eaa83c412e270328c0e143fb1..00ab0b3c82771ca910e832e14aec9f0032c56e8a 100644 (file)
@@ -154,7 +154,7 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
                } else {
                        r = get_user(value, (uint32_t *)buf);
                        if (!r)
-                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
+                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
                }
                if (r) {
                        result = r;
@@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
                } else {
                        r = get_user(value, (uint32_t *)buf);
                        if (!r)
-                               amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+                               amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
                }
                if (r) {
                        result = r;
index a92c6189b4b60ce7c7651a034e2a728a58c1c17c..dc0e5227119b1214f050f4f267067bbf57b69791 100644 (file)
@@ -159,7 +159,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
        return sysfs_emit(buf, "%llu\n", cnt);
 }
 
-static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
+static DEVICE_ATTR(pcie_replay_count, 0444,
                amdgpu_device_get_pcie_replay_count, NULL);
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
@@ -183,7 +183,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
        return sysfs_emit(buf, "%s\n", adev->product_name);
 }
 
-static DEVICE_ATTR(product_name, S_IRUGO,
+static DEVICE_ATTR(product_name, 0444,
                amdgpu_device_get_product_name, NULL);
 
 /**
@@ -205,7 +205,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
        return sysfs_emit(buf, "%s\n", adev->product_number);
 }
 
-static DEVICE_ATTR(product_number, S_IRUGO,
+static DEVICE_ATTR(product_number, 0444,
                amdgpu_device_get_product_number, NULL);
 
 /**
@@ -227,7 +227,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
        return sysfs_emit(buf, "%s\n", adev->serial);
 }
 
-static DEVICE_ATTR(serial_number, S_IRUGO,
+static DEVICE_ATTR(serial_number, 0444,
                amdgpu_device_get_serial_number, NULL);
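The permission changes in this file are the checkpatch-preferred spelling: 0444 is identical to S_IRUGO. For read-only attributes whose handler follows the <name>_show convention, DEVICE_ATTR_RO() is the usual shorthand; a sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", "example");
}
/* Equivalent to DEVICE_ATTR(example, 0444, example_show, NULL) */
static DEVICE_ATTR_RO(example);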
 
 /**
@@ -481,8 +481,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 /*
  * MMIO register read with bytes helper functions
  * @offset:bytes offset from MMIO start
- *
-*/
+ */
 
 /**
  * amdgpu_mm_rreg8 - read a memory mapped IO register
@@ -506,8 +505,8 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
  * MMIO register write with bytes helper functions
  * @offset:bytes offset from MMIO start
  * @value: the value want to be written to the register
- *
-*/
+ */
+
 /**
  * amdgpu_mm_wreg8 - write a memory mapped IO register
  *
@@ -571,7 +570,8 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
  * this function is invoked only for the debugfs register access
  */
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
-                            uint32_t reg, uint32_t v)
+                            uint32_t reg, uint32_t v,
+                            uint32_t xcc_id)
 {
        if (amdgpu_device_skip_hw_access(adev))
                return;
@@ -580,7 +580,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-                       return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
+                       return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
@@ -588,94 +588,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
        }
 }
 
-/**
- * amdgpu_mm_rdoorbell - read a doorbell dword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- *
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (CIK).
- */
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
-{
-       if (amdgpu_device_skip_hw_access(adev))
-               return 0;
-
-       if (index < adev->doorbell.num_kernel_doorbells) {
-               return readl(adev->doorbell.ptr + index);
-       } else {
-               DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
-               return 0;
-       }
-}
-
-/**
- * amdgpu_mm_wdoorbell - write a doorbell dword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- * @v: value to write
- *
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (CIK).
- */
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
-{
-       if (amdgpu_device_skip_hw_access(adev))
-               return;
-
-       if (index < adev->doorbell.num_kernel_doorbells) {
-               writel(v, adev->doorbell.ptr + index);
-       } else {
-               DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
-       }
-}
-
-/**
- * amdgpu_mm_rdoorbell64 - read a doorbell Qword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- *
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (VEGA10+).
- */
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
-{
-       if (amdgpu_device_skip_hw_access(adev))
-               return 0;
-
-       if (index < adev->doorbell.num_kernel_doorbells) {
-               return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
-       } else {
-               DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
-               return 0;
-       }
-}
-
-/**
- * amdgpu_mm_wdoorbell64 - write a doorbell Qword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- * @v: value to write
- *
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (VEGA10+).
- */
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
-{
-       if (amdgpu_device_skip_hw_access(adev))
-               return;
-
-       if (index < adev->doorbell.num_kernel_doorbells) {
-               atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
-       } else {
-               DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
-       }
-}
-
 /**
  * amdgpu_device_indirect_rreg - read an indirect register
  *
@@ -1078,7 +990,7 @@ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
  * @registers: pointer to the register array
  * @array_size: size of the register array
  *
- * Programs an array or registers with and and or masks.
+ * Programs an array of registers with AND and OR masks.
  * This is a helper for setting golden registers.
  */
 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
@@ -1135,83 +1047,6 @@ int amdgpu_device_pci_reset(struct amdgpu_device *adev)
        return pci_reset_function(adev->pdev);
 }
 
-/*
- * GPU doorbell aperture helpers function.
- */
-/**
- * amdgpu_device_doorbell_init - Init doorbell driver information.
- *
- * @adev: amdgpu_device pointer
- *
- * Init doorbell driver information (CIK)
- * Returns 0 on success, error on failure.
- */
-static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
-{
-
-       /* No doorbell on SI hardware generation */
-       if (adev->asic_type < CHIP_BONAIRE) {
-               adev->doorbell.base = 0;
-               adev->doorbell.size = 0;
-               adev->doorbell.num_kernel_doorbells = 0;
-               adev->doorbell.ptr = NULL;
-               return 0;
-       }
-
-       if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
-               return -EINVAL;
-
-       amdgpu_asic_init_doorbell_index(adev);
-
-       /* doorbell bar mapping */
-       adev->doorbell.base = pci_resource_start(adev->pdev, 2);
-       adev->doorbell.size = pci_resource_len(adev->pdev, 2);
-
-       if (adev->enable_mes) {
-               adev->doorbell.num_kernel_doorbells =
-                       adev->doorbell.size / sizeof(u32);
-       } else {
-               adev->doorbell.num_kernel_doorbells =
-                       min_t(u32, adev->doorbell.size / sizeof(u32),
-                             adev->doorbell_index.max_assignment+1);
-               if (adev->doorbell.num_kernel_doorbells == 0)
-                       return -EINVAL;
-
-               /* For Vega, reserve and map two pages on doorbell BAR since SDMA
-                * paging queue doorbell use the second page. The
-                * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
-                * doorbells are in the first page. So with paging queue enabled,
-                * the max num_kernel_doorbells should + 1 page (0x400 in dword)
-                */
-               if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
-                   adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0))
-                       adev->doorbell.num_kernel_doorbells += 0x400;
-       }
-
-       adev->doorbell.ptr = ioremap(adev->doorbell.base,
-                                    adev->doorbell.num_kernel_doorbells *
-                                    sizeof(u32));
-       if (adev->doorbell.ptr == NULL)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/**
- * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down doorbell driver information (CIK)
- */
-static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
-{
-       iounmap(adev->doorbell.ptr);
-       adev->doorbell.ptr = NULL;
-}
-
-
-
 /*
  * amdgpu_device_wb_*()
  * Writeback is the method by which the GPU updates special pages in memory
@@ -1321,10 +1156,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
-       unsigned i;
+       unsigned int i;
        u16 cmd;
        int r;
 
+       if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+               return 0;
+
        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;
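
The new guard skips resizable-BAR handling when the kernel cannot represent 64-bit physical addresses, since a resized VRAM BAR is generally placed above 4 GiB. For reference, pci_rebar_bytes_to_size() maps a byte count to the PCIe ReBAR encoding, roughly ilog2(bytes) - 20 (clamped at 1 MiB); a worked example:

    /* 8 GiB of VRAM: ilog2(8ULL << 30) - 20 = 33 - 20 = 13,
     * the ReBAR capability encoding for an 8 GiB BAR */
    int rbar_size = pci_rebar_bytes_to_size(8ULL << 30); /* -> 13 */
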
@@ -1359,7 +1197,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
                              cmd & ~PCI_COMMAND_MEMORY);
 
        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
-       amdgpu_device_doorbell_fini(adev);
+       amdgpu_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);
 
@@ -1376,7 +1214,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
-       r = amdgpu_device_doorbell_init(adev);
+       r = amdgpu_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;
 
@@ -1387,9 +1225,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
 {
-       if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) {
+       if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
                return false;
-       }
 
        return true;
 }
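
hweight32() is a population count, so the simplified check reads: skip reading the VBIOS when at least one AID (aggregated die) is present and the device is an APU. Popcount illustration:

    /* hweight32(0x5) == 2: any nonzero aid_mask has at least one die set */
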
@@ -1425,6 +1262,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
+
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if error occured */
                        if (err)
@@ -1458,6 +1296,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
        return true;
 }
 
+/*
+ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+ * speed switching. Until we have confirmation from Intel that a specific host
+ * supports it, it's safer to keep it disabled for all.
+ *
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+ */
+bool amdgpu_device_pcie_dynamic_switching_supported(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+       struct cpuinfo_x86 *c = &cpu_data(0);
+
+       if (c->x86_vendor == X86_VENDOR_INTEL)
+               return false;
+#endif
+       return true;
+}
+
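
The helper is deliberately conservative: any Intel host CPU disables PCIe dynamic speed switching until specific platforms are validated. A hypothetical caller sketch (the capability flag name is illustrative, not a real driver field):

    if (!amdgpu_device_pcie_dynamic_switching_supported())
            pcie_link_caps &= ~EXAMPLE_CAP_DYN_SPEED_SWITCH; /* hypothetical mask */
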
 /**
  * amdgpu_device_should_use_aspm - check if the device should program ASPM
  *
@@ -1508,6 +1365,7 @@ static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
 {
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
+
        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1530,7 +1388,8 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
 {
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
-        * page table and the remaining bits are in the page directory */
+        * page table and the remaining bits are in the page directory
+        */
        if (amdgpu_vm_block_size == -1)
                return;
 
@@ -1762,7 +1621,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
 
-       /*
+       /*
        * FIXME: open_count is protected by drm_global_mutex but that would lead to
        * locking inversion with the driver load path. And the access here is
        * completely racy anyway. So don't bother with locking for now.
@@ -3407,7 +3266,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
  *
  * Main resume function for hardware IPs.  The hardware IPs
  * are split into two resume functions because they are
- * are also used in in recovering from a GPU reset and some additional
+ * also used in recovering from a GPU reset and some additional
  * steps need to be take between them.  In this case (S3/S4) they are
  * run sequentially.
  * Returns 0 on success, negative error code on failure.
@@ -3509,8 +3368,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #else
        default:
                if (amdgpu_dc > 0)
-                       DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
-                                        "but isn't supported by ASIC, ignoring\n");
+                       DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
                return false;
 #endif
        }
@@ -3758,7 +3616,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
        /* mutex initialization are all done here so we
-        * can recall function without having locking issues */
+        * can recall function without having locking issues
+        */
        mutex_init(&adev->firmware.mutex);
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
@@ -3835,11 +3694,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
 
        adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
-       if (adev->rmmio == NULL) {
+       if (!adev->rmmio)
                return -ENOMEM;
-       }
+
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
-       DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
+       DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
 
        /*
         * Reset domain needs to be present early, before XGMI hive discovered
@@ -3907,7 +3766,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                dev_info(adev->dev, "PCIE atomic ops is not supported\n");
 
        /* doorbell bar mapping and doorbell index init*/
-       amdgpu_device_doorbell_init(adev);
+       amdgpu_doorbell_init(adev);
 
        if (amdgpu_emu_mode == 1) {
                /* post the asic on emulation mode */
@@ -4050,14 +3909,6 @@ fence_driver_init:
        } else
                adev->ucode_sysfs_en = true;
 
-       r = amdgpu_psp_sysfs_init(adev);
-       if (r) {
-               adev->psp_sysfs_en = false;
-               if (!amdgpu_sriov_vf(adev))
-                       DRM_ERROR("Creating psp sysfs failed\n");
-       } else
-               adev->psp_sysfs_en = true;
-
        /*
         * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
         * Otherwise the mgpu fan boost feature will be skipped due to the
@@ -4101,7 +3952,8 @@ fence_driver_init:
 
        /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
        /* this will fail for cards that aren't VGA class devices, just
-        * ignore it */
+        * ignore it
+        */
        if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 
@@ -4153,7 +4005,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
        unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
 
        /* Unmap all mapped bars - Doorbell, registers and VRAM */
-       amdgpu_device_doorbell_fini(adev);
+       amdgpu_doorbell_fini(adev);
 
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
@@ -4184,7 +4036,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
        /* make sure IB test finished before entering exclusive mode
         * to avoid preemption on IB test
-        * */
+        */
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_request_full_gpu(adev, false);
                amdgpu_virt_fini_data_exchange(adev);
@@ -4207,8 +4059,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
                amdgpu_pm_sysfs_fini(adev);
        if (adev->ucode_sysfs_en)
                amdgpu_ucode_sysfs_fini(adev);
-       if (adev->psp_sysfs_en)
-               amdgpu_psp_sysfs_fini(adev);
        sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 
        /* disable ras feature must before hw fini */
@@ -4267,7 +4117,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 
                iounmap(adev->rmmio);
                adev->rmmio = NULL;
-               amdgpu_device_doorbell_fini(adev);
+               amdgpu_doorbell_fini(adev);
                drm_dev_exit(idx);
        }
 
@@ -4727,6 +4577,9 @@ retry:
        if (r)
                return r;
 
+       /* some SW cleanup the VF needs to do before recovery */
+       amdgpu_virt_post_reset(adev);
+
        /* Resume IP prior to SMC */
        r = amdgpu_device_ip_reinit_early_sriov(adev);
        if (r)
@@ -4920,8 +4773,9 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                if (!ring || !ring->sched.thread)
                        continue;
 
-               /*clear job fence from fence drv to avoid force_completion
-                *leave NULL and vm flush fence in fence drv */
+               /* Clear job fence from fence drv to avoid force_completion
+                * leave NULL and vm flush fence in fence drv
+                */
                amdgpu_fence_driver_clear_job_fences(ring);
 
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
@@ -4935,7 +4789,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
        r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
        /* If reset handler not implemented, continue; otherwise return */
-       if (r == -ENOSYS)
+       if (r == -EOPNOTSUPP)
                r = 0;
        else
                return r;
@@ -5053,7 +4907,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
        reset_context->reset_device_list = device_list_handle;
        r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
        /* If reset handler not implemented, continue; otherwise return */
-       if (r == -ENOSYS)
+       if (r == -EOPNOTSUPP)
                r = 0;
        else
                return r;
@@ -5542,9 +5396,8 @@ skip_hw_reset:
                if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
                        amdgpu_mes_self_test(tmp_adev);
 
-               if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
-               }
 
                if (tmp_adev->asic_reset_res)
                        r = tmp_adev->asic_reset_res;
index 8e1cfc87122d675c46c06593f30cfc2c60901ca1..c21140da9d9e5b7b701dc5dbbcdc7478093a783b 100644 (file)
@@ -1750,6 +1750,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(13, 0, 8):
        case IP_VERSION(13, 0, 10):
        case IP_VERSION(13, 0, 11):
+       case IP_VERSION(14, 0, 0):
                amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
                break;
        case IP_VERSION(13, 0, 4):
index b702f499f5fb3585f8db55ec91c7e0c14f0a4215..d20dd3f852fc3e957f1e989a0a0f62d47baf7136 100644 (file)
@@ -124,7 +124,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
 
        struct drm_crtc *crtc = &amdgpu_crtc->base;
        unsigned long flags;
-       unsigned i;
+       unsigned int i;
        int vpos, hpos;
 
        for (i = 0; i < work->shared_count; ++i)
@@ -201,7 +201,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
        u64 tiling_flags;
        int i, r;
 
-       work = kzalloc(sizeof *work, GFP_KERNEL);
+       work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;
 
@@ -332,13 +332,15 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
 
        adev = drm_to_adev(dev);
        /* if we have active crtcs and we don't have a power ref,
-          take the current one */
+        * take the current one
+        */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;
                return ret;
        }
        /* if we have no active crtcs, then drop the power ref
-          we got before */
+        * we got before
+        */
        if (!active && adev->have_disp_power_ref) {
                pm_runtime_put_autosuspend(dev->dev);
                adev->have_disp_power_ref = false;
@@ -507,11 +509,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
        if (amdgpu_connector->router.ddc_valid)
                amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
 
-       if (use_aux) {
+       if (use_aux)
                ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
-       } else {
+       else
                ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
-       }
 
        if (ret != 2)
                /* Couldn't find an accessible DDC on this connector */
@@ -520,10 +521,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
         * EDID header starts with:
         * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
         * Only the first 6 bytes must be valid as
-        * drm_edid_block_valid() can fix the last 2 bytes */
+        * drm_edid_block_valid() can fix the last 2 bytes
+        */
        if (drm_edid_header_is_valid(buf) < 6) {
                /* Couldn't find an accessible EDID on this
-                * connector */
+                * connector
+                */
                return false;
        }
        return true;
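
drm_edid_header_is_valid() scores how many of the eight fixed EDID header bytes match; requiring at least 6 tolerates the trailing bytes that drm_edid_block_valid() can repair, as the reflowed comment notes. The canonical header, for reference:

    /* canonical EDID block header; the probe accepts a score >= 6 */
    static const u8 edid_header[] = {
            0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
    };
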
@@ -1216,8 +1219,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 
        obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
        if (obj ==  NULL) {
-               drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
-                           "can't create framebuffer\n", mode_cmd->handles[0]);
+               drm_dbg_kms(dev,
+                           "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
+                           mode_cmd->handles[0]);
+
                return ERR_PTR(-ENOENT);
        }
 
@@ -1410,6 +1415,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
        }
        if (amdgpu_crtc->rmx_type != RMX_OFF) {
                fixed20_12 a, b;
+
                a.full = dfixed_const(src_v);
                b.full = dfixed_const(dst_v);
                amdgpu_crtc->vsc.full = dfixed_div(a, b);
@@ -1429,7 +1435,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  *
  * \param dev Device to query.
  * \param pipe Crtc to query.
- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
  *              For driver internal use only also supports these flags:
  *
  *              USE_REAL_VBLANKSTART to use the real start of vblank instead
@@ -1504,8 +1510,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
 
        /* Called from driver internal vblank counter query code? */
        if (flags & GET_DISTANCE_TO_VBLANKSTART) {
-           /* Caller wants distance from real vbl_start in *hpos */
-           *hpos = *vpos - vbl_start;
+               /* Caller wants distance from real vbl_start in *hpos */
+               *hpos = *vpos - vbl_start;
        }
 
        /* Fudge vblank to start a few scanlines earlier to handle the
@@ -1527,7 +1533,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
 
        /* In vblank? */
        if (in_vbl)
-           ret |= DRM_SCANOUTPOS_IN_VBLANK;
+               ret |= DRM_SCANOUTPOS_IN_VBLANK;
 
        /* Called from driver internal vblank counter query code? */
        if (flags & GET_DISTANCE_TO_VBLANKSTART) {
@@ -1635,6 +1641,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 
                if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
                        r = amdgpu_bo_reserve(aobj, true);
                        if (r == 0) {
                                amdgpu_bo_unpin(aobj);
@@ -1642,9 +1649,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
                        }
                }
 
-               if (fb == NULL || fb->obj[0] == NULL) {
+               if (!fb || !fb->obj[0])
                        continue;
-               }
+
                robj = gem_to_amdgpu_bo(fb->obj[0]);
                if (!amdgpu_display_robj_is_fb(adev, robj)) {
                        r = amdgpu_bo_reserve(robj, true);
@@ -1671,6 +1678,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
 
                if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
                        r = amdgpu_bo_reserve(aobj, true);
                        if (r == 0) {
                                r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
index f637574644c0ee16955365dc59cb4f6d4c82264f..0e593cfeb57095a0fcf602b05dda2a8a79cf69ed 100644 (file)
@@ -90,8 +90,7 @@ struct amdgpu_doorbell_index {
        uint32_t xcc_doorbell_range;
 };
 
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
+enum AMDGPU_DOORBELL_ASSIGNMENT {
        AMDGPU_DOORBELL_KIQ                     = 0x000,
        AMDGPU_DOORBELL_HIQ                     = 0x001,
        AMDGPU_DOORBELL_DIQ                     = 0x002,
@@ -109,10 +108,10 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
        AMDGPU_DOORBELL_IH                      = 0x1E8,
        AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
        AMDGPU_DOORBELL_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
+};
+
+enum AMDGPU_VEGA20_DOORBELL_ASSIGNMENT {
 
-typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
-{
        /* Compute + GFX: 0~255 */
        AMDGPU_VEGA20_DOORBELL_KIQ                     = 0x000,
        AMDGPU_VEGA20_DOORBELL_HIQ                     = 0x001,
@@ -176,10 +175,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
 
        AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT            = 0x1F7,
        AMDGPU_VEGA20_DOORBELL_INVALID                   = 0xFFFF
-} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+};
+
+enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT {
 
-typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT
-{
        /* Compute + GFX: 0~255 */
        AMDGPU_NAVI10_DOORBELL_KIQ                      = 0x000,
        AMDGPU_NAVI10_DOORBELL_HIQ                      = 0x001,
@@ -227,13 +226,12 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT
 
        AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT           = 0x18F,
        AMDGPU_NAVI10_DOORBELL_INVALID                  = 0xFFFF
-} AMDGPU_NAVI10_DOORBELL_ASSIGNMENT;
+};
 
 /*
  * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
  */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
+enum AMDGPU_DOORBELL64_ASSIGNMENT {
        /*
         * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
         * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
@@ -309,9 +307,10 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
 
        AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
        AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
+};
+
+enum AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 {
 
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 {
        /* XCC0: 0x00 ~20, XCC1: 20 ~ 2F ... */
 
        /* KIQ/HIQ/DIQ */
@@ -339,13 +338,19 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 {
 
        AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT          = 0x1D4,
        AMDGPU_DOORBELL_LAYOUT1_INVALID                 = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1;
+};
 
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 
+/*
+ * GPU doorbell aperture helpers function.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev);
+void amdgpu_doorbell_fini(struct amdgpu_device *adev);
+
 #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
 #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
 #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
new file mode 100644 (file)
index 0000000..31db526
--- /dev/null
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+
+/**
+ * amdgpu_mm_rdoorbell - read a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
+{
+       if (amdgpu_device_skip_hw_access(adev))
+               return 0;
+
+       if (index < adev->doorbell.num_kernel_doorbells)
+               return readl(adev->doorbell.ptr + index);
+
+       DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+       return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell - write a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
+{
+       if (amdgpu_device_skip_hw_access(adev))
+               return;
+
+       if (index < adev->doorbell.num_kernel_doorbells)
+               writel(v, adev->doorbell.ptr + index);
+       else
+               DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/**
+ * amdgpu_mm_rdoorbell64 - read a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
+{
+       if (amdgpu_device_skip_hw_access(adev))
+               return 0;
+
+       if (index < adev->doorbell.num_kernel_doorbells)
+               return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
+
+       DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+       return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell64 - write a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
+{
+       if (amdgpu_device_skip_hw_access(adev))
+               return;
+
+       if (index < adev->doorbell.num_kernel_doorbells)
+               atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
+       else
+               DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/*
+ * GPU doorbell aperture helpers function.
+ */
+/**
+ * amdgpu_doorbell_init - Init doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev)
+{
+
+       /* No doorbell on SI hardware generation */
+       if (adev->asic_type < CHIP_BONAIRE) {
+               adev->doorbell.base = 0;
+               adev->doorbell.size = 0;
+               adev->doorbell.num_kernel_doorbells = 0;
+               adev->doorbell.ptr = NULL;
+               return 0;
+       }
+
+       if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+               return -EINVAL;
+
+       amdgpu_asic_init_doorbell_index(adev);
+
+       /* doorbell bar mapping */
+       adev->doorbell.base = pci_resource_start(adev->pdev, 2);
+       adev->doorbell.size = pci_resource_len(adev->pdev, 2);
+
+       if (adev->enable_mes) {
+               adev->doorbell.num_kernel_doorbells =
+                       adev->doorbell.size / sizeof(u32);
+       } else {
+               adev->doorbell.num_kernel_doorbells =
+                       min_t(u32, adev->doorbell.size / sizeof(u32),
+                             adev->doorbell_index.max_assignment+1);
+               if (adev->doorbell.num_kernel_doorbells == 0)
+                       return -EINVAL;
+
+               /* For Vega, reserve and map two pages on the doorbell BAR since
+                * the SDMA paging queue doorbell uses the second page. The
+                * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+                * doorbells are in the first page, so with the paging queue
+                * enabled, num_kernel_doorbells needs one extra page (0x400 dwords).
+                */
+               if (adev->asic_type >= CHIP_VEGA10)
+                       adev->doorbell.num_kernel_doorbells += 0x400;
+       }
+
+       adev->doorbell.ptr = ioremap(adev->doorbell.base,
+                                    adev->doorbell.num_kernel_doorbells *
+                                    sizeof(u32));
+       if (adev->doorbell.ptr == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+{
+       iounmap(adev->doorbell.ptr);
+       adev->doorbell.ptr = NULL;
+}
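
The relocated helpers keep existing call sites working through the RDOORBELL32/WDOORBELL64-style macros still declared in amdgpu_doorbell.h. A usage sketch (the macros assume a local adev; the ring and index are illustrative):

    /* ring a 32-bit doorbell and read it back */
    WDOORBELL32(AMDGPU_DOORBELL_KIQ, lower_32_bits(ring->wptr));
    v = RDOORBELL32(AMDGPU_DOORBELL_KIQ);
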
index 0593ef8fe0a63e270cf1117eb0264f06ed6b658c..d6439d56dcd59d551aa7758b1db358ccfa412d5a 100644 (file)
@@ -313,9 +313,7 @@ module_param_named(msi, amdgpu_msi, int, 0444);
  * jobs is 10000. The timeout for compute is 60000.
  */
 MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
-               "for passthrough or sriov, 10000 for all jobs."
-               " 0: keep default value. negative: infinity timeout), "
-               "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+               "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
                "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
 module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
 
@@ -584,7 +582,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
  */
 #ifdef CONFIG_DRM_AMDGPU_SI
 
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
 int amdgpu_si_support = 0;
 MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
 #else
@@ -603,7 +601,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
  */
 #ifdef CONFIG_DRM_AMDGPU_CIK
 
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
 int amdgpu_cik_support = 0;
 MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
 #else
@@ -620,8 +618,7 @@ module_param_named(cik_support, amdgpu_cik_support, int, 0444);
  * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
  */
 MODULE_PARM_DESC(smu_memory_pool_size,
-       "reserve gtt for smu debug usage, 0 = disable,"
-               "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+       "reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
 module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
 
 /**
@@ -791,9 +788,9 @@ module_param(hws_gws_support, bool, 0444);
 MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
 
 /**
-* DOC: queue_preemption_timeout_ms (int)
-* queue preemption timeout in ms (1 = Minimum, 9000 = default)
-*/
+ * DOC: queue_preemption_timeout_ms (int)
+ * queue preemption timeout in ms (1 = Minimum, 9000 = default)
+ */
 int queue_preemption_timeout_ms = 9000;
 module_param(queue_preemption_timeout_ms, int, 0644);
 MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
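
Like the neighbouring options, this is a plain module parameter, so it can be set at load time; a usage note with the documented default:

    /* kernel command line:  amdgpu.queue_preemption_timeout_ms=9000
     * or at module load:    modprobe amdgpu queue_preemption_timeout_ms=9000 */
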
@@ -2417,7 +2414,6 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
                        amdgpu_amdkfd_device_init(adev);
                amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
-       return;
 }
 
 static int amdgpu_pmops_prepare(struct device *dev)
@@ -2614,6 +2610,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        /* wait for all rings to drain before suspending */
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
+
                if (ring && ring->sched.ready) {
                        ret = amdgpu_fence_wait_empty(ring);
                        if (ret)
@@ -2738,6 +2735,7 @@ long amdgpu_drm_ioctl(struct file *filp,
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
        long ret;
+
        dev = file_priv->minor->dev;
        ret = pm_runtime_get_sync(dev->dev);
        if (ret < 0)
@@ -2802,9 +2800,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
        if (!filp)
                return -EINVAL;
 
-       if (filp->f_op != &amdgpu_driver_kms_fops) {
+       if (filp->f_op != &amdgpu_driver_kms_fops)
                return -EINVAL;
-       }
 
        file = filp->private_data;
        *fpriv = file->driver_priv;
@@ -2850,10 +2847,7 @@ static const struct drm_driver amdgpu_kms_driver = {
        .show_fdinfo = amdgpu_show_fdinfo,
 #endif
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = amdgpu_gem_prime_import,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -2877,10 +2871,7 @@ const struct drm_driver amdgpu_partition_driver = {
        .fops = &amdgpu_driver_kms_fops,
        .release = &amdgpu_driver_release_kms,
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = amdgpu_gem_prime_import,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -2897,16 +2888,13 @@ static struct pci_error_handlers amdgpu_pci_err_handler = {
        .resume         = amdgpu_pci_resume,
 };
 
-extern const struct attribute_group amdgpu_vram_mgr_attr_group;
-extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
-
 static const struct attribute_group *amdgpu_sysfs_groups[] = {
        &amdgpu_vram_mgr_attr_group,
        &amdgpu_gtt_mgr_attr_group,
+       &amdgpu_flash_attr_group,
        NULL,
 };
 
-
 static struct pci_driver amdgpu_kms_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
index 7d2a908438e9242fe0f73b941f5932e5af743bba..e71768661ca8d25808ac3d5a2e6bf59bc8b8d98b 100644 (file)
@@ -183,6 +183,8 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
 {
        const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
        u16 limit;
+       u16 ps; /* Partial size */
+       int res = 0, r;
 
        if (!quirks)
                limit = 0;
@@ -200,28 +202,25 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
                                    eeprom_addr, buf_size,
                                    read ? "read" : "write", EEPROM_OFFSET_SIZE);
                return -EINVAL;
-       } else {
-               u16 ps; /* Partial size */
-               int res = 0, r;
-
-               /* The "limit" includes all data bytes sent/received,
-                * which would include the EEPROM_OFFSET_SIZE bytes.
-                * Account for them here.
-                */
-               limit -= EEPROM_OFFSET_SIZE;
-               for ( ; buf_size > 0;
-                     buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
-                       ps = min(limit, buf_size);
-
-                       r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
-                                                eeprom_buf, ps, read);
-                       if (r < 0)
-                               return r;
-                       res += r;
-               }
+       }
 
-               return res;
+       /* The "limit" includes all data bytes sent/received,
+        * which would include the EEPROM_OFFSET_SIZE bytes.
+        * Account for them here.
+        */
+       limit -= EEPROM_OFFSET_SIZE;
+       for ( ; buf_size > 0;
+             buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+               ps = min(limit, buf_size);
+
+               r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+                                        eeprom_buf, ps, read);
+               if (r < 0)
+                       return r;
+               res += r;
        }
+
+       return res;
 }
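
With the else branch flattened, the chunking logic is unchanged: "limit" counts every byte on the wire, so the two EEPROM offset bytes are subtracted once and each pass moves at most limit - EEPROM_OFFSET_SIZE payload bytes. A worked example under assumed quirk values:

    /* assume quirks->max_write_len = 32 and EEPROM_OFFSET_SIZE = 2:
     * payload per transfer is 30, so a 100-byte transfer issues chunks
     * of 30 + 30 + 30 + 10, advancing eeprom_addr and eeprom_buf each pass */
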
 
 int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
index 13d7413d4ca3cdc6cb8fcc5c0f402172604c59ee..6038b5021b27be69678c2c5d581d2fa6b913d9c4 100644 (file)
@@ -89,7 +89,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
        drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
        drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name);
        drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
-       drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
+       drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context);
        drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
        drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
        drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
@@ -109,7 +109,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
                if (!usage[hw_ip])
                        continue;
 
-               drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
+               drm_printf(p, "drm-engine-%s:\t%lld ns\n", amdgpu_ip_name[hw_ip],
                           ktime_to_ns(usage[hw_ip]));
        }
 }
index 4620c4712ce32af3e1fee6e7838700255c108d8e..8c3ee042556a46e89fb4731f2aa910bca50bb06e 100644 (file)
@@ -60,10 +60,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                /* D161 and D163 are the VG20 server SKUs */
-               if (strnstr(atom_ctx->vbios_version, "D161",
-                           sizeof(atom_ctx->vbios_version)) ||
-                   strnstr(atom_ctx->vbios_version, "D163",
-                           sizeof(atom_ctx->vbios_version))) {
+               if (strnstr(atom_ctx->vbios_pn, "D161",
+                           sizeof(atom_ctx->vbios_pn)) ||
+                   strnstr(atom_ctx->vbios_pn, "D163",
+                           sizeof(atom_ctx->vbios_pn))) {
                        if (fru_addr)
                                *fru_addr = FRU_EEPROM_MADDR_6;
                        return true;
@@ -72,22 +72,23 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
                }
        case CHIP_ALDEBARAN:
                /* All Aldebaran SKUs have an FRU */
-               if (!strnstr(atom_ctx->vbios_version, "D673",
-                            sizeof(atom_ctx->vbios_version)))
+               if (!strnstr(atom_ctx->vbios_pn, "D673",
+                            sizeof(atom_ctx->vbios_pn)))
                        if (fru_addr)
                                *fru_addr = FRU_EEPROM_MADDR_6;
                return true;
        case CHIP_SIENNA_CICHLID:
-               if (strnstr(atom_ctx->vbios_version, "D603",
-                           sizeof(atom_ctx->vbios_version))) {
-                       if (strnstr(atom_ctx->vbios_version, "D603GLXE",
-                                   sizeof(atom_ctx->vbios_version))) {
+               if (strnstr(atom_ctx->vbios_pn, "D603",
+                           sizeof(atom_ctx->vbios_pn))) {
+                       if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
+                                   sizeof(atom_ctx->vbios_pn))) {
                                return false;
-                       } else {
-                               if (fru_addr)
-                                       *fru_addr = FRU_EEPROM_MADDR_6;
-                               return true;
                        }
+
+                       if (fru_addr)
+                               *fru_addr = FRU_EEPROM_MADDR_6;
+                       return true;
+
                } else {
                        return false;
                }
index 2ca3c329de6ddc0e363a175bc2ebf8df3f0eab41..2d4b67175b55bec2d187cb5263240046b9a7f118 100644 (file)
 #include "soc15_common.h"
 
 #define FW_ATTESTATION_DB_COOKIE        0x143b6a37
-#define FW_ATTESTATION_RECORD_VALID    1
+#define FW_ATTESTATION_RECORD_VALID    1
 #define FW_ATTESTATION_MAX_SIZE                4096
 
-typedef struct FW_ATT_DB_HEADER
-{
+struct FW_ATT_DB_HEADER {
        uint32_t AttDbVersion;           /* version of the fwar feature */
        uint32_t AttDbCookie;            /* cookie as an extra check for corrupt data */
-} FW_ATT_DB_HEADER;
+};
 
-typedef struct FW_ATT_RECORD
-{
+struct FW_ATT_RECORD {
        uint16_t AttFwIdV1;              /* Legacy FW Type field */
        uint16_t AttFwIdV2;              /* V2 FW ID field */
        uint32_t AttFWVersion;           /* FW Version */
@@ -50,7 +48,7 @@ typedef struct FW_ATT_RECORD
        uint8_t  AttSource;              /* FW source indicator */
        uint8_t  RecordValid;            /* Indicates whether the record is a valid entry */
        uint32_t AttFwTaId;              /* Ta ID (only in TA Attestation Table) */
-} FW_ATT_RECORD;
+};
 
 static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
                                                  char __user *buf,
@@ -60,15 +58,15 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        uint64_t records_addr = 0;
        uint64_t vram_pos = 0;
-       FW_ATT_DB_HEADER fw_att_hdr = {0};
-       FW_ATT_RECORD fw_att_record = {0};
+       struct FW_ATT_DB_HEADER fw_att_hdr = {0};
+       struct FW_ATT_RECORD fw_att_record = {0};
 
-       if (size < sizeof(FW_ATT_RECORD)) {
+       if (size < sizeof(struct FW_ATT_RECORD)) {
                DRM_WARN("FW attestation input buffer not enough memory");
                return -EINVAL;
        }
 
-       if ((*pos + sizeof(FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
+       if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
                DRM_WARN("FW attestation out of bounds");
                return 0;
        }
@@ -83,8 +81,8 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
        if (*pos == 0) {
                amdgpu_device_vram_access(adev,
                                          vram_pos,
-                                         (uint32_t*)&fw_att_hdr,
-                                         sizeof(FW_ATT_DB_HEADER),
+                                         (uint32_t *)&fw_att_hdr,
+                                         sizeof(struct FW_ATT_DB_HEADER),
                                          false);
 
                if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) {
@@ -96,20 +94,20 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
        }
 
        amdgpu_device_vram_access(adev,
-                                 vram_pos + sizeof(FW_ATT_DB_HEADER) + *pos,
-                                 (uint32_t*)&fw_att_record,
-                                 sizeof(FW_ATT_RECORD),
+                                 vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos,
+                                 (uint32_t *)&fw_att_record,
+                                 sizeof(struct FW_ATT_RECORD),
                                  false);
 
        if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID)
                return 0;
 
-       if (copy_to_user(buf, (void*)&fw_att_record, sizeof(FW_ATT_RECORD)))
+       if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD)))
                return -EINVAL;
 
-       *pos += sizeof(FW_ATT_RECORD);
+       *pos += sizeof(struct FW_ATT_RECORD);
 
-       return sizeof(FW_ATT_RECORD);
+       return sizeof(struct FW_ATT_RECORD);
 }
 
 static const struct file_operations amdgpu_fw_attestation_debugfs_ops = {
@@ -136,7 +134,7 @@ void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev)
                return;
 
        debugfs_create_file("amdgpu_fw_attestation",
-                           S_IRUSR,
+                           0400,
                            adev_to_drm(adev)->primary->debugfs_root,
                            adev,
                            &amdgpu_fw_attestation_debugfs_ops);
index 74055cba3dc9aaf38ed74377c7ecf3ce5476749e..693b1fd1191a56c4a4c4f741b4e392f4c49d4484 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -181,11 +182,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
                return r;
 
        bo_va = amdgpu_vm_bo_find(vm, abo);
-       if (!bo_va) {
+       if (!bo_va)
                bo_va = amdgpu_vm_bo_add(adev, vm, abo);
-       } else {
+       else
                ++bo_va->ref_count;
-       }
        amdgpu_bo_unreserve(abo);
        return 0;
 }
@@ -198,29 +198,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
 
-       struct amdgpu_bo_list_entry vm_pd;
-       struct list_head list, duplicates;
        struct dma_fence *fence = NULL;
-       struct ttm_validate_buffer tv;
-       struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
+       struct drm_exec exec;
        long r;
 
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&duplicates);
-
-       tv.bo = &bo->tbo;
-       tv.num_shared = 2;
-       list_add(&tv.head, &list);
-
-       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-       if (r) {
-               dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%ld)\n", r);
-               return;
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_until_all_locked(&exec) {
+               r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
+
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
        }
+
        bo_va = amdgpu_vm_bo_find(vm, bo);
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;
@@ -230,6 +225,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
                goto out_unlock;
 
        r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        if (r || !fence)
                goto out_unlock;
 
@@ -237,10 +235,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
        dma_fence_put(fence);
 
 out_unlock:
-       if (unlikely(r < 0))
-               dev_err(adev->dev, "failed to clear page "
-                       "tables on GEM object close (%ld)\n", r);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       if (r)
+               dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+       drm_exec_fini(&exec);
 }
 
 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -463,9 +460,9 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
        struct amdgpu_bo *robj;
 
        gobj = drm_gem_object_lookup(filp, handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
@@ -482,6 +479,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 {
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;
+
        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
 }
@@ -508,7 +506,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 
        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /*  clamp timeout to avoid unsigned-> signed overflow */
-       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
+       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;
 
        return timeout_jiffies;
@@ -526,9 +524,9 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        long ret;
 
        gobj = drm_gem_object_lookup(filp, handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
        ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                                    true, timeout);
@@ -555,7 +553,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_bo *robj;
        int r = -1;
 
-       DRM_DEBUG("%d \n", args->handle);
+       DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
@@ -675,17 +673,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
-       struct amdgpu_bo_list_entry vm_pd;
-       struct ttm_validate_buffer tv;
-       struct ww_acquire_ctx ticket;
-       struct list_head list, duplicates;
+       struct drm_exec exec;
        uint64_t va_flags;
        uint64_t vm_size;
        int r = 0;
 
        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_dbg(dev->dev,
-                       "va_address 0x%LX is in reserved area 0x%LX\n",
+                       "va_address 0x%llx is in reserved area 0x%llx\n",
                        args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }
@@ -693,7 +688,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        if (args->va_address >= AMDGPU_GMC_HOLE_START &&
            args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(dev->dev,
-                       "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+                       "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
                        args->va_address, AMDGPU_GMC_HOLE_START,
                        AMDGPU_GMC_HOLE_END);
                return -EINVAL;
@@ -728,36 +723,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
                if (gobj == NULL)
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
-               tv.bo = &abo->tbo;
-               if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-                       tv.num_shared = 1;
-               else
-                       tv.num_shared = 0;
-               list_add(&tv.head, &list);
        } else {
                gobj = NULL;
                abo = NULL;
        }
 
-       amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+                     DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_until_all_locked(&exec) {
+               if (gobj) {
+                       r = drm_exec_lock_obj(&exec, gobj);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(r))
+                               goto error;
+               }
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-       if (r)
-               goto error_unref;
+               r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error;
+       }
 
        if (abo) {
                bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                if (!bo_va) {
                        r = -ENOENT;
-                       goto error_backoff;
+                       goto error;
                }
        } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                bo_va = fpriv->prt_va;
@@ -794,10 +791,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);
 
-error_backoff:
-       ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+       drm_exec_fini(&exec);
        drm_gem_object_put(gobj);
        return r;
 }
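
The hunks above convert amdgpu_gem_va_ioctl() from the ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() bookkeeping to drm_exec, which relocks everything from scratch whenever ww-mutex contention is detected. A condensed, non-buildable sketch of the pattern as used in this series (lock_bo_and_pd() is a hypothetical helper, not part of the patch):

#include <drm/drm_exec.h>

/* Hypothetical helper showing the drm_exec locking pattern used above. */
static int lock_bo_and_pd(struct amdgpu_vm *vm, struct drm_gem_object *gobj)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES);
	/* drm_exec_retry_on_contention() unwinds all locks taken so far
	 * and restarts the loop body when a ww-mutex deadlock is hit. */
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto out;
		}
		/* Lock the VM root PD, reserving two extra fence slots. */
		r = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}

	/* ... operate on the locked objects here ... */
out:
	drm_exec_fini(&exec);	/* drops every lock taken in the loop */
	return r;
}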
@@ -813,9 +808,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
        int r;
 
        gobj = drm_gem_object_lookup(filp, args->handle);
-       if (gobj == NULL) {
+       if (!gobj)
                return -ENOENT;
-       }
+
        robj = gem_to_amdgpu_bo(gobj);
 
        r = amdgpu_bo_reserve(robj, false);
@@ -941,9 +936,9 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
-       if (r) {
+       if (r)
                return r;
-       }
+
        args->handle = handle;
        return 0;
 }
index a33d4bc34cee746cdee7e70103190fee0aa9f2cd..c76b6bfc4dabeaed2b7d9acd2c8b00ce716ac2b5 100644 (file)
@@ -110,9 +110,9 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
  * The bitmask of CUs to be disabled in the shader array determined by se and
  * sh is stored in mask[se * max_sh + sh].
  */
-void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
 {
-       unsigned se, sh, cu;
+       unsigned int se, sh, cu;
        const char *p;
 
        memset(mask, 0, sizeof(*mask) * max_se * max_sh);
@@ -124,6 +124,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
        for (;;) {
                char *next;
                int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+
                if (ret < 3) {
                        DRM_ERROR("amdgpu: could not parse disable_cu\n");
                        return;
@@ -349,7 +350,7 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
 }
 
 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
-                       unsigned hpd_size, int xcc_id)
+                       unsigned int hpd_size, int xcc_id)
 {
        int r;
        u32 *hpd;
@@ -376,7 +377,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
 
 /* create MQD for each compute/gfx queue */
 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
-                          unsigned mqd_size, int xcc_id)
+                          unsigned int mqd_size, int xcc_id)
 {
        int r, i, j;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
@@ -407,8 +408,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 
                /* prepare MQD backup */
                kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
-               if (!kiq->mqd_backup)
-                               dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+               if (!kiq->mqd_backup) {
+                       dev_warn(adev->dev,
+                                "no memory to create MQD backup for ring %s\n", ring->name);
+                       return -ENOMEM;
+               }
        }
 
        if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
@@ -427,8 +431,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                                ring->mqd_size = mqd_size;
                                /* prepare MQD backup */
                                adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
-                               if (!adev->gfx.me.mqd_backup[i])
+                               if (!adev->gfx.me.mqd_backup[i]) {
                                        dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+                                       return -ENOMEM;
+                               }
                        }
                }
        }
@@ -449,8 +455,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                        ring->mqd_size = mqd_size;
                        /* prepare MQD backup */
                        adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
-                       if (!adev->gfx.mec.mqd_backup[j])
+                       if (!adev->gfx.mec.mqd_backup[j]) {
                                dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+                               return -ENOMEM;
+                       }
                }
        }
 
@@ -1281,11 +1289,11 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
        return sysfs_emit(buf, "%s\n", supported_partition);
 }
 
-static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(current_compute_partition, 0644,
                   amdgpu_gfx_get_current_compute_partition,
                   amdgpu_gfx_set_compute_partition);
 
-static DEVICE_ATTR(available_compute_partition, S_IRUGO,
+static DEVICE_ATTR(available_compute_partition, 0444,
                   amdgpu_gfx_get_available_compute_partition, NULL);
 
 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
index 56d73fade56850b5c0d9b45b450ef8bdf466332c..fdc25cd559b60cd0603f67bf6ece451030fb20ef 100644 (file)
@@ -331,6 +331,8 @@ struct amdgpu_gmc {
        u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
        u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
        u64 MC_VM_MX_L1_TLB_CNTL;
+
+       u64 noretry_flags;
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
index ebeddc9a37e9ba5070df80f09fdc6311095e6e6b..6aa3b1d845abe1e3a6efe32990ea0bd8763d5a41 100644 (file)
@@ -62,7 +62,7 @@
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, enum amdgpu_ib_pool_type pool_type,
+                 unsigned int size, enum amdgpu_ib_pool_type pool_type,
                  struct amdgpu_ib *ib)
 {
        int r;
@@ -123,7 +123,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
  * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
  * to SI there was just a DE IB.
  */
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                       struct amdgpu_ib *ibs, struct amdgpu_job *job,
                       struct dma_fence **f)
 {
@@ -131,16 +131,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
        bool need_ctx_switch;
-       unsigned patch_offset = ~0;
+       unsigned int patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
-       unsigned fence_flags = 0;
+       unsigned int fence_flags = 0;
        bool secure, init_shadow;
        u64 shadow_va, csa_va, gds_va;
        int vmid = AMDGPU_JOB_GET_VMID(job);
 
-       unsigned i;
+       unsigned int i;
        int r = 0;
        bool need_pipe_sync = false;
 
@@ -282,7 +282,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
 
                if (ring->funcs->init_cond_exec) {
-                       unsigned ce_offset = ~0;
+                       unsigned int ce_offset = ~0;
 
                        ce_offset = amdgpu_ring_init_cond_exec(ring);
                        if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
@@ -385,7 +385,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
        long tmo_gfx, tmo_mm;
        int r, ret = 0;
-       unsigned i;
+       unsigned int i;
 
        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
@@ -402,7 +402,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                /* The CP and SDMA engines are scheduled together, so make
                 * the timeout wide enough to cover the time spent waiting
                 * for them to come back under RUNTIME only
-               */
+                */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
        } else if (adev->gmc.xgmi.hive_id) {
                tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
@@ -465,13 +465,13 @@ static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
 {
        struct amdgpu_device *adev = m->private;
 
-       seq_printf(m, "--------------------- DELAYED --------------------- \n");
+       seq_puts(m, "--------------------- DELAYED ---------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
                                     m);
-       seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+       seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
                                     m);
-       seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+       seq_puts(m, "--------------------- DIRECT ----------------------\n");
        amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
 
        return 0;
index fceb3b384955ac023765d0616126b9c0e73a5f34..f3b0aaf3ebc69e7f90f8cf0c3f0e5d3417991669 100644 (file)
@@ -138,6 +138,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 /**
  * amdgpu_ih_ring_write - write IV to the ring buffer
  *
+ * @adev: amdgpu_device pointer
  * @ih: ih ring to write to
  * @iv: the iv to write
  * @num_dw: size of the iv in dw
@@ -145,8 +146,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
  * Writes an IV to the ring buffer using the CPU and increments the wptr.
  * Used for testing and delegating IVs to a software ring.
  */
-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
-                         unsigned int num_dw)
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+                         const uint32_t *iv, unsigned int num_dw)
 {
        uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2;
        unsigned int i;
@@ -161,6 +162,9 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
        if (wptr != READ_ONCE(ih->rptr)) {
                wmb();
                WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr));
+       } else if (adev->irq.retry_cam_enabled) {
+               dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n",
+                             wptr, ih->rptr);
        }
 }
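
The new adev parameter exists so the overflow branch can log: the write pointer is committed only if it has not caught up with the read pointer, and when it has (with the retry CAM enabled) the IV is dropped with a one-time warning instead of failing silently. A standalone sketch of that full-ring check, assuming a plain power-of-two array rather than the amdgpu structures:

#include <stdint.h>

#define RING_DW 8	/* tiny power-of-two ring, sized in dwords */

struct soft_ring {
	uint32_t buf[RING_DW];
	uint32_t rptr, wptr;	/* dword indices */
};

/* Returns 0 on success, -1 when the write would land on the reader. */
static int ring_write(struct soft_ring *r, const uint32_t *iv,
		      unsigned int num_dw)
{
	uint32_t wptr = r->wptr;
	unsigned int i;

	for (i = 0; i < num_dw; i++) {
		r->buf[wptr++] = iv[i];
		wptr &= RING_DW - 1;	/* wrap, like the driver's pointer mask */
	}
	if (wptr == r->rptr)
		return -1;	/* full: wptr is not committed, the IV is dropped */
	r->wptr = wptr;
	return 0;
}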
 
index dd1c2eded6b9d2a533fed7d9cf4354e1e52d0f2d..6c6184f0dbc17e03470b1169174805186bf0df77 100644 (file)
@@ -27,6 +27,9 @@
 /* Maximum number of IVs processed at once */
 #define AMDGPU_IH_MAX_NUM_IVS  32
 
+#define IH_RING_SIZE   (256 * 1024)
+#define IH_SW_RING_SIZE        (8 * 1024)      /* enough for 256 CAM entries */
+
 struct amdgpu_device;
 struct amdgpu_iv_entry;
 
@@ -97,8 +100,8 @@ struct amdgpu_ih_funcs {
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
                        unsigned ring_size, bool use_bus_addr);
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
-                         unsigned int num_dw);
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+                         const uint32_t *iv, unsigned int num_dw);
 int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
                                            struct amdgpu_ih_ring *ih);
 int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
index 5273decc5753b45fff9f12f21bc6e567b13d56f3..fa6d0adcec206cacf803e349595e75a59e5d15d7 100644 (file)
@@ -493,7 +493,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry,
                         unsigned int num_dw)
 {
-       amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
+       amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
        schedule_work(&adev->irq.ih_soft_work);
 }
 
index cca5a495611f3ba1454d0ecfb231618f64e019cb..631c5ab3f7dc5ef05b0432332f4726968bd28c76 100644 (file)
@@ -557,6 +557,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        crtc = (struct drm_crtc *)minfo->crtcs[i];
                        if (crtc && crtc->base.id == info->mode_crtc.id) {
                                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
                                ui32 = amdgpu_crtc->crtc_id;
                                found = 1;
                                break;
@@ -575,7 +576,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                if (ret)
                        return ret;
 
-               ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+               ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
                return ret ? -EFAULT : 0;
        }
        case AMDGPU_INFO_HW_IP_COUNT: {
@@ -721,17 +722,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                                    ? -EFAULT : 0;
        }
        case AMDGPU_INFO_READ_MMR_REG: {
-               unsigned n, alloc_size;
+               unsigned int n, alloc_size;
                uint32_t *regs;
-               unsigned se_num = (info->read_mmr_reg.instance >>
+               unsigned int se_num = (info->read_mmr_reg.instance >>
                                   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
                                  AMDGPU_INFO_MMR_SE_INDEX_MASK;
-               unsigned sh_num = (info->read_mmr_reg.instance >>
+               unsigned int sh_num = (info->read_mmr_reg.instance >>
                                   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
                                  AMDGPU_INFO_MMR_SH_INDEX_MASK;
 
                /* set full masks if userspace sets all bits
-                * in the bitfields */
+                * in the bitfields
+                */
                if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
                        se_num = 0xffffffff;
                else if (se_num >= AMDGPU_GFX_MAX_SE)
@@ -896,7 +898,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                return ret;
        }
        case AMDGPU_INFO_VCE_CLOCK_TABLE: {
-               unsigned i;
+               unsigned int i;
                struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
                struct amd_vce_state *vce_state;
 
@@ -1102,6 +1104,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                struct drm_amdgpu_info_video_caps *caps;
                int r;
 
+               if (!adev->asic_funcs->query_video_codecs)
+                       return -EINVAL;
+
                switch (info->video_cap.type) {
                case AMDGPU_INFO_VIDEO_CAPS_DECODE:
                        r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
@@ -1229,13 +1234,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                pasid = 0;
        }
 
-       r = amdgpu_vm_init(adev, &fpriv->vm);
+       r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
        if (r)
                goto error_pasid;
 
-       r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+       r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
        if (r)
-               goto error_vm;
+               goto error_pasid;
 
        r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
        if (r)
@@ -1719,7 +1724,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
        seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
                   fw_info.feature, fw_info.ver);
 
-       seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+       seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);
 
        return 0;
 }
index e9091ebfe230db24072a7abc0dc9ff1effd9267f..37f15abf7543c7387598e9cb16780d985f143028 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/firmware.h>
+#include <drm/drm_exec.h>
 
 #include "amdgpu_mes.h"
 #include "amdgpu.h"
@@ -642,6 +643,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
        unsigned long flags;
        int r;
 
+       memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
+
        /* allocate the mes queue buffer */
        queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
        if (!queue) {
@@ -1168,34 +1171,31 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                 struct amdgpu_mes_ctx_data *ctx_data)
 {
        struct amdgpu_bo_va *bo_va;
-       struct ww_acquire_ctx ticket;
-       struct list_head list;
-       struct amdgpu_bo_list_entry pd;
-       struct ttm_validate_buffer csa_tv;
        struct amdgpu_sync sync;
+       struct drm_exec exec;
        int r;
 
        amdgpu_sync_create(&sync);
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&csa_tv.head);
-
-       csa_tv.bo = &ctx_data->meta_data_obj->tbo;
-       csa_tv.num_shared = 1;
-
-       list_add(&csa_tv.head, &list);
-       amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
-               return r;
+       drm_exec_init(&exec, 0);
+       drm_exec_until_all_locked(&exec) {
+               r = drm_exec_lock_obj(&exec,
+                                     &ctx_data->meta_data_obj->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error_fini_exec;
+
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto error_fini_exec;
        }
 
        bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
        if (!bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for meta data BO\n");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto error_fini_exec;
        }
 
        r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
@@ -1205,33 +1205,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
        if (r) {
                DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
-               goto error;
+               goto error_del_bo_va;
        }
 
        r = amdgpu_vm_bo_update(adev, bo_va, false);
        if (r) {
                DRM_ERROR("failed to do vm_bo_update on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, bo_va->last_pt_update);
 
        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r) {
                DRM_ERROR("failed to update pdes on meta data\n");
-               goto error;
+               goto error_del_bo_va;
        }
        amdgpu_sync_fence(&sync, vm->last_update);
 
        amdgpu_sync_wait(&sync, false);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        amdgpu_sync_free(&sync);
        ctx_data->meta_data_va = bo_va;
        return 0;
 
-error:
+error_del_bo_va:
        amdgpu_vm_bo_del(adev, bo_va);
-       ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+       drm_exec_fini(&exec);
        amdgpu_sync_free(&sync);
        return r;
 }
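
The relabeled unwind path above follows the usual kernel ladder: error_del_bo_va undoes the bo_va mapping before falling through to error_fini_exec, which drops the locks that both paths share. A generic sketch of that shape, with hypothetical helpers standing in for the drm_exec and bo_va calls:

/* Hypothetical helpers; each err_* label releases exactly what was
 * acquired after the previous label's resource. */
int lock_everything(void);
int add_bo_va(void);
int map_and_update(void);
void del_bo_va(void);
void unlock_everything(void);

static int map_meta_data_sketch(void)
{
	int r;

	r = lock_everything();		/* the drm_exec loop in the real code */
	if (r)
		return r;

	r = add_bo_va();
	if (r)
		goto err_unlock;

	r = map_and_update();
	if (r)
		goto err_del_bo_va;

	unlock_everything();		/* the success path drops the locks too */
	return 0;

err_del_bo_va:
	del_bo_va();
err_unlock:
	unlock_everything();
	return r;
}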
@@ -1242,34 +1244,30 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
        struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
        struct amdgpu_bo *bo = ctx_data->meta_data_obj;
        struct amdgpu_vm *vm = bo_va->base.vm;
-       struct amdgpu_bo_list_entry vm_pd;
-       struct list_head list, duplicates;
-       struct dma_fence *fence = NULL;
-       struct ttm_validate_buffer tv;
-       struct ww_acquire_ctx ticket;
-       long r = 0;
-
-       INIT_LIST_HEAD(&list);
-       INIT_LIST_HEAD(&duplicates);
-
-       tv.bo = &bo->tbo;
-       tv.num_shared = 2;
-       list_add(&tv.head, &list);
-
-       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-       if (r) {
-               dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%ld)\n", r);
-               return r;
+       struct dma_fence *fence;
+       struct drm_exec exec;
+       long r;
+
+       drm_exec_init(&exec, 0);
+       drm_exec_until_all_locked(&exec) {
+               r = drm_exec_lock_obj(&exec,
+                                     &ctx_data->meta_data_obj->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
+
+               r = amdgpu_vm_lock_pd(vm, &exec, 0);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(r))
+                       goto out_unlock;
        }
 
        amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
 
-       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+       r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+                                  &fence);
        if (r)
                goto out_unlock;
        if (fence) {
@@ -1288,7 +1286,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 out_unlock:
        if (unlikely(r < 0))
                dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
-       ttm_eu_backoff_reservation(&ticket, &list);
+       drm_exec_fini(&exec);
 
        return r;
 }
@@ -1382,7 +1380,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
                goto error_pasid;
        }
 
-       r = amdgpu_vm_init(adev, vm);
+       r = amdgpu_vm_init(adev, vm, -1);
        if (r) {
                DRM_ERROR("failed to initialize vm\n");
                goto error_pasid;
index 2d6ac30b7135b894674224b826356c30ec3c8f3d..2053954a235ce974c4ca46d54940090da4227cb6 100644 (file)
@@ -224,6 +224,7 @@ struct mes_add_queue_input {
        uint32_t        is_kfd_process;
        uint32_t        is_aql_queue;
        uint32_t        queue_size;
+       uint32_t        exclusively_scheduled;
 };
 
 struct mes_remove_queue_input {
index f7905bce0de15e49202dc23d022b9e68167bdcc8..88419927570a3a893c01762f8fa3631c37573829 100644 (file)
@@ -1575,23 +1575,31 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
-       unsigned int domain;
        const char *placement;
        unsigned int pin_count;
        u64 size;
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-       switch (domain) {
-       case AMDGPU_GEM_DOMAIN_VRAM:
-               placement = "VRAM";
-               break;
-       case AMDGPU_GEM_DOMAIN_GTT:
-               placement = " GTT";
-               break;
-       case AMDGPU_GEM_DOMAIN_CPU:
-       default:
-               placement = " CPU";
-               break;
+       if (dma_resv_trylock(bo->tbo.base.resv)) {
+               unsigned int domain;
+
+               domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+               switch (domain) {
+               case AMDGPU_GEM_DOMAIN_VRAM:
+                       if (amdgpu_bo_in_cpu_visible_vram(bo))
+                               placement = "VRAM VISIBLE";
+                       else
+                               placement = "VRAM";
+                       break;
+               case AMDGPU_GEM_DOMAIN_GTT:
+                       placement = "GTT";
+                       break;
+               case AMDGPU_GEM_DOMAIN_CPU:
+               default:
+                       placement = "CPU";
+                       break;
+               }
+               dma_resv_unlock(bo->tbo.base.resv);
+       } else {
+               placement = "UNKNOWN";
        }
 
        size = amdgpu_bo_size(bo);
index 6d676bdd1505b17deab4639413ee06ba8ca8eeaf..4e428060a1fa292e1473756a9c7449c91d4eb16d 100644 (file)
@@ -45,9 +45,6 @@
 
 #define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
 
-static int psp_sysfs_init(struct amdgpu_device *adev);
-static void psp_sysfs_fini(struct amdgpu_device *adev);
-
 static int psp_load_smu_fw(struct psp_context *psp);
 static int psp_rap_terminate(struct psp_context *psp);
 static int psp_securedisplay_terminate(struct psp_context *psp);
@@ -180,9 +177,11 @@ static int psp_early_init(void *handle)
                psp->autoload_supported = false;
                break;
        case IP_VERSION(11, 0, 0):
+       case IP_VERSION(11, 0, 7):
+               adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
+               fallthrough;
        case IP_VERSION(11, 0, 5):
        case IP_VERSION(11, 0, 9):
-       case IP_VERSION(11, 0, 7):
        case IP_VERSION(11, 0, 11):
        case IP_VERSION(11, 5, 0):
        case IP_VERSION(11, 0, 12):
@@ -202,8 +201,8 @@ static int psp_early_init(void *handle)
        case IP_VERSION(13, 0, 3):
        case IP_VERSION(13, 0, 5):
        case IP_VERSION(13, 0, 8):
-       case IP_VERSION(13, 0, 10):
        case IP_VERSION(13, 0, 11):
+       case IP_VERSION(14, 0, 0):
                psp_v13_0_set_psp_funcs(psp);
                psp->autoload_supported = true;
                break;
@@ -215,8 +214,10 @@ static int psp_early_init(void *handle)
                break;
        case IP_VERSION(13, 0, 0):
        case IP_VERSION(13, 0, 7):
+       case IP_VERSION(13, 0, 10):
                psp_v13_0_set_psp_funcs(psp);
                psp->autoload_supported = true;
+               adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
                break;
        case IP_VERSION(13, 0, 4):
                psp_v13_0_4_set_psp_funcs(psp);
@@ -462,13 +463,6 @@ static int psp_sw_init(void *handle)
                }
        }
 
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
-           adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
-               ret = psp_sysfs_init(adev);
-               if (ret)
-                       return ret;
-       }
-
        ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
                                      amdgpu_sriov_vf(adev) ?
                                      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
@@ -498,11 +492,11 @@ static int psp_sw_init(void *handle)
        return 0;
 
 failed2:
-       amdgpu_bo_free_kernel(&psp->fw_pri_bo,
-                             &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-failed1:
        amdgpu_bo_free_kernel(&psp->fence_buf_bo,
                              &psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+       amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+                             &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
        return ret;
 }
 
@@ -520,10 +514,6 @@ static int psp_sw_fini(void *handle)
        amdgpu_ucode_release(&psp->cap_fw);
        amdgpu_ucode_release(&psp->toc_fw);
 
-       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
-           adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
-               psp_sysfs_fini(adev);
-
        kfree(cmd);
        cmd = NULL;
 
@@ -2459,8 +2449,8 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
        return ret;
 }
 
-static int psp_execute_non_psp_fw_load(struct psp_context *psp,
-                                 struct amdgpu_firmware_info *ucode)
+int psp_execute_ip_fw_load(struct psp_context *psp,
+                          struct amdgpu_firmware_info *ucode)
 {
        int ret = 0;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
@@ -2503,7 +2493,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
                        DRM_WARN("Failed to set MP1 state prepare for reload\n");
        }
 
-       ret = psp_execute_non_psp_fw_load(psp, ucode);
+       ret = psp_execute_ip_fw_load(psp, ucode);
 
        if (ret)
                DRM_ERROR("PSP load smu failed!\n");
@@ -2545,7 +2535,7 @@ int psp_load_fw_list(struct psp_context *psp,
        for (i = 0; i < ucode_count; ++i) {
                ucode = ucode_list[i];
                psp_print_fw_hdr(psp, ucode);
-               ret = psp_execute_non_psp_fw_load(psp, ucode);
+               ret = psp_execute_ip_fw_load(psp, ucode);
                if (ret)
                        return ret;
        }
@@ -2592,7 +2582,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
 
                psp_print_fw_hdr(psp, ucode);
 
-               ret = psp_execute_non_psp_fw_load(psp, ucode);
+               ret = psp_execute_ip_fw_load(psp, ucode);
                if (ret)
                        return ret;
 
@@ -2931,19 +2921,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
        return ret;
 }
 
-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
-                       uint64_t cmd_gpu_addr, int cmd_size)
-{
-       struct amdgpu_firmware_info ucode = {0};
-
-       ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
-               AMDGPU_UCODE_ID_VCN0_RAM;
-       ucode.mc_addr = cmd_gpu_addr;
-       ucode.ucode_size = cmd_size;
-
-       return psp_execute_non_psp_fw_load(&adev->psp, &ucode);
-}
-
 int psp_ring_cmd_submit(struct psp_context *psp,
                        uint64_t cmd_buf_mc_addr,
                        uint64_t fence_mc_addr,
@@ -3584,6 +3561,11 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
        drm_dev_exit(idx);
 }
 
+/**
+ * DOC: usbc_pd_fw
+ * Reading from this file will retrieve the USB-C PD firmware version. Writing to
+ * this file will trigger the update process.
+ */
 static DEVICE_ATTR(usbc_pd_fw, 0644,
                   psp_usbc_pd_fw_sysfs_read,
                   psp_usbc_pd_fw_sysfs_write);
@@ -3624,7 +3606,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
        adev->psp.vbflash_image_size += count;
        mutex_unlock(&adev->psp.mutex);
 
-       dev_info(adev->dev, "VBIOS flash write PSP done");
+       dev_dbg(adev->dev, "IFWI staged for update");
 
        return count;
 }
@@ -3644,7 +3626,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
        if (adev->psp.vbflash_image_size == 0)
                return -EINVAL;
 
-       dev_info(adev->dev, "VBIOS flash to PSP started");
+       dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
 
        ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
                                        AMDGPU_GPU_PAGE_SIZE,
@@ -3669,14 +3651,32 @@ rel_buf:
        adev->psp.vbflash_image_size = 0;
 
        if (ret) {
-               dev_err(adev->dev, "Failed to load VBIOS FW, err = %d", ret);
+               dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
                return ret;
        }
 
-       dev_info(adev->dev, "VBIOS flash to PSP done");
+       dev_dbg(adev->dev, "PSP IFWI flash process done\n");
        return 0;
 }
 
+/**
+ * DOC: psp_vbflash
+ * Writing to this file will stage an IFWI for update. Reading from this file
+ * will trigger the update process.
+ */
+static struct bin_attribute psp_vbflash_bin_attr = {
+       .attr = {.name = "psp_vbflash", .mode = 0660},
+       .size = 0,
+       .write = amdgpu_psp_vbflash_write,
+       .read = amdgpu_psp_vbflash_read,
+};
+
+/**
+ * DOC: psp_vbflash_status
+ * The status of the flash process.
+ * 0: IFWI flash not complete.
+ * 1: IFWI flash complete.
+ */
 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -3693,39 +3693,49 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
 
        return sysfs_emit(buf, "0x%x\n", vbflash_status);
 }
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
 
-static const struct bin_attribute psp_vbflash_bin_attr = {
-       .attr = {.name = "psp_vbflash", .mode = 0660},
-       .size = 0,
-       .write = amdgpu_psp_vbflash_write,
-       .read = amdgpu_psp_vbflash_read,
+static struct bin_attribute *bin_flash_attrs[] = {
+       &psp_vbflash_bin_attr,
+       NULL
 };
 
-static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
+static struct attribute *flash_attrs[] = {
+       &dev_attr_psp_vbflash_status.attr,
+       &dev_attr_usbc_pd_fw.attr,
+       NULL
+};
 
-int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
+static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
 {
-       int ret = 0;
+       struct device *dev = kobj_to_dev(kobj);
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       if (amdgpu_sriov_vf(adev))
-               return -EINVAL;
+       if (attr == &dev_attr_usbc_pd_fw.attr)
+               return adev->psp.sup_pd_fw_up ? 0660 : 0;
 
-       switch (adev->ip_versions[MP0_HWIP][0]) {
-       case IP_VERSION(13, 0, 0):
-       case IP_VERSION(13, 0, 7):
-       case IP_VERSION(13, 0, 10):
-               ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr);
-               if (ret)
-                       dev_err(adev->dev, "Failed to create device file psp_vbflash");
-               ret = device_create_file(adev->dev, &dev_attr_psp_vbflash_status);
-               if (ret)
-                       dev_err(adev->dev, "Failed to create device file psp_vbflash_status");
-               return ret;
-       default:
-               return 0;
-       }
+       return adev->psp.sup_ifwi_up ? 0440 : 0;
 }
 
+static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
+                                               struct bin_attribute *attr,
+                                               int idx)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+
+       return adev->psp.sup_ifwi_up ? 0660 : 0;
+}
+
+const struct attribute_group amdgpu_flash_attr_group = {
+       .attrs = flash_attrs,
+       .bin_attrs = bin_flash_attrs,
+       .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
+       .is_visible = amdgpu_flash_attr_is_visible,
+};
+
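
The group above replaces the removed device_create_file()/sysfs_create_bin_file() calls: the is_visible callbacks decide per device whether each file exists and with which mode, so no IP-version switch is needed at creation time. A hypothetical hookup sketch (assumption: in this series the group is attached from the device registration path elsewhere in amdgpu):

/* devm_device_add_group() consults .is_visible/.is_bin_visible, so
 * unsupported files simply never appear in sysfs. */
static int example_attach_flash_group(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return 0;	/* mirror the old host-only check */

	return devm_device_add_group(adev->dev, &amdgpu_flash_attr_group);
}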
 const struct amd_ip_funcs psp_ip_funcs = {
        .name = "psp",
        .early_init = psp_early_init,
@@ -3744,27 +3754,6 @@ const struct amd_ip_funcs psp_ip_funcs = {
        .set_powergating_state = psp_set_powergating_state,
 };
 
-static int psp_sysfs_init(struct amdgpu_device *adev)
-{
-       int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
-
-       if (ret)
-               DRM_ERROR("Failed to create USBC PD FW control file!");
-
-       return ret;
-}
-
-void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev)
-{
-       sysfs_remove_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr);
-       device_remove_file(adev->dev, &dev_attr_psp_vbflash_status);
-}
-
-static void psp_sysfs_fini(struct amdgpu_device *adev)
-{
-       device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
-}
-
 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 3,
index 2cae0b1a0b8ac720d2c84d7483be426474be5f1a..c3203de4a0078ff2f3cd8983e6256b4280102289 100644 (file)
@@ -309,8 +309,8 @@ struct psp_runtime_scpm_entry {
 
 struct psp_context
 {
-       struct amdgpu_device            *adev;
-       struct psp_ring                 km_ring;
+       struct amdgpu_device            *adev;
+       struct psp_ring                 km_ring;
        struct psp_gfx_cmd_resp         *cmd;
 
        const struct psp_funcs          *funcs;
@@ -339,7 +339,7 @@ struct psp_context
        uint64_t                        tmr_mc_addr;
 
        /* asd firmware */
-       const struct firmware   *asd_fw;
+       const struct firmware           *asd_fw;
 
        /* toc firmware */
        const struct firmware           *toc_fw;
@@ -384,9 +384,13 @@ struct psp_context
 
        uint32_t                        boot_cfg_bitmask;
 
-       char *vbflash_tmp_buf;
-       size_t vbflash_image_size;
-       bool vbflash_done;
+       /* firmware upgrades supported */
+       bool                            sup_pd_fw_up;
+       bool                            sup_ifwi_up;
+
+       char                            *vbflash_tmp_buf;
+       size_t                          vbflash_image_size;
+       bool                            vbflash_done;
 };
 
 struct amdgpu_psp_funcs {
@@ -458,9 +462,10 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
                        uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
 
+int psp_execute_ip_fw_load(struct psp_context *psp,
+                          struct amdgpu_firmware_info *ucode);
+
 int psp_gpu_reset(struct amdgpu_device *adev);
-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
-                       uint64_t cmd_gpu_addr, int cmd_size);
 
 int psp_ta_init_shared_buf(struct psp_context *psp,
                                  struct ta_mem_context *mem_ctx);
@@ -525,6 +530,4 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
 
 int is_psp_fw_valid(struct psp_bin_desc bin);
 
-int amdgpu_psp_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev);
 #endif
index 8aaa427f8c0f63cfe824eb3c2f360737bfe7933a..62011a52183363e36de377269d6c82fa64c2918c 100644 (file)
@@ -1159,7 +1159,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
        }
 
        /* Calculate XGMI relative offset */
-       if (adev->gmc.xgmi.num_physical_nodes > 1) {
+       if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+           info->head.block != AMDGPU_RAS_BLOCK__GFX) {
                block_info.address =
                        amdgpu_xgmi_get_relative_phy_addr(adev,
                                                          block_info.address);
@@ -2414,6 +2415,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_IP_DISCOVERY) {
                switch (adev->ip_versions[MP0_HWIP][0]) {
                case IP_VERSION(13, 0, 0):
+               case IP_VERSION(13, 0, 6):
                case IP_VERSION(13, 0, 10):
                        return true;
                default:
@@ -2440,10 +2442,10 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
        if (!ctx)
                return;
 
-       if (strnstr(ctx->vbios_version, "D16406",
-                   sizeof(ctx->vbios_version)) ||
-               strnstr(ctx->vbios_version, "D36002",
-                       sizeof(ctx->vbios_version)))
+       if (strnstr(ctx->vbios_pn, "D16406",
+                   sizeof(ctx->vbios_pn)) ||
+               strnstr(ctx->vbios_pn, "D36002",
+                       sizeof(ctx->vbios_pn)))
                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
 }
 
@@ -2515,8 +2517,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        /* hw_supported needs to be aligned with RAS block mask. */
        adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
 
-       adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
-               adev->ras_hw_enabled & amdgpu_ras_mask;
+       /*
+        * Disable the RAS feature by default on
+        * aqua vanjaram APU platforms.
+        */
+       if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&
+           adev->gmc.is_app_apu)
+               adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
+                       adev->ras_hw_enabled & amdgpu_ras_mask;
+       else
+               adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
+                       adev->ras_hw_enabled & amdgpu_ras_mask;
 }
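
In short: on aqua vanjaram APUs RAS becomes opt-in (only amdgpu_ras_enable=1 turns it on), while everywhere else it stays default-on unless amdgpu_ras_enable=0 forces it off. A standalone sketch of that gating, assuming the module-parameter semantics of -1 auto / 0 off / 1 on:

#include <stdbool.h>

static unsigned long ras_enabled_mask(int enable_param, bool aqua_vanjaram_apu,
				      unsigned long hw_mask,
				      unsigned long user_mask)
{
	if (aqua_vanjaram_apu)
		/* opt-in: anything but an explicit 1 leaves RAS disabled */
		return enable_param != 1 ? 0 : (hw_mask & user_mask);

	/* default-on: only an explicit 0 disables RAS */
	return enable_param == 0 ? 0 : (hw_mask & user_mask);
}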
 
 static void amdgpu_ras_counte_dw(struct work_struct *work)
index 0648dfe559afcffb738404a1d111a34bd20959d3..4287743e1212457e2ed52f14e198cdb40e909e72 100644 (file)
@@ -194,9 +194,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
                /* VEGA20 and ARCTURUS */
                if (adev->asic_type == CHIP_VEGA20)
                        control->i2c_address = EEPROM_I2C_MADDR_0;
-               else if (strnstr(atom_ctx->vbios_version,
+               else if (strnstr(atom_ctx->vbios_pn,
                                 "D342",
-                                sizeof(atom_ctx->vbios_version)))
+                                sizeof(atom_ctx->vbios_pn)))
                        control->i2c_address = EEPROM_I2C_MADDR_0;
                else
                        control->i2c_address = EEPROM_I2C_MADDR_4;
@@ -205,8 +205,8 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
                control->i2c_address = EEPROM_I2C_MADDR_0;
                return true;
        case IP_VERSION(13, 0, 2):
-               if (strnstr(atom_ctx->vbios_version, "D673",
-                           sizeof(atom_ctx->vbios_version)))
+               if (strnstr(atom_ctx->vbios_pn, "D673",
+                           sizeof(atom_ctx->vbios_pn)))
                        control->i2c_address = EEPROM_I2C_MADDR_4;
                else
                        control->i2c_address = EEPROM_I2C_MADDR_0;
index eec41ad3040603622f4448458cd9f7342612acb4..5fed06ffcc6b701e5394f43a918f79277bc63eb5 100644 (file)
@@ -87,7 +87,7 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
                reset_handler = adev->reset_cntl->get_reset_handler(
                        adev->reset_cntl, reset_context);
        if (!reset_handler)
-               return -ENOSYS;
+               return -EOPNOTSUPP;
 
        return reset_handler->prepare_hwcontext(adev->reset_cntl,
                                                reset_context);
@@ -103,7 +103,7 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
                reset_handler = adev->reset_cntl->get_reset_handler(
                        adev->reset_cntl, reset_context);
        if (!reset_handler)
-               return -ENOSYS;
+               return -EOPNOTSUPP;
 
        ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
        if (ret)
index b22d4fb2a8470e457ba5f0db4a9dcda352865716..d3186b570b82e352c3574a8eac1e86dd603be38a 100644 (file)
@@ -56,6 +56,15 @@ enum amdgpu_ring_mux_offset_type {
        AMDGPU_MUX_OFFSET_TYPE_CE,
 };
 
+enum ib_complete_status {
+       /* IB not started/reset value, default value. */
+       IB_COMPLETION_STATUS_DEFAULT = 0,
+       /* IB preempted, started but not completed. */
+       IB_COMPLETION_STATUS_PREEMPTED = 1,
+       /* IB completed. */
+       IB_COMPLETION_STATUS_COMPLETED = 2,
+};
+
 struct amdgpu_ring_mux {
        struct amdgpu_ring      *real_ring;
 
index 80b263646966eef671ce23ee600e079350ebee82..b591d33af26452aa58066cd0ea346ef3526305e5 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "clearstate_defs.h"
 
+#define AMDGPU_MAX_RLC_INSTANCES       8
+
 /* firmware ID used in rlc toc */
 typedef enum _FIRMWARE_ID_ {
        FIRMWARE_ID_INVALID                                     = 0,
@@ -201,7 +203,7 @@ struct amdgpu_rlc {
        u32                     cp_table_size;
 
        /* safe mode for updating CG/PG state */
-       bool in_safe_mode[8];
+       bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES];
        const struct amdgpu_rlc_funcs *funcs;
 
        /* for firmware data */
@@ -257,7 +259,7 @@ struct amdgpu_rlc {
 
        bool rlcg_reg_access_supported;
        /* registers for rlcg indirect reg access */
-       struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl;
+       struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES];
 };
 
 void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id);
index dacf281d2b217594490ebb11b4ebe937b236c959..e2b9392d7f0de826b472117fde437b30c97f4cb6 100644 (file)
@@ -239,9 +239,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
                               sizeof(struct amdgpu_sdma_instance));
        }
 
-       if (amdgpu_sriov_vf(adev))
-               return 0;
-
        DRM_DEBUG("psp_load == '%s'\n",
                  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
 
index ae455aab5d29ddfd154e0533796b88b96cdfc98a..36b55d2bd51a91fa52986cba5831b73bb102632c 100644 (file)
@@ -1239,3 +1239,18 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 
        return 0;
 }
+
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+                              enum AMDGPU_UCODE_ID ucode_id)
+{
+       struct amdgpu_firmware_info ucode = {
+               .ucode_id = (ucode_id ? ucode_id :
+                           (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
+                                       AMDGPU_UCODE_ID_VCN0_RAM)),
+               .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+               .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+                             (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
+       };
+
+       return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
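
This helper absorbs the removed psp_update_vcn_sram() wrapper: the address and size now come from the instance's DPG SRAM bookkeeping, and passing 0 as ucode_id falls back to VCN0/VCN1 RAM based on inst_idx. A hypothetical call-site sketch:

/* Hypothetical wrapper: flush staged DPG SRAM commands through the PSP. */
static int example_flush_dpg_sram(struct amdgpu_device *adev, int inst_idx)
{
	if (!adev->vcn.indirect_sram)
		return 0;	/* nothing staged in this configuration */

	return amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
}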
index 92d5534df5f42848b2a5f8f49b3298fd8c399137..a3eed90b6af090478142f3936323742774a7ddcb 100644 (file)
        } while (0)
 
 #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
+#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
 #define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
 #define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
 #define AMDGPU_VCN_SW_RING_FLAG                (1 << 9)
 #define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
 #define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
 
+#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+
 enum fw_queue_mode {
        FW_QUEUE_RING_RESET = 1,
        FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -343,6 +346,11 @@ struct amdgpu_fw_shared_rb_setup {
        uint32_t  reserved[6];
 };
 
+struct amdgpu_fw_shared_drm_key_wa {
+       uint8_t  method;
+       uint8_t  reserved[3];
+};
+
 struct amdgpu_vcn4_fw_shared {
        uint32_t present_flag_0;
        uint8_t pad[12];
@@ -352,6 +360,7 @@ struct amdgpu_vcn4_fw_shared {
        uint8_t pad2[20];
        struct amdgpu_fw_shared_rb_setup rb_setup;
        struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+       struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
 };
 
 struct amdgpu_vcn_fwlog {
@@ -414,4 +423,7 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
                        struct ras_common_if *ras_block);
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
 
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+                              enum AMDGPU_UCODE_ID ucode_id);
+
 #endif
index 41aa853a07d24d3bada2f7e1f6a44c6f3981f473..ec044f711eb9e936ff997f4228ba8cf04a2b94ee 100644 (file)
@@ -835,6 +835,16 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
        return mode;
 }
 
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
+               /* force set to GFXOFF state after reset,
+               /* Force the GFXOFF state after reset to avoid invalid
+                * operations before the GC is re-enabled.
+               adev->gfx.is_poweron = false;
+       }
+}
+
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
 {
        switch (adev->ip_versions[MP0_HWIP][0]) {
@@ -845,6 +855,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_i
                        return false;
                else
                        return true;
+       case IP_VERSION(11, 0, 9):
+       case IP_VERSION(11, 0, 7):
+               /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+               if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+                   || ucode_id == AMDGPU_UCODE_ID_SMC)
+                       return true;
+               else
+                       return false;
        case IP_VERSION(13, 0, 10):
                /* white list */
                if (ucode_id == AMDGPU_UCODE_ID_CAP
@@ -954,7 +975,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
        return ret;
 }
 
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
        uint32_t timeout = 50000;
@@ -972,7 +993,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
                return 0;
        }
 
-       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+               dev_err(adev->dev, "invalid xcc\n");
+               return 0;
+       }
+
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
        scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
        scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
        scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
@@ -1037,13 +1063,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
 
 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                       u32 offset, u32 value,
-                      u32 acc_flags, u32 hwip)
+                      u32 acc_flags, u32 hwip, u32 xcc_id)
 {
        u32 rlcg_flag;
 
        if (!amdgpu_sriov_runtime(adev) &&
                amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
-               amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+               amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
                return;
        }
 
@@ -1054,13 +1080,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
 }
 
 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
-                     u32 offset, u32 acc_flags, u32 hwip)
+                     u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
 {
        u32 rlcg_flag;
 
        if (!amdgpu_sriov_runtime(adev) &&
                amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
-               return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+               return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
 
        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                return RREG32_NO_KIQ(offset);
index 4f7bab52282ac1b6463fb2420315bbdf4b92b0ba..fabb83e9d9aec701f1105ffc471480db07f6bb4b 100644 (file)
@@ -355,9 +355,10 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
                        struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                       u32 offset, u32 value,
-                      u32 acc_flags, u32 hwip);
+                      u32 acc_flags, u32 hwip, u32 xcc_id);
 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
-                     u32 offset, u32 acc_flags, u32 hwip);
+                     u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
                        uint32_t ucode_id);
+void amdgpu_virt_post_reset(struct amdgpu_device *adev);
 #endif
index 53ff91fc6cf6bdc80204939ebb7d74f8f8dbb34a..7148a216ae2fe423ebeab8a8e20076233f55bb4a 100644 (file)
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
                DRM_WARN("%s: vblank timer overrun\n", __func__);
 
        ret = drm_crtc_handle_vblank(crtc);
+       /* Don't queue timer again when vblank is disabled. */
        if (!ret)
-               DRM_ERROR("amdgpu_vkms failure on handling vblank");
+               return HRTIMER_NORESTART;
 
        return HRTIMER_RESTART;
 }
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-       hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+       hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
 }
 
 static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@@ -500,8 +501,6 @@ static int amdgpu_vkms_sw_init(void *handle)
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
-       adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
index 291977b93b1dfbde6dde4268434208cdf8ef8329..f5daadcec865d6863650d1d515fd416ac890595c 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
 #include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
@@ -111,9 +112,9 @@ struct amdgpu_prt_cb {
 };
 
 /**
- * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
+ * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
  */
-struct amdgpu_vm_tlb_seq_cb {
+struct amdgpu_vm_tlb_seq_struct {
        /**
         * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
         */
@@ -339,25 +340,20 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 }
 
 /**
- * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ * amdgpu_vm_lock_pd - lock PD in drm_exec
  *
  * @vm: vm providing the BOs
- * @validated: head of validation list
- * @entry: entry to add
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
  *
- * Add the page directory to the list of BOs to
- * validate for command submission.
+ * Lock the VM root PD in the DRM execution context.
  */
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-                        struct list_head *validated,
-                        struct amdgpu_bo_list_entry *entry)
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+                     unsigned int num_fences)
 {
-       entry->priority = 0;
-       entry->tv.bo = &vm->root.bo->tbo;
-       /* Two for VM updates, one for TTM and one for the CS job */
-       entry->tv.num_shared = 4;
-       entry->user_pages = NULL;
-       list_add(&entry->tv.head, validated);
+       /* We need at least two fences for the VM PD/PT updates */
+       return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
+                                   2 + num_fences);
 }
 
 /**
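
The conversion above replaces hand-maintained validation-list bookkeeping (an amdgpu_bo_list_entry with a manually counted num_shared) with the drm_exec helper, which handles ww-mutex contention and fence-slot reservation itself. A hedged sketch of how a caller might drive the new amdgpu_vm_lock_pd(), assuming the drm_exec API as merged for 6.6 (drm_exec_init() taking only the flags argument):

    struct drm_exec exec;
    int r;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
            /* Reserve two extra fence slots for this submission. */
            r = amdgpu_vm_lock_pd(vm, &exec, 2);
            drm_exec_retry_on_contention(&exec);    /* loop again on -EDEADLK */
            if (r)
                    goto error;
    }
    /* ... validate BOs and submit the job here ... */
    error:
    drm_exec_fini(&exec);

The fixed "+ 2" inside amdgpu_vm_lock_pd() covers the PD/PT update fences, so callers only pass the slots they themselves need.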
@@ -833,7 +829,7 @@ error:
 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *cb)
 {
-       struct amdgpu_vm_tlb_seq_cb *tlb_cb;
+       struct amdgpu_vm_tlb_seq_struct *tlb_cb;
 
        tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
        atomic64_inc(&tlb_cb->vm->tlb_seq);
@@ -871,7 +867,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           struct dma_fence **fence)
 {
        struct amdgpu_vm_update_params params;
-       struct amdgpu_vm_tlb_seq_cb *tlb_cb;
+       struct amdgpu_vm_tlb_seq_struct *tlb_cb;
        struct amdgpu_res_cursor cursor;
        enum amdgpu_sync_mode sync_mode;
        int r, idx;
@@ -2121,13 +2117,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @xcp_id: GPU partition selection id
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
 {
        struct amdgpu_bo *root_bo;
        struct amdgpu_bo_vm *root;
@@ -2177,7 +2174,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        vm->evicting = false;
 
        r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
-                               false, &root);
+                               false, &root, xcp_id);
        if (r)
                goto error_free_delayed;
        root_bo = &root->bo;
@@ -2279,16 +2276,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                        goto unreserve_bo;
 
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
+               r = amdgpu_vm_pt_map_tables(adev, vm);
+               if (r)
+                       goto unreserve_bo;
+
        } else {
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        }
-       /*
-        * Make sure root PD gets mapped. As vm_update_mode could be changed
-        * when turning a GFX VM into a compute VM.
-        */
-       r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
-       if (r)
-               goto unreserve_bo;
 
        dma_fence_put(vm->last_update);
        vm->last_update = dma_fence_get_stub();
@@ -2604,7 +2598,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                /* Intentionally setting invalid PTE flag
                 * combination to force a no-retry-fault
                 */
-               flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
+               flags = AMDGPU_VM_NORETRY_FLAGS;
                value = 0;
        } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
                /* Redirect the access to the dummy page */
index 9c85d494f2a24df5919c435728816bdfb3be8936..204ab13184ed1ce1b040e36b67e5a038060505d6 100644 (file)
@@ -36,6 +36,8 @@
 #include "amdgpu_ring.h"
 #include "amdgpu_ids.h"
 
+struct drm_exec;
+
 struct amdgpu_bo_va;
 struct amdgpu_job;
 struct amdgpu_bo_list_entry;
@@ -84,7 +86,13 @@ struct amdgpu_mem_stats;
 /* PDE Block Fragment Size for VEGA10 */
 #define AMDGPU_PDE_BFS(a)      ((uint64_t)a << 59)
 
+/* Flag combination to set no-retry with TF disabled */
+#define AMDGPU_VM_NORETRY_FLAGS        (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
+                               AMDGPU_PTE_TF)
 
+/* Flag combination to set no-retry with TF enabled */
+#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
+                                  AMDGPU_PTE_PRT)
 /* For GFX9 */
 #define AMDGPU_PTE_MTYPE_VG10(a)       ((uint64_t)(a) << 57)
 #define AMDGPU_PTE_MTYPE_VG10_MASK     AMDGPU_PTE_MTYPE_VG10(3ULL)
@@ -392,13 +400,12 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        u32 pasid);
 
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-                        struct list_head *validated,
-                        struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+                     unsigned int num_fences);
 bool amdgpu_vm_ready(struct amdgpu_vm *vm);
 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -475,7 +482,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct amdgpu_bo_vm *vmbo, bool immediate);
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                       int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+                       int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+                       int32_t xcp_id);
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
@@ -491,6 +499,8 @@ void amdgpu_vm_pt_free_work(struct work_struct *work);
 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
 #endif
 
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
 /**
  * amdgpu_vm_tlb_seq - return tlb flush sequence number
  * @vm: the amdgpu_vm structure to query
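
The sequence counter documented here pairs with the renamed callback earlier in this file: amdgpu_vm_tlb_seq_cb() bumps vm->tlb_seq once the update fence signals. A sketch of the caller-side idiom (field names hypothetical) — compare a cached value and flush only when translations actually changed:

    /* Hypothetical submission path: detect stale TLB entries. */
    if (cached_tlb_seq != amdgpu_vm_tlb_seq(vm)) {
            cached_tlb_seq = amdgpu_vm_tlb_seq(vm);
            needs_tlb_flush = true;  /* PTEs were updated since last use */
    }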
index 31913ae86de6bb894ded2fc8142178dd1e7d845a..6e31621452de10d7c9340a1995cbd56d7924a32b 100644 (file)
@@ -31,6 +31,7 @@
  */
 static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
 {
+       table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        return amdgpu_bo_kmap(&table->bo, NULL);
 }
 
index dea1a64be44d092b78d34692968bc68aa9fb3c2c..96d601e209b8bda3acc29d739edb6d083228e66e 100644 (file)
@@ -498,11 +498,12 @@ exit:
  * @level: the page table level
 * @immediate: use an immediate update
  * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
  */
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                       int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+                       int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+                       int32_t xcp_id)
 {
-       struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *bo;
        struct dma_resv *resv;
@@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
        bp.type = ttm_bo_type_kernel;
        bp.no_wait_gpu = immediate;
-       bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+       bp.xcp_id_plus1 = xcp_id + 1;
 
        if (vm->root.bo)
                bp.resv = vm->root.bo->tbo.base.resv;
@@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        bp.type = ttm_bo_type_kernel;
        bp.resv = bo->tbo.base.resv;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-       bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+       bp.xcp_id_plus1 = xcp_id + 1;
 
        r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
 
@@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
                return 0;
 
        amdgpu_vm_eviction_unlock(vm);
-       r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+       r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+                               vm->root.bo->xcp_id);
        amdgpu_vm_eviction_lock(vm);
        if (r)
                return r;
@@ -778,6 +780,27 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
                                        1, 0, flags);
 }
 
+/**
+ * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: pointer to PTE flags
+ *
+ * Update PTE no-retry flags when TF is enabled.
+ */
+static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
+                                               uint64_t *flags)
+{
+       /*
+        * Update no-retry flags with the corresponding TF
+        * no-retry combination.
+        */
+       if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
+               *flags &= ~AMDGPU_VM_NORETRY_FLAGS;
+               *flags |= adev->gmc.noretry_flags;
+       }
+}
+
 /*
  * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
  *
@@ -804,6 +827,16 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
                flags |= AMDGPU_PTE_EXECUTABLE;
        }
 
+       /*
+        * Update no-retry flags to use the no-retry flag combination
+        * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS combination does
+        * not work when TF is enabled, so replace it with the
+        * AMDGPU_VM_NORETRY_FLAGS_TF combination, which works in all
+        * cases.
+        */
+       if (level == AMDGPU_VM_PTB)
+               amdgpu_vm_pte_update_noretry_flags(adev, &flags);
+
        /* APUs mapping system memory may need different MTYPEs on different
         * NUMA nodes. Only do this for contiguous ranges that can be assumed
         * to be on the same NUMA node.
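
The helper added above, together with the AMDGPU_VM_NORETRY_FLAGS / AMDGPU_VM_NORETRY_FLAGS_TF pair from the header, is a whole-combination swap: the test (*flags & MASK) == MASK only fires when every bit of the old encoding is present, so partial overlaps are left alone. The same idiom, sketched standalone with hypothetical bit values:

    #include <linux/bits.h>

    #define OLD_COMBO (BIT(0) | BIT(3) | BIT(5))    /* hypothetical encoding */
    #define NEW_COMBO (BIT(1) | BIT(2))

    static void swap_combo(u64 *flags)
    {
            if ((*flags & OLD_COMBO) == OLD_COMBO) {   /* all bits set? */
                    *flags &= ~OLD_COMBO;   /* drop the old encoding */
                    *flags |= NEW_COMBO;    /* install the replacement */
            }
    }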
@@ -1044,3 +1077,31 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
 
        return 0;
 }
+
+/**
+ * amdgpu_vm_pt_map_tables - make the root PD and its page tables CPU accessible
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Make the root page directory and everything below it CPU accessible.
+ */
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+       struct amdgpu_vm_pt_cursor cursor;
+       struct amdgpu_vm_bo_base *entry;
+
+       for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+
+               struct amdgpu_bo_vm *bo;
+               int r;
+
+               if (entry->bo) {
+                       bo = to_amdgpu_bo_vm(entry->bo);
+                       r = vm->update_funcs->map_table(bo);
+                       if (r)
+                               return r;
+               }
+       }
+
+       return 0;
+}
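
This walker mirrors the amdgpu_vm_make_compute() change earlier in the series: when a GFX VM becomes a compute VM using CPU page-table updates, every already-allocated table — not just the root, as before — must be kmapped. The entry->bo check matters because the DFS cursor also visits slots whose tables were never allocated. A sketch of the same traversal, assuming the for_each_amdgpu_vm_pt_dfs_safe cursor macro, here merely counting allocated tables:

    struct amdgpu_vm_pt_cursor cursor;
    struct amdgpu_vm_bo_base *entry;
    unsigned int count = 0;

    for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
            if (entry->bo)
                    count++;    /* entry->bo is NULL for unallocated PTs */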
index d175e862f2226a3d13dcce57b161d7223aa13adf..9c9cca1294989c25f7c3d251e34be478e9391bc5 100644 (file)
@@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
        if (!adev->xcp_mgr)
                return 0;
 
-       fpriv->xcp_id = ~0;
+       fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
        for (i = 0; i < MAX_XCP; ++i) {
                if (!adev->xcp_mgr->xcp[i].ddev)
                        break;
@@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
                }
        }
 
-       fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
+       fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
                                adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
        return 0;
 }
index 0f8026d64ea518b7f7f390fd5373ac62b9b6c59e..9a1036aeec2a0bcd289a778abcb12b8beb74599f 100644 (file)
@@ -37,6 +37,8 @@
 #define AMDGPU_XCP_FL_NONE 0
 #define AMDGPU_XCP_FL_LOCKED (1 << 0)
 
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {
index 03dc59cbe8aacd62a21b8436f9350f524bea1578..7e91b24784e5ee888a41556b22a851c1d681f004 100644 (file)
@@ -500,6 +500,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
        hive = kzalloc(sizeof(*hive), GFP_KERNEL);
        if (!hive) {
                dev_err(adev->dev, "XGMI: allocation failed\n");
+               ret = -ENOMEM;
                hive = NULL;
                goto pro_end;
        }
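
A small but classic error-path fix: without the assignment, ret is left holding whatever value it last had (possibly uninitialized) when the kzalloc() failure jumps to pro_end. The idiom, sketched:

    hive = kzalloc(sizeof(*hive), GFP_KERNEL);
    if (!hive) {
            ret = -ENOMEM;  /* set explicitly; don't rely on a prior value */
            goto pro_end;
    }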
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
new file mode 100644 (file)
index 0000000..d0fc627
--- /dev/null
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "amdgpu_xcp.h"
+#include "gfx_v9_4_3.h"
+#include "gfxhub_v1_2.h"
+#include "sdma_v4_4_2.h"
+
+#define XCP_INST_MASK(num_inst, xcp_id)                                        \
+       (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
+
+#define AMDGPU_XCP_OPS_KFD     (1 << 0)
+
+void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
+
+       adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
+
+       adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
+       adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
+       adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
+
+       adev->doorbell_index.sdma_doorbell_range = 20;
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->doorbell_index.sdma_engine[i] =
+                       AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
+                       i * (adev->doorbell_index.sdma_doorbell_range >> 1);
+
+       adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
+       adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
+
+       adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
+       adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
+
+       adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
+}
+
+static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
+                            uint32_t inst_idx, struct amdgpu_ring *ring)
+{
+       int xcp_id;
+       enum AMDGPU_XCP_IP_BLOCK ip_blk;
+       uint32_t inst_mask;
+
+       ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+               return;
+
+       inst_mask = 1 << inst_idx;
+
+       switch (ring->funcs->type) {
+       case AMDGPU_HW_IP_GFX:
+       case AMDGPU_RING_TYPE_COMPUTE:
+       case AMDGPU_RING_TYPE_KIQ:
+               ip_blk = AMDGPU_XCP_GFX;
+               break;
+       case AMDGPU_RING_TYPE_SDMA:
+               ip_blk = AMDGPU_XCP_SDMA;
+               break;
+       case AMDGPU_RING_TYPE_VCN_ENC:
+       case AMDGPU_RING_TYPE_VCN_JPEG:
+               ip_blk = AMDGPU_XCP_VCN;
+               if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+                       inst_mask = 1 << (inst_idx * 2);
+               break;
+       default:
+               DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
+               return;
+       }
+
+       for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+               if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+                       ring->xcp_id = xcp_id;
+                       break;
+               }
+       }
+}
+
+static void aqua_vanjaram_xcp_gpu_sched_update(
+               struct amdgpu_device *adev,
+               struct amdgpu_ring *ring,
+               unsigned int sel_xcp_id)
+{
+       unsigned int *num_gpu_sched;
+
+       num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+                       .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+       adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+                       .sched[(*num_gpu_sched)++] = &ring->sched;
+       DRM_DEBUG("%s: [%d] gpu_sched[%d][%d] = %d", ring->name,
+                       sel_xcp_id, ring->funcs->type,
+                       ring->hw_prio, *num_gpu_sched);
+}
+
+static int aqua_vanjaram_xcp_sched_list_update(
+               struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int i;
+
+       for (i = 0; i < MAX_XCP; i++) {
+               atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+               memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+       }
+
+       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+               return 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               ring = adev->rings[i];
+               if (!ring || !ring->sched.ready || ring->no_scheduler)
+                       continue;
+
+               aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+               /* VCN is shared by two partitions under CPX MODE */
+               if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+                       adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+                       aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+       }
+
+       return 0;
+}
+
+static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+                       ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
+               else
+                       aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
+       }
+
+       return aqua_vanjaram_xcp_sched_list_update(adev);
+}
+
+static int aqua_vanjaram_select_scheds(
+               struct amdgpu_device *adev,
+               u32 hw_ip,
+               u32 hw_prio,
+               struct amdgpu_fpriv *fpriv,
+               unsigned int *num_scheds,
+               struct drm_gpu_scheduler ***scheds)
+{
+       u32 sel_xcp_id;
+       int i;
+
+       if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+               u32 least_ref_cnt = ~0;
+
+               fpriv->xcp_id = 0;
+               for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+                       u32 total_ref_cnt;
+
+                       total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
+                       if (total_ref_cnt < least_ref_cnt) {
+                               fpriv->xcp_id = i;
+                               least_ref_cnt = total_ref_cnt;
+                       }
+               }
+       }
+       sel_xcp_id = fpriv->xcp_id;
+
+       if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+               *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+               *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+               atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+               DRM_DEBUG("Selected partition #%d", sel_xcp_id);
+       } else {
+               DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
+                                        enum amd_hw_ip_block_type block,
+                                        int8_t inst)
+{
+       int8_t dev_inst;
+
+       switch (block) {
+       case GC_HWIP:
+       case SDMA0_HWIP:
+       /* Both JPEG and VCN, as JPEG is only an alias of VCN */
+       case VCN_HWIP:
+               dev_inst = adev->ip_map.dev_inst[block][inst];
+               break;
+       default:
+               /* For the rest of the IPs, no lookup is required.
+                * Assume 'logical instance == physical instance' for all configs. */
+               dev_inst = inst;
+               break;
+       }
+
+       return dev_inst;
+}
+
+static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
+                                        enum amd_hw_ip_block_type block,
+                                        uint32_t mask)
+{
+       uint32_t dev_mask = 0;
+       int8_t log_inst, dev_inst;
+
+       while (mask) {
+               log_inst = ffs(mask) - 1;
+               dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
+               dev_mask |= (1 << dev_inst);
+               mask &= ~(1 << log_inst);
+       }
+
+       return dev_mask;
+}
+
+static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
+                                         enum amd_hw_ip_block_type ip_block,
+                                         uint32_t inst_mask)
+{
+       int l = 0, i;
+
+       while (inst_mask) {
+               i = ffs(inst_mask) - 1;
+               adev->ip_map.dev_inst[ip_block][l++] = i;
+               inst_mask &= ~(1 << i);
+       }
+       for (; l < HWIP_MAX_INSTANCE; l++)
+               adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
+{
+       u32 ip_map[][2] = {
+               { GC_HWIP, adev->gfx.xcc_mask },
+               { SDMA0_HWIP, adev->sdma.sdma_mask },
+               { VCN_HWIP, adev->vcn.inst_mask },
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+               aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+       adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
+       adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
+}
+
+/* Fixed pattern for SMN addressing on different AIDs:
+ *   bit[34]: indicates cross-AID access
+ *   bit[33:32]: indicates the target AID id
+ * The AID id range is 0 ~ 3, as the maximum number of AIDs is 4.
+ */
+u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
+{
+       u64 ext_offset;
+
+       /* local routing; bits [34:32] will be zero */
+       if (ext_id == 0)
+               return 0;
+
+       /* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
+       ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
+
+       return ext_offset;
+}
+
+static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+       enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+       struct amdgpu_device *adev = xcp_mgr->adev;
+
+       if (adev->nbio.funcs->get_compute_partition_mode)
+               mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+
+       return mode;
+}
+
+static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
+{
+       int num_xcc, num_xcc_per_xcp = 0;
+
+       num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+       switch (mode) {
+       case AMDGPU_SPX_PARTITION_MODE:
+               num_xcc_per_xcp = num_xcc;
+               break;
+       case AMDGPU_DPX_PARTITION_MODE:
+               num_xcc_per_xcp = num_xcc / 2;
+               break;
+       case AMDGPU_TPX_PARTITION_MODE:
+               num_xcc_per_xcp = num_xcc / 3;
+               break;
+       case AMDGPU_QPX_PARTITION_MODE:
+               num_xcc_per_xcp = num_xcc / 4;
+               break;
+       case AMDGPU_CPX_PARTITION_MODE:
+               num_xcc_per_xcp = 1;
+               break;
+       }
+
+       return num_xcc_per_xcp;
+}
+
+static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+                                   enum AMDGPU_XCP_IP_BLOCK ip_id,
+                                   struct amdgpu_xcp_ip *ip)
+{
+       struct amdgpu_device *adev = xcp_mgr->adev;
+       int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
+       int num_sdma, num_vcn;
+
+       num_sdma = adev->sdma.num_instances;
+       num_vcn = adev->vcn.num_vcn_inst;
+
+       switch (xcp_mgr->mode) {
+       case AMDGPU_SPX_PARTITION_MODE:
+               num_sdma_xcp = num_sdma;
+               num_vcn_xcp = num_vcn;
+               break;
+       case AMDGPU_DPX_PARTITION_MODE:
+               num_sdma_xcp = num_sdma / 2;
+               num_vcn_xcp = num_vcn / 2;
+               break;
+       case AMDGPU_TPX_PARTITION_MODE:
+               num_sdma_xcp = num_sdma / 3;
+               num_vcn_xcp = num_vcn / 3;
+               break;
+       case AMDGPU_QPX_PARTITION_MODE:
+               num_sdma_xcp = num_sdma / 4;
+               num_vcn_xcp = num_vcn / 4;
+               break;
+       case AMDGPU_CPX_PARTITION_MODE:
+               num_sdma_xcp = 2;
+               num_vcn_xcp = num_vcn ? 1 : 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+
+       switch (ip_id) {
+       case AMDGPU_XCP_GFXHUB:
+               ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+               ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
+               break;
+       case AMDGPU_XCP_GFX:
+               ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+               ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
+               break;
+       case AMDGPU_XCP_SDMA:
+               ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
+               ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
+               break;
+       case AMDGPU_XCP_VCN:
+               ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
+               /* TODO: Assign IP funcs */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ip->ip_id = ip_id;
+
+       return 0;
+}
+
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+       struct amdgpu_device *adev = xcp_mgr->adev;
+       int num_xcc;
+
+       num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+       if (adev->gmc.num_mem_partitions == 1)
+               return AMDGPU_SPX_PARTITION_MODE;
+
+       if (adev->gmc.num_mem_partitions == num_xcc)
+               return AMDGPU_CPX_PARTITION_MODE;
+
+       if (adev->gmc.num_mem_partitions == num_xcc / 2)
+               return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
+                                                   AMDGPU_QPX_PARTITION_MODE;
+
+       if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
+               return AMDGPU_DPX_PARTITION_MODE;
+
+       return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
+static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+                                         enum amdgpu_gfx_partition mode)
+{
+       struct amdgpu_device *adev = xcp_mgr->adev;
+       int num_xcc, num_xccs_per_xcp;
+
+       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+       switch (mode) {
+       case AMDGPU_SPX_PARTITION_MODE:
+               return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+       case AMDGPU_DPX_PARTITION_MODE:
+               return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
+       case AMDGPU_TPX_PARTITION_MODE:
+               return (adev->gmc.num_mem_partitions == 1 ||
+                       adev->gmc.num_mem_partitions == 3) &&
+                      ((num_xcc % 3) == 0);
+       case AMDGPU_QPX_PARTITION_MODE:
+               num_xccs_per_xcp = num_xcc / 4;
+               return (adev->gmc.num_mem_partitions == 1 ||
+                       adev->gmc.num_mem_partitions == 4) &&
+                      (num_xccs_per_xcp >= 2);
+       case AMDGPU_CPX_PARTITION_MODE:
+               return ((num_xcc > 1) &&
+                      (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
+                      (num_xcc % adev->gmc.num_mem_partitions) == 0);
+       default:
+               return false;
+       }
+
+       return false;
+}
+
+static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+       /* TODO:
+        * Stop user queues and threads, and make sure GPU is empty of work.
+        */
+
+       if (flags & AMDGPU_XCP_OPS_KFD)
+               amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+       return 0;
+}
+
+static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+       int ret = 0;
+
+       if (flags & AMDGPU_XCP_OPS_KFD) {
+               amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+               amdgpu_amdkfd_device_init(xcp_mgr->adev);
+               /* If KFD init failed, return failure */
+               if (!xcp_mgr->adev->kfd.init_complete)
+                       ret = -EIO;
+       }
+
+       return ret;
+}
+
+static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+                                              int mode, int *num_xcps)
+{
+       int num_xcc_per_xcp, num_xcc, ret;
+       struct amdgpu_device *adev;
+       u32 flags = 0;
+
+       adev = xcp_mgr->adev;
+       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+       if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
+               mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
+       } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+               dev_err(adev->dev,
+                       "Invalid compute partition mode requested: %s, available memory partitions: %d",
+                       amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
+               return -EINVAL;
+       }
+
+       if (adev->kfd.init_complete)
+               flags |= AMDGPU_XCP_OPS_KFD;
+
+       if (flags & AMDGPU_XCP_OPS_KFD) {
+               ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
+               if (ret)
+                       goto out;
+       }
+
+       ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+       if (ret)
+               goto unlock;
+
+       num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
+       if (adev->gfx.funcs->switch_partition_mode)
+               adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
+                                                      num_xcc_per_xcp);
+
+       /* Init info about new xcps */
+       *num_xcps = num_xcc / num_xcc_per_xcp;
+       amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
+
+       ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+unlock:
+       if (flags & AMDGPU_XCP_OPS_KFD)
+               amdgpu_amdkfd_unlock_kfd(adev);
+out:
+       return ret;
+}
+
+static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
+                                         int xcc_id, uint8_t *mem_id)
+{
+       /* memory/spatial modes validation check is already done */
+       *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
+       *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
+
+       return 0;
+}
+
+static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
+                                       struct amdgpu_xcp *xcp, uint8_t *mem_id)
+{
+       struct amdgpu_numa_info numa_info;
+       struct amdgpu_device *adev;
+       uint32_t xcc_mask;
+       int r, i, xcc_id;
+
+       adev = xcp_mgr->adev;
+       /* TODO: BIOS is not returning the right info now
+        * Check on this later
+        */
+       /*
+       if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+               mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+       */
+       if (adev->gmc.num_mem_partitions == 1) {
+               /* Only one range */
+               *mem_id = 0;
+               return 0;
+       }
+
+       r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
+       if (r || !xcc_mask)
+               return -EINVAL;
+
+       xcc_id = ffs(xcc_mask) - 1;
+       if (!adev->gmc.is_app_apu)
+               return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
+
+       r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+
+       if (r)
+               return r;
+
+       r = -EINVAL;
+       for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+               if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
+                       *mem_id = i;
+                       r = 0;
+                       break;
+               }
+       }
+
+       return r;
+}
+
+static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+                                    enum AMDGPU_XCP_IP_BLOCK ip_id,
+                                    struct amdgpu_xcp_ip *ip)
+{
+       if (!ip)
+               return -EINVAL;
+
+       return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
+}
+
+struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
+       .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
+       .query_partition_mode = &aqua_vanjaram_query_partition_mode,
+       .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+       .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
+       .select_scheds = &aqua_vanjaram_select_scheds,
+       .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
+};
+
+static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
+                                 &aqua_vanjaram_xcp_funcs);
+       if (ret)
+               return ret;
+
+       /* TODO: Default memory node affinity init */
+
+       return ret;
+}
+
+int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
+{
+       u32 mask, inst_mask = adev->sdma.sdma_mask;
+       int ret, i;
+
+       /* generally one AID supports 4 SDMA instances */
+       adev->sdma.num_inst_per_aid = 4;
+       adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
+
+       adev->aid_mask = i = 1;
+       inst_mask >>= adev->sdma.num_inst_per_aid;
+
+       for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
+            inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
+               if ((inst_mask & mask) == mask)
+                       adev->aid_mask |= (1 << i);
+       }
+
+       /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
+        * addressed based on logical instance ids.
+        */
+       adev->vcn.harvest_config = 0;
+       adev->vcn.num_inst_per_aid = 1;
+       adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
+       adev->jpeg.harvest_config = 0;
+       adev->jpeg.num_inst_per_aid = 1;
+       adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
+
+       ret = aqua_vanjaram_xcp_mgr_init(adev);
+       if (ret)
+               return ret;
+
+       aqua_vanjaram_ip_map_init(adev);
+
+       return 0;
+}
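
Everything partition-related in this new file reduces to arithmetic on the mode: XCP_INST_MASK() carves a contiguous per-partition slice out of an instance bitmask, and __aqua_vanjaram_get_xcp_mem_id() folds XCC ids down to memory-partition ids. A worked sketch under assumed values (8 XCCs in QPX mode, 4 memory partitions, one XCP per memory partition):

    /* QPX on 8 XCCs: 4 partitions of num_xcc / 4 = 2 XCCs each. */
    int num_xcc_per_xcp = 2;

    /* XCP 1 owns XCC instances 2 and 3:
     * GENMASK(1, 0) << (1 * 2) = 0x3 << 2 = 0xc */
    u32 inst_mask = XCP_INST_MASK(num_xcc_per_xcp, 1);

    /* mem_id for XCC 3, with num_xcp_per_mem_partition == 1:
     * (3 / 2) / 1 = 1 — XCC 3 maps to memory partition 1. */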
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
deleted file mode 100644 (file)
index 16471b8..0000000
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "soc15.h"
-
-#include "soc15_common.h"
-#include "amdgpu_xcp.h"
-#include "gfx_v9_4_3.h"
-#include "gfxhub_v1_2.h"
-#include "sdma_v4_4_2.h"
-
-#define XCP_INST_MASK(num_inst, xcp_id)                                        \
-       (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-
-#define AMDGPU_XCP_OPS_KFD     (1 << 0)
-
-void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
-{
-       int i;
-
-       adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
-
-       adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
-
-       adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
-       adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
-       adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
-
-       adev->doorbell_index.sdma_doorbell_range = 20;
-       for (i = 0; i < adev->sdma.num_instances; i++)
-               adev->doorbell_index.sdma_engine[i] =
-                       AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
-                       i * (adev->doorbell_index.sdma_doorbell_range >> 1);
-
-       adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
-       adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
-
-       adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
-       adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
-
-       adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
-                            uint32_t inst_idx, struct amdgpu_ring *ring)
-{
-       int xcp_id;
-       enum AMDGPU_XCP_IP_BLOCK ip_blk;
-       uint32_t inst_mask;
-
-       ring->xcp_id = ~0;
-       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
-               return;
-
-       inst_mask = 1 << inst_idx;
-
-       switch (ring->funcs->type) {
-       case AMDGPU_HW_IP_GFX:
-       case AMDGPU_RING_TYPE_COMPUTE:
-       case AMDGPU_RING_TYPE_KIQ:
-               ip_blk = AMDGPU_XCP_GFX;
-               break;
-       case AMDGPU_RING_TYPE_SDMA:
-               ip_blk = AMDGPU_XCP_SDMA;
-               break;
-       case AMDGPU_RING_TYPE_VCN_ENC:
-       case AMDGPU_RING_TYPE_VCN_JPEG:
-               ip_blk = AMDGPU_XCP_VCN;
-               if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
-                       inst_mask = 1 << (inst_idx * 2);
-               break;
-       default:
-               DRM_ERROR("Not support ring type %d!", ring->funcs->type);
-               return;
-       }
-
-       for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
-               if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
-                       ring->xcp_id = xcp_id;
-                       break;
-               }
-       }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
-               struct amdgpu_device *adev,
-               struct amdgpu_ring *ring,
-               unsigned int sel_xcp_id)
-{
-       unsigned int *num_gpu_sched;
-
-       num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
-                       .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
-       adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
-                       .sched[(*num_gpu_sched)++] = &ring->sched;
-       DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
-                       sel_xcp_id, ring->funcs->type,
-                       ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
-               struct amdgpu_device *adev)
-{
-       struct amdgpu_ring *ring;
-       int i;
-
-       for (i = 0; i < MAX_XCP; i++) {
-               atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
-               memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
-       }
-
-       if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
-               return 0;
-
-       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-               ring = adev->rings[i];
-               if (!ring || !ring->sched.ready)
-                       continue;
-
-               aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
-               /* VCN is shared by two partitions under CPX MODE */
-               if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
-                       adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
-                       aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
-       }
-
-       return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
-       int i;
-
-       for (i = 0; i < adev->num_rings; i++) {
-               struct amdgpu_ring *ring = adev->rings[i];
-
-               if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
-                       aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
-               else
-                       aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
-       }
-
-       return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
-               struct amdgpu_device *adev,
-               u32 hw_ip,
-               u32 hw_prio,
-               struct amdgpu_fpriv *fpriv,
-               unsigned int *num_scheds,
-               struct drm_gpu_scheduler ***scheds)
-{
-       u32 sel_xcp_id;
-       int i;
-
-       if (fpriv->xcp_id == ~0) {
-               u32 least_ref_cnt = ~0;
-
-               fpriv->xcp_id = 0;
-               for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
-                       u32 total_ref_cnt;
-
-                       total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
-                       if (total_ref_cnt < least_ref_cnt) {
-                               fpriv->xcp_id = i;
-                               least_ref_cnt = total_ref_cnt;
-                       }
-               }
-       }
-       sel_xcp_id = fpriv->xcp_id;
-
-       if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
-               *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
-               *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
-               atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
-               DRM_DEBUG("Selected partition #%d", sel_xcp_id);
-       } else {
-               DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
-               return -ENOENT;
-       }
-
-       return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
-                                        enum amd_hw_ip_block_type block,
-                                        int8_t inst)
-{
-       int8_t dev_inst;
-
-       switch (block) {
-       case GC_HWIP:
-       case SDMA0_HWIP:
-       /* Both JPEG and VCN as JPEG is only alias of VCN */
-       case VCN_HWIP:
-               dev_inst = adev->ip_map.dev_inst[block][inst];
-               break;
-       default:
-               /* For rest of the IPs, no look up required.
-                * Assume 'logical instance == physical instance' for all configs. */
-               dev_inst = inst;
-               break;
-       }
-
-       return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
-                                        enum amd_hw_ip_block_type block,
-                                        uint32_t mask)
-{
-       uint32_t dev_mask = 0;
-       int8_t log_inst, dev_inst;
-
-       while (mask) {
-               log_inst = ffs(mask) - 1;
-               dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
-               dev_mask |= (1 << dev_inst);
-               mask &= ~(1 << log_inst);
-       }
-
-       return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
-                                         enum amd_hw_ip_block_type ip_block,
-                                         uint32_t inst_mask)
-{
-       int l = 0, i;
-
-       while (inst_mask) {
-               i = ffs(inst_mask) - 1;
-               adev->ip_map.dev_inst[ip_block][l++] = i;
-               inst_mask &= ~(1 << i);
-       }
-       for (; l < HWIP_MAX_INSTANCE; l++)
-               adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
-       u32 ip_map[][2] = {
-               { GC_HWIP, adev->gfx.xcc_mask },
-               { SDMA0_HWIP, adev->sdma.sdma_mask },
-               { VCN_HWIP, adev->vcn.inst_mask },
-       };
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
-               aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
-       adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
-       adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
-/* Fixed pattern for smn addressing on different AIDs:
- *   bit[34]: indicate cross AID access
- *   bit[33:32]: indicate target AID id
- * AID id range is 0 ~ 3 as maximum AID number is 4.
- */
-u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
-{
-       u64 ext_offset;
-
-       /* local routing and bit[34:32] will be zeros */
-       if (ext_id == 0)
-               return 0;
-
-       /* Initiated from host, accessing to all non-zero aids are cross traffic */
-       ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
-
-       return ext_offset;
-}
-
-static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
-{
-       enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
-       struct amdgpu_device *adev = xcp_mgr->adev;
-
-       if (adev->nbio.funcs->get_compute_partition_mode)
-               mode = adev->nbio.funcs->get_compute_partition_mode(adev);
-
-       return mode;
-}
-
-static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
-{
-       int num_xcc, num_xcc_per_xcp = 0;
-
-       num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
-
-       switch (mode) {
-       case AMDGPU_SPX_PARTITION_MODE:
-               num_xcc_per_xcp = num_xcc;
-               break;
-       case AMDGPU_DPX_PARTITION_MODE:
-               num_xcc_per_xcp = num_xcc / 2;
-               break;
-       case AMDGPU_TPX_PARTITION_MODE:
-               num_xcc_per_xcp = num_xcc / 3;
-               break;
-       case AMDGPU_QPX_PARTITION_MODE:
-               num_xcc_per_xcp = num_xcc / 4;
-               break;
-       case AMDGPU_CPX_PARTITION_MODE:
-               num_xcc_per_xcp = 1;
-               break;
-       }
-
-       return num_xcc_per_xcp;
-}
-
-static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
-                                   enum AMDGPU_XCP_IP_BLOCK ip_id,
-                                   struct amdgpu_xcp_ip *ip)
-{
-       struct amdgpu_device *adev = xcp_mgr->adev;
-       int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
-       int num_sdma, num_vcn;
-
-       num_sdma = adev->sdma.num_instances;
-       num_vcn = adev->vcn.num_vcn_inst;
-
-       switch (xcp_mgr->mode) {
-       case AMDGPU_SPX_PARTITION_MODE:
-               num_sdma_xcp = num_sdma;
-               num_vcn_xcp = num_vcn;
-               break;
-       case AMDGPU_DPX_PARTITION_MODE:
-               num_sdma_xcp = num_sdma / 2;
-               num_vcn_xcp = num_vcn / 2;
-               break;
-       case AMDGPU_TPX_PARTITION_MODE:
-               num_sdma_xcp = num_sdma / 3;
-               num_vcn_xcp = num_vcn / 3;
-               break;
-       case AMDGPU_QPX_PARTITION_MODE:
-               num_sdma_xcp = num_sdma / 4;
-               num_vcn_xcp = num_vcn / 4;
-               break;
-       case AMDGPU_CPX_PARTITION_MODE:
-               num_sdma_xcp = 2;
-               num_vcn_xcp = num_vcn ? 1 : 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
-
-       switch (ip_id) {
-       case AMDGPU_XCP_GFXHUB:
-               ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
-               ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
-               break;
-       case AMDGPU_XCP_GFX:
-               ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
-               ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
-               break;
-       case AMDGPU_XCP_SDMA:
-               ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
-               ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
-               break;
-       case AMDGPU_XCP_VCN:
-               ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
-               /* TODO : Assign IP funcs */
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       ip->ip_id = ip_id;
-
-       return 0;
-}
-
-static enum amdgpu_gfx_partition
-__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
-{
-       struct amdgpu_device *adev = xcp_mgr->adev;
-       int num_xcc;
-
-       num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
-
-       if (adev->gmc.num_mem_partitions == 1)
-               return AMDGPU_SPX_PARTITION_MODE;
-
-       if (adev->gmc.num_mem_partitions == num_xcc)
-               return AMDGPU_CPX_PARTITION_MODE;
-
-       if (adev->gmc.num_mem_partitions == num_xcc / 2)
-               return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
-                                                   AMDGPU_QPX_PARTITION_MODE;
-
-       if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
-               return AMDGPU_DPX_PARTITION_MODE;
-
-       return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
-}
-
-static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
-                                         enum amdgpu_gfx_partition mode)
-{
-       struct amdgpu_device *adev = xcp_mgr->adev;
-       int num_xcc, num_xccs_per_xcp;
-
-       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-       switch (mode) {
-       case AMDGPU_SPX_PARTITION_MODE:
-               return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
-       case AMDGPU_DPX_PARTITION_MODE:
-               return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
-       case AMDGPU_TPX_PARTITION_MODE:
-               return (adev->gmc.num_mem_partitions == 1 ||
-                       adev->gmc.num_mem_partitions == 3) &&
-                      ((num_xcc % 3) == 0);
-       case AMDGPU_QPX_PARTITION_MODE:
-               num_xccs_per_xcp = num_xcc / 4;
-               return (adev->gmc.num_mem_partitions == 1 ||
-                       adev->gmc.num_mem_partitions == 4) &&
-                      (num_xccs_per_xcp >= 2);
-       case AMDGPU_CPX_PARTITION_MODE:
-               return ((num_xcc > 1) &&
-                      (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
-                      (num_xcc % adev->gmc.num_mem_partitions) == 0);
-       default:
-               return false;
-       }
-
-       return false;
-}
-
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
-       /* TODO:
-        * Stop user queues and threads, and make sure GPU is empty of work.
-        */
-
-       if (flags & AMDGPU_XCP_OPS_KFD)
-               amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
-       return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
-       int ret = 0;
-
-       if (flags & AMDGPU_XCP_OPS_KFD) {
-               amdgpu_amdkfd_device_probe(xcp_mgr->adev);
-               amdgpu_amdkfd_device_init(xcp_mgr->adev);
-               /* If KFD init failed, return failure */
-               if (!xcp_mgr->adev->kfd.init_complete)
-                       ret = -EIO;
-       }
-
-       return ret;
-}
-
-static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
-                                              int mode, int *num_xcps)
-{
-       int num_xcc_per_xcp, num_xcc, ret;
-       struct amdgpu_device *adev;
-       u32 flags = 0;
-
-       adev = xcp_mgr->adev;
-       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-
-       if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
-               mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
-       } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
-               dev_err(adev->dev,
-                       "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
-                       amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
-               return -EINVAL;
-       }
-
-       if (adev->kfd.init_complete)
-               flags |= AMDGPU_XCP_OPS_KFD;
-
-       if (flags & AMDGPU_XCP_OPS_KFD) {
-               ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
-               if (ret)
-                       goto out;
-       }
-
-       ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
-       if (ret)
-               goto unlock;
-
-       num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
-       if (adev->gfx.funcs->switch_partition_mode)
-               adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
-                                                      num_xcc_per_xcp);
-
-       /* Init info about new xcps */
-       *num_xcps = num_xcc / num_xcc_per_xcp;
-       amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
-
-       ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
-unlock:
-       if (flags & AMDGPU_XCP_OPS_KFD)
-               amdgpu_amdkfd_unlock_kfd(adev);
-out:
-       return ret;
-}
-
-static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
-                                         int xcc_id, uint8_t *mem_id)
-{
-       /* memory/spatial modes validation check is already done */
-       *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
-       *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
-
-       return 0;
-}
-
-static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
-                                       struct amdgpu_xcp *xcp, uint8_t *mem_id)
-{
-       struct amdgpu_numa_info numa_info;
-       struct amdgpu_device *adev;
-       uint32_t xcc_mask;
-       int r, i, xcc_id;
-
-       adev = xcp_mgr->adev;
-       /* TODO: BIOS is not returning the right info now
-        * Check on this later
-        */
-       /*
-       if (adev->gmc.gmc_funcs->query_mem_partition_mode)
-               mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-       */
-       if (adev->gmc.num_mem_partitions == 1) {
-               /* Only one range */
-               *mem_id = 0;
-               return 0;
-       }
-
-       r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
-       if (r || !xcc_mask)
-               return -EINVAL;
-
-       xcc_id = ffs(xcc_mask) - 1;
-       if (!adev->gmc.is_app_apu)
-               return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
-
-       r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
-
-       if (r)
-               return r;
-
-       r = -EINVAL;
-       for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
-               if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
-                       *mem_id = i;
-                       r = 0;
-                       break;
-               }
-       }
-
-       return r;
-}
-
-static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
-                                    enum AMDGPU_XCP_IP_BLOCK ip_id,
-                                    struct amdgpu_xcp_ip *ip)
-{
-       if (!ip)
-               return -EINVAL;
-
-       return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
-}
-
-struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
-       .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
-       .query_partition_mode = &aqua_vanjaram_query_partition_mode,
-       .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
-       .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
-       .select_scheds = &aqua_vanjaram_select_scheds,
-       .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
-};
-
-static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
-{
-       int ret;
-
-       ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
-                                 &aqua_vanjaram_xcp_funcs);
-       if (ret)
-               return ret;
-
-       /* TODO: Default memory node affinity init */
-
-       return ret;
-}
-
-int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
-{
-       u32 mask, inst_mask = adev->sdma.sdma_mask;
-       int ret, i;
-
-       /* generally 1 AID supports 4 instances */
-       adev->sdma.num_inst_per_aid = 4;
-       adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
-
-       adev->aid_mask = i = 1;
-       inst_mask >>= adev->sdma.num_inst_per_aid;
-
-       for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
-            inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
-               if ((inst_mask & mask) == mask)
-                       adev->aid_mask |= (1 << i);
-       }
-
-       /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
-        * addressed based on logical instance ids.
-        */
-       adev->vcn.harvest_config = 0;
-       adev->vcn.num_inst_per_aid = 1;
-       adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
-       adev->jpeg.harvest_config = 0;
-       adev->jpeg.num_inst_per_aid = 1;
-       adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
-
-       ret = aqua_vanjaram_xcp_mgr_init(adev);
-       if (ret)
-               return ret;
-
-       aqua_vanjaram_ip_map_init(adev);
-
-       return 0;
-}
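
[Editor's note: the loop in aqua_vanjaram_init_soc_config() above derives the AID mask from the SDMA instance mask: AID0 is assumed present, and each further AID counts only when all of its SDMA instances survive harvesting. A stand-alone model with an illustrative 16-instance mask:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t build_aid_mask(uint32_t inst_mask, int inst_per_aid)
	{
		uint32_t mask = (1u << inst_per_aid) - 1;
		uint32_t aid_mask = 1;                     /* AID0 assumed present */
		int i = 1;

		for (inst_mask >>= inst_per_aid; inst_mask;
		     inst_mask >>= inst_per_aid, ++i) {
			if ((inst_mask & mask) == mask)    /* AID complete? */
				aid_mask |= 1u << i;
		}
		return aid_mask;
	}

	int main(void)
	{
		/* 4 instances per AID, instance 9 harvested: AIDs 0, 1 and 3
		 * are complete, AID2 is not -> prints aid_mask = 0xb.
		 */
		printf("aid_mask = 0x%x\n", build_aid_mask(0xfdff, 4));
		return 0;
	}
]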
index 5f610e9a5f0f839fb25a67cb1357580818c7ed01..9f63ddb89b75c1b6ea002a52044a4b93cb5e09d2 100644 (file)
@@ -1438,6 +1438,8 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
 
                ctx->vbios_pn[count] = 0;
        }
+
+       pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
 }
 
 static void atom_get_vbios_version(struct atom_context *ctx)
@@ -1460,11 +1462,9 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
        int base;
        struct atom_context *ctx =
            kzalloc(sizeof(struct atom_context), GFP_KERNEL);
-       char *str;
        struct _ATOM_ROM_HEADER *atom_rom_header;
        struct _ATOM_MASTER_DATA_TABLE *master_table;
        struct _ATOM_FIRMWARE_INFO *atom_fw_info;
-       u16 idx;
 
        if (!ctx)
                return NULL;
@@ -1502,16 +1502,6 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
                return NULL;
        }
 
-       idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
-       if (idx == 0)
-               idx = 0x80;
-
-       str = CSTR(idx);
-       if (*str != '\0') {
-               pr_info("ATOM BIOS: %s\n", str);
-               strscpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
-       }
-
        atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
        if (atom_rom_header->usMasterDataTableOffset != 0) {
                master_table = (struct _ATOM_MASTER_DATA_TABLE *)
index 0c183982452060497d2711a7eb5254a93db5dcd9..55bf99d5288d021fee1dc33e7ec5ff32caa4159d 100644 (file)
@@ -33,7 +33,6 @@ struct drm_device;
 #define ATOM_ATI_MAGIC_PTR     0x30
 #define ATOM_ATI_MAGIC         " 761295520"
 #define ATOM_ROM_TABLE_PTR     0x48
-#define ATOM_ROM_PART_NUMBER_PTR       0x6E
 
 #define ATOM_ROM_MAGIC         "ATOM"
 #define ATOM_ROM_MAGIC_PTR     4
@@ -143,7 +142,6 @@ struct atom_context {
        int io_mode;
        uint32_t *scratch;
        int scratch_size_bytes;
-       char vbios_version[20];
 
        uint8_t name[STRLEN_LONG];
        uint8_t vbios_pn[STRLEN_LONG];
index 44af8022b89fa96900f751c3e224b5b82995f2f4..0aee9c8288a2b8eb88054177b52e8a8ce0404215 100644 (file)
@@ -271,8 +271,7 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin");
 MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin");
 
-static const struct soc15_reg_golden golden_settings_gc_10_1[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
@@ -315,13 +314,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = {
        /* Pending on emulation bring up */
 };
 
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -1376,8 +1373,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
@@ -1418,8 +1414,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
@@ -1464,13 +1459,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = {
        /* Pending on emulation bring up */
 };
 
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -2093,13 +2086,11 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = {
        /* Pending on emulation bring up */
 };
 
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -3154,8 +3145,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
@@ -3164,7 +3154,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080),
@@ -3201,13 +3191,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = {
        /* Pending on emulation bring up */
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
@@ -3254,8 +3242,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020),
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
@@ -3285,8 +3272,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020),
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
@@ -3309,8 +3295,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0x30000000, 0x30000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0x7e000000, 0x7e000100),
@@ -3380,7 +3365,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
 };
 
@@ -3421,8 +3406,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x00000044),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
@@ -3506,6 +3490,8 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
                                           uint16_t pasid, uint32_t flush_type,
                                           bool all_hub, uint8_t dst_sel);
+static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+                                              unsigned int vmid);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -3714,8 +3700,8 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                break;
        case IP_VERSION(10, 3, 4):
                soc15_program_register_sequence(adev,
-                                                golden_settings_gc_10_3_4,
-                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
+                                               golden_settings_gc_10_3_4,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
                break;
        case IP_VERSION(10, 3, 5):
                soc15_program_register_sequence(adev,
@@ -3782,7 +3768,7 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        uint32_t tmp = 0;
-       unsigned i;
+       unsigned int i;
        int r;
 
        WREG32(scratch, 0xCAFEDEAD);
@@ -3820,7 +3806,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
-       unsigned index;
+       unsigned int index;
        uint64_t gpu_addr;
        volatile uint32_t *cpu_ptr;
        long r;
@@ -3951,7 +3937,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
                break;
        }
 
-       return ret ;
+       return ret;
 }
 
 static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
@@ -4151,7 +4137,7 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
 
-       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
        reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
        reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
@@ -4159,14 +4145,14 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
        reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
        reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
        switch (adev->ip_versions[GC_HWIP][0]) {
-               case IP_VERSION(10, 3, 0):
-                       reg_access_ctrl->spare_int =
-                               SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
-                       break;
-               default:
-                       reg_access_ctrl->spare_int =
-                               SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
-                       break;
+       case IP_VERSION(10, 3, 0):
+               reg_access_ctrl->spare_int =
+                       SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
+               break;
+       default:
+               reg_access_ctrl->spare_int =
+                       SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+               break;
        }
        adev->gfx.rlc.rlcg_reg_access_supported = true;
 }
@@ -4187,11 +4173,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
                        return r;
        }
 
-       /* init spm vmid with 0xf */
-       if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
-
-
        return 0;
 }
 
@@ -4213,7 +4194,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
        int r;
        u32 *hpd;
        const __le32 *fw_data = NULL;
-       unsigned fw_size;
+       unsigned int fw_size;
        u32 *fw = NULL;
        size_t mec_hpd_size;
 
@@ -4295,7 +4276,8 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id
 {
        /* in gfx10 the SIMD_ID is specified as part of the INSTANCE
         * field when performing a select_se_sh so it should be
-        * zero here */
+        * zero here
+        */
        WARN_ON(simd != 0);
 
        /* type 2 wave data */
@@ -4474,7 +4456,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                       int mec, int pipe, int queue)
 {
-       unsigned irq_type;
+       unsigned int irq_type;
        struct amdgpu_ring *ring;
        unsigned int hw_prio;
 
@@ -4795,7 +4777,8 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
        uint32_t pa_sc_tile_steering_override;
 
        /* for ASICs that integrates GFX v10.3
-        * pa_sc_tile_steering_override should be set to 0 */
+        * pa_sc_tile_steering_override should be set to 0
+        */
        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
                return 0;
 
@@ -4871,8 +4854,10 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
        nv_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
 
-       /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          access. These should be enabled by FW for target VMIDs. */
+       /*
+        * Initialize all compute VMIDs to have no GDS, GWS, or OA
+        * access. These should be enabled by FW for target VMIDs.
+        */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -5108,8 +5093,10 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
 
 static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
 {
-       /* TODO: enable rlc & smu handshake until smu
-        * and gfxoff feature works as expected */
+       /*
+        * TODO: enable rlc & smu handshake until smu
+        * and gfxoff feature works as expected
+        */
        if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
                gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
 
@@ -5132,7 +5119,7 @@ static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
 {
        const struct rlc_firmware_header_v2_0 *hdr;
        const __le32 *fw_data;
-       unsigned i, fw_size;
+       unsigned int i, fw_size;
 
        if (!adev->gfx.rlc_fw)
                return -EINVAL;
@@ -5169,6 +5156,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 
                gfx_v10_0_init_csb(adev);
 
+               gfx_v10_0_update_spm_vmid_internal(adev, 0xf);
+
                if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
                        gfx_v10_0_rlc_enable_srm(adev);
        } else {
@@ -5199,6 +5188,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 
                gfx_v10_0_init_csb(adev);
 
+               gfx_v10_0_update_spm_vmid_internal(adev, 0xf);
+
                adev->gfx.rlc.funcs->start(adev);
 
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -5207,6 +5198,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
                                return r;
                }
        }
+
        return 0;
 }
 
@@ -5674,11 +5666,10 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
 
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
+       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))
                WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
-       } else {
+       else
                WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
-       }
 
        if (adev->job_hang && !enable)
                return 0;
@@ -5700,7 +5691,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
        int r;
        const struct gfx_firmware_header_v1_0 *pfp_hdr;
        const __le32 *fw_data;
-       unsigned i, fw_size;
+       unsigned int i, fw_size;
        uint32_t tmp;
        uint32_t usec_timeout = 50000;  /* wait for 50ms */
 
@@ -5778,7 +5769,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
        int r;
        const struct gfx_firmware_header_v1_0 *ce_hdr;
        const __le32 *fw_data;
-       unsigned i, fw_size;
+       unsigned int i, fw_size;
        uint32_t tmp;
        uint32_t usec_timeout = 50000;  /* wait for 50ms */
 
@@ -5855,7 +5846,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
        int r;
        const struct gfx_firmware_header_v1_0 *me_hdr;
        const __le32 *fw_data;
-       unsigned i, fw_size;
+       unsigned int i, fw_size;
        uint32_t tmp;
        uint32_t usec_timeout = 50000;  /* wait for 50ms */
 
@@ -6243,7 +6234,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 {
        const struct gfx_firmware_header_v1_0 *mec_hdr;
        const __le32 *fw_data;
-       unsigned i;
+       unsigned int i;
        u32 tmp;
        u32 usec_timeout = 50000; /* Wait for 50 ms */
 
@@ -6922,8 +6913,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
 {
        uint32_t data, pattern = 0xDEADBEEF;
 
-       /* check if mmVGT_ESGS_RING_SIZE_UMD
-        * has been remapped to mmVGT_ESGS_RING_SIZE */
+       /*
+        * check if mmVGT_ESGS_RING_SIZE_UMD
+        * has been remapped to mmVGT_ESGS_RING_SIZE
+        */
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
@@ -6934,12 +6927,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
                WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
 
                if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) {
-                       WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD , data);
+                       WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
                        return true;
-               } else {
-                       WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data);
-                       return false;
                }
+               WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data);
                break;
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 3):
@@ -6954,12 +6945,12 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
                if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
                        WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
                        return true;
-               } else {
-                       WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
-                       return false;
                }
+               WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
                break;
        }
+
+       return false;
 }
 
 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
@@ -6969,8 +6960,10 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
                return;
 
-       /* initialize cam_index to 0
-        * index will auto-inc after each data writting */
+       /*
+        * Initialize cam_index to 0
+        * index will auto-inc after each data writing
+        */
        WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
 
        switch (adev->ip_versions[GC_HWIP][0]) {
@@ -7100,6 +7093,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
 static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev)
 {
        uint32_t data;
+
        data = RREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG);
        data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
        WREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG, data);
@@ -7216,7 +7210,7 @@ static bool gfx_v10_0_is_idle(void *handle)
 
 static int gfx_v10_0_wait_for_idle(void *handle)
 {
-       unsigned i;
+       unsigned int i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -7471,7 +7465,7 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
 {
        uint32_t data;
-       unsigned i;
+       unsigned int i;
 
        data = RLC_SAFE_MODE__CMD_MASK;
        data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
@@ -7900,12 +7894,11 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
        return 0;
 }
 
-static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+                                              unsigned int vmid)
 {
        u32 reg, data;
 
-       amdgpu_gfx_off_ctrl(adev, false);
-
        /* not for *_SOC15 */
        reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
        if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -7920,6 +7913,13 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
                WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
        else
                WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+{
+       amdgpu_gfx_off_ctrl(adev, false);
+
+       gfx_v10_0_update_spm_vmid_internal(adev, vmid);
 
        amdgpu_gfx_off_ctrl(adev, true);
 }
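
[Editor's note: the split above is the common outer-wrapper/inner-helper refactor: the public gfx_v10_0_update_spm_vmid() keeps the GFXOFF disable/enable bracket, while RLC resume, which runs with GFX already powered, calls the _internal helper directly. A stand-alone sketch of that shape, with printf standing in for the register writes:

	#include <stdio.h>

	static void gfx_off_ctrl(int enable)
	{
		printf("gfxoff %s\n", enable ? "allowed" : "blocked");
	}

	static void update_spm_vmid_internal(unsigned int vmid)
	{
		printf("program SPM VMID %u\n", vmid); /* the RLC_SPM_MC_CNTL write lives here */
	}

	static void update_spm_vmid(unsigned int vmid)
	{
		gfx_off_ctrl(0);                  /* keep GFX powered for the write */
		update_spm_vmid_internal(vmid);
		gfx_off_ctrl(1);
	}

	int main(void)
	{
		update_spm_vmid(0xf);             /* public path: bracketed */
		update_spm_vmid_internal(0xf);    /* resume path: GFX already up */
		return 0;
	}
]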
@@ -8297,7 +8297,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                       struct amdgpu_ib *ib,
                                       uint32_t flags)
 {
-       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+       unsigned int vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -8338,7 +8338,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                           struct amdgpu_ib *ib,
                                           uint32_t flags)
 {
-       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+       unsigned int vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        if (ring->is_mes_queue)
@@ -8373,7 +8373,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 }
 
 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
-                                    u64 seq, unsigned flags)
+                                    u64 seq, unsigned int flags)
 {
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
@@ -8429,7 +8429,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
 }
 
 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vmid, uint64_t pd_addr)
+                                        unsigned int vmid, uint64_t pd_addr)
 {
        if (ring->is_mes_queue)
                gfx_v10_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
@@ -8511,9 +8511,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
 {
-       unsigned ret;
+       unsigned int ret;
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
        amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
@@ -8525,9 +8525,10 @@ static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
        return ret;
 }
 
-static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
+static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
 {
-       unsigned cur;
+       unsigned int cur;
+
        BUG_ON(offset > ring->buf_mask);
        BUG_ON(ring->ring[offset] != 0x55aa55aa);
 
@@ -8750,7 +8751,7 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 }
 
 static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
-                                        unsigned vmid)
+                                        unsigned int vmid)
 {
        struct amdgpu_device *adev = ring->adev;
        uint32_t value = 0;
@@ -8859,7 +8860,7 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
 
 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
-                                           unsigned type,
+                                           unsigned int type,
                                            enum amdgpu_interrupt_state state)
 {
        switch (type) {
@@ -8956,7 +8957,7 @@ static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
 
 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
-                                             unsigned type,
+                                             unsigned int type,
                                              enum amdgpu_interrupt_state state)
 {
        switch (state) {
@@ -8975,7 +8976,7 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
 
 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                               struct amdgpu_irq_src *source,
-                                              unsigned type,
+                                              unsigned int type,
                                               enum amdgpu_interrupt_state state)
 {
        switch (state) {
@@ -9342,7 +9343,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
 
 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
 {
-       unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+       unsigned int total_cu = adev->gfx.config.max_cu_per_sh *
                            adev->gfx.config.max_sh_per_se *
                            adev->gfx.config.max_shader_engines;
 
@@ -9423,7 +9424,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
 {
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-       unsigned disable_masks[4 * 2];
+       unsigned int disable_masks[4 * 2];
 
        if (!adev || !cu_info)
                return -EINVAL;
@@ -9540,8 +9541,7 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
                     (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
 }
 
-const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v10_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 10,
        .minor = 0,
index 3a7af59e83ca11ea1409e8903899d302b5b3187c..4d53d6f13c3b78ce18083a031717ca47739d2b6e 100644 (file)
@@ -663,7 +663,7 @@ static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
 
-       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
        reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
        reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
        reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
index 65577eca58f1c85d44f55b2f10bf9300ee97f643..458faf657042e3d6a2b8af49b508db2867bf3295 100644 (file)
@@ -762,6 +762,8 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if, uint32_t instance_mask);
 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+                                             unsigned int vmid);
 
 static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
@@ -1632,7 +1634,7 @@ static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
 
-       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
        reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
        reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
@@ -1667,22 +1669,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
                        return r;
        }
 
-       switch (adev->ip_versions[GC_HWIP][0]) {
-       case IP_VERSION(9, 2, 2):
-       case IP_VERSION(9, 1, 0):
-               gfx_v9_0_init_lbpw(adev);
-               break;
-       case IP_VERSION(9, 4, 0):
-               gfx_v9_4_init_lbpw(adev);
-               break;
-       default:
-               break;
-       }
-
-       /* init spm vmid with 0xf */
-       if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
-
        return 0;
 }
 
@@ -2942,12 +2928,14 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
+               gfx_v9_0_init_lbpw(adev);
                if (amdgpu_lbpw == 0)
                        gfx_v9_0_enable_lbpw(adev, false);
                else
                        gfx_v9_0_enable_lbpw(adev, true);
                break;
        case IP_VERSION(9, 4, 0):
+               gfx_v9_4_init_lbpw(adev);
                if (amdgpu_lbpw > 0)
                        gfx_v9_0_enable_lbpw(adev, true);
                else
@@ -2957,6 +2945,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
                break;
        }
 
+       gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
+
        adev->gfx.rlc.funcs->start(adev);
 
        return 0;
@@ -4881,12 +4871,11 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
        return 0;
 }
 
-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+                                             unsigned int vmid)
 {
        u32 reg, data;
 
-       amdgpu_gfx_off_ctrl(adev, false);
-
        reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
        if (amdgpu_sriov_is_pp_one_vf(adev))
                data = RREG32_NO_KIQ(reg);
@@ -4900,6 +4889,13 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
                WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
        else
                WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+{
+       amdgpu_gfx_off_ctrl(adev, false);
+
+       gfx_v9_0_update_spm_vmid_internal(adev, vmid);
 
        amdgpu_gfx_off_ctrl(adev, true);
 }
@@ -5230,6 +5226,9 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
                de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
        }
 
+       ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
+               IB_COMPLETION_STATUS_PREEMPTED;
+
        if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
                memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
        } else {
index 9e3b835bdbb263ca8263215b759909900e54c14e..d8d6807e7b96459a975f9e44d532c370ab0944f3 100644 (file)
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 
 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
+#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
 
 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
 
@@ -195,14 +196,11 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        for (i = 0; i < num_xcc; i++) {
                dev_inst = GET_INST(GC, i);
-               if (dev_inst >= 2)
-                       WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4);
 
+               WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
+                            GOLDEN_GB_ADDR_CONFIG);
                /* Golden settings applied by driver for ASIC with rev_id 0 */
                if (adev->rev_id == 0) {
-                       WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
-                                    GOLDEN_GB_ADDR_CONFIG);
-
                        WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
                                              REDUCE_FIFO_DEPTH_BY_2, 2);
                }
@@ -624,7 +622,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                                int num_xccs_per_xcp)
 {
        int ret, i, num_xcc;
-       u32 tmp = 0;
+       u32 tmp = 0, regval;
 
        if (adev->psp.funcs) {
                ret = psp_spatial_partition(&adev->psp,
@@ -632,23 +630,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                                    num_xccs_per_xcp);
                if (ret)
                        return ret;
-       } else {
-               num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+       }
 
-               for (i = 0; i < num_xcc; i++) {
-                       tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
-                                           num_xccs_per_xcp);
-                       tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
-                                           i % num_xccs_per_xcp);
+       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+       for (i = 0; i < num_xcc; i++) {
+               tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
+                                   num_xccs_per_xcp);
+               tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
+                                   i % num_xccs_per_xcp);
+               regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL);
+               if (regval != tmp)
                        WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
                                     tmp);
-               }
-               ret = 0;
        }
 
        adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
 
-       return ret;
+       return 0;
 }
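
[Editor's note: the regval comparison added above is a read-before-write guard: the register is written only when the composed value differs from what is already programmed, skipping redundant MMIO. A stand-alone illustration with a fake register cell:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t fake_reg = 0x21;                 /* pretend MMIO cell */

	static uint32_t reg_read(void)    { return fake_reg; }
	static void reg_write(uint32_t v) { fake_reg = v; printf("wrote 0x%x\n", v); }

	int main(void)
	{
		uint32_t want = 0x21;

		if (reg_read() != want)  /* already programmed: write skipped */
			reg_write(want);

		want = 0x42;
		if (reg_read() != want)  /* differs: only this write happens */
			reg_write(want);
		return 0;
	}
]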
 
 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
@@ -900,6 +899,7 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
+       uint32_t data;
 
        /*
         * Configure apertures:
@@ -919,6 +919,11 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
                /* CP and shaders */
                WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
                WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
+
+               /* Enable trap for each kfd vmid. */
+               data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
+               data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+               WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
        }
        soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
        mutex_unlock(&adev->srbm_mutex);
@@ -1037,32 +1042,6 @@ static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_
        WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
 }
 
-static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev,
-                                         int xcc_id)
-{
-       uint32_t tmp = 0;
-       int num_xcc;
-
-       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-       switch (num_xcc) {
-       /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */
-       case 1:
-               WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8);
-               break;
-       case 2:
-       case 4:
-       case 6:
-       case 8:
-               tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID);
-               tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP));
-               WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp);
-
-               break;
-       default:
-               break;
-       }
-}
-
 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
 {
        uint32_t rlc_setting;
@@ -1101,6 +1080,24 @@ static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
        WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
 }
 
+static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+       int xcc_id, num_xcc;
+       struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+       for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+               reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
+               reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
+               reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
+               reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
+               reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
+               reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
+               reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
+               reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
+       }
+}
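
[Editor's note: the new init function above programs RLCG register access control per XCC instead of the single slot used by the gfx v9/v10/v11 variants earlier in this series, indexing each slot through the logical-to-physical instance map. A stand-alone model of the pattern, where get_inst() and the offsets are stand-ins:

	#include <stdio.h>

	#define MAX_XCC 8

	struct reg_access_ctrl { unsigned int grbm_idx; };

	static int get_inst(int logical) { return logical; } /* identity stand-in */

	int main(void)
	{
		struct reg_access_ctrl ctrl[MAX_XCC];
		int num_xcc = 4, xcc;

		for (xcc = 0; xcc < num_xcc; xcc++) {
			int inst = get_inst(xcc);                     /* logical -> physical */
			ctrl[inst].grbm_idx = 0x2200 + inst * 0x1000; /* fake offsets */
			printf("xcc %d -> inst %d grbm_idx 0x%x\n",
			       xcc, inst, ctrl[inst].grbm_idx);
		}
		return 0;
	}
]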
+
 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
 {
        /* init spm vmid with 0xf */
@@ -1736,7 +1733,7 @@ static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
 
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
-       WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
@@ -1920,9 +1917,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
                        return r;
        }
 
-       /* set the virtual and physical id based on partition_mode */
-       gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
-
        r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
        if (r)
                return r;
@@ -2181,6 +2175,9 @@ static int gfx_v9_4_3_early_init(void *handle)
        gfx_v9_4_3_set_gds_init(adev);
        gfx_v9_4_3_set_rlc_funcs(adev);
 
+       /* init rlcg reg access ctrl */
+       gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
+
        return gfx_v9_4_3_init_microcode(adev);
 }
 
@@ -2197,6 +2194,10 @@ static int gfx_v9_4_3_late_init(void *handle)
        if (r)
                return r;
 
+       if (adev->gfx.ras &&
+           adev->gfx.ras->enable_watchdog_timer)
+               adev->gfx.ras->enable_watchdog_timer(adev);
+
        return 0;
 }
 
@@ -4043,6 +4044,34 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
        gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
 }
 
+static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
+                                       void *ras_error_status, int xcc_id)
+{
+       uint32_t i;
+       uint32_t data;
+
+       data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+                            amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
+
+       if (amdgpu_watchdog_timer.timeout_fatal_disable &&
+           (amdgpu_watchdog_timer.period < 1 ||
+            amdgpu_watchdog_timer.period > 0x23)) {
+               dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
+               amdgpu_watchdog_timer.period = 0x23;
+       }
+       data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
+                            amdgpu_watchdog_timer.period);
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
+               WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
+       }
+       gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+                       xcc_id);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
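
[Editor's note: the period check above warns and clamps when fatal timeouts are disabled and the requested period falls outside the 1..0x23 range the PERIOD_SEL field can hold. A stand-alone model of the validation:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t clamp_wdt_period(int fatal_disable, uint32_t period)
	{
		if (fatal_disable && (period < 1 || period > 0x23)) {
			fprintf(stderr, "Watchdog period range is 1 to 0x23\n");
			period = 0x23;           /* clamp to the register maximum */
		}
		return period;
	}

	int main(void)
	{
		printf("0x%x\n", clamp_wdt_period(1, 0x40)); /* clamped to 0x23 */
		printf("0x%x\n", clamp_wdt_period(1, 0x10)); /* in range: kept */
		return 0;
	}
]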
+
 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
                                        void *ras_error_status)
 {
@@ -4065,6 +4094,11 @@ static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
        amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
 }
 
+static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
+{
+       amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
+}
+
 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
        .name = "gfx_v9_4_3",
        .early_init = gfx_v9_4_3_early_init,
@@ -4393,4 +4427,5 @@ struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
        .ras_block = {
                .hw_ops = &gfx_v9_4_3_ras_ops,
        },
+       .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
 };
index d94cc1ec7242db0ce8ca0f1b265bb9f4f64fe986..cdc290a474a927f943ee2eabfedc3a945a5343a3 100644 (file)
@@ -103,7 +103,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                        min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-                       /*
+                      /*
                        * Raven2 has a HW issue that it is unable to use the
                        * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                        * So here is the workaround that increase system
@@ -248,7 +248,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
 static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned num_level, block_size;
+       unsigned int num_level, block_size;
        uint32_t tmp;
        int i;
 
@@ -308,7 +308,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned i;
+       unsigned int i;
 
        for (i = 0 ; i < 18; ++i) {
                WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -375,6 +375,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                                 bool value)
 {
        u32 tmp;
+
        tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                        RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
index 4dabf910334b7abd23989f0a55d82946b0831816..dbdee1a0aefe69c1f483a62703281ed6dbc61bee 100644 (file)
@@ -140,7 +140,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
                                min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
                        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-                               /*
+                              /*
                                * Raven2 has a HW issue that it is unable to use the
                                * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                                * So here is the workaround that increase system
@@ -315,7 +315,7 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
                                              uint32_t xcc_mask)
 {
        struct amdgpu_vmhub *hub;
-       unsigned num_level, block_size;
+       unsigned int num_level, block_size;
        uint32_t tmp;
        int i, j;
 
@@ -402,18 +402,15 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
 static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
                                       uint32_t xcc_mask)
 {
-       uint32_t tmp_mask;
        int i;
 
-       tmp_mask = xcc_mask;
        /*
         * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
         * VF copy registers so vbios post doesn't program them, for
         * SRIOV driver need to program them
         */
        if (amdgpu_sriov_vf(adev)) {
-               for_each_inst(i, tmp_mask) {
-                       i = ffs(tmp_mask) - 1;
+               for_each_inst(i, xcc_mask) {
                        WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
                                     adev->gmc.vram_start >> 24);
                        WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
index f173a61c6c15af3159c55fd3958fd0eb25ae0cfc..a041c6c970e123d1249a3875f308bcd7606b44e8 100644 (file)
@@ -31,7 +31,7 @@
 
 #include "soc15_common.h"
 
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
        "CB/DB",
        "Reserved",
        "GE1",
@@ -332,7 +332,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned i;
+       unsigned int i;
 
        for (i = 0 ; i < 18; ++i) {
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -393,6 +393,7 @@ static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
+
        tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
index d8fc3e8088cd003104175a5bf23a9d2b63331431..7708d5ded7b88cc4ecdd0172b5a2081ed9fd8020 100644 (file)
@@ -34,7 +34,7 @@
 #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP                            0x16f8
 #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX   0
 
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
        "CB/DB",
        "Reserved",
        "GE1",
@@ -341,7 +341,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned i;
+       unsigned int i;
 
        for (i = 0 ; i < 18; ++i) {
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -582,6 +582,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
 static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
 {
        int i;
+
        adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
        adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
        adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
@@ -616,6 +617,7 @@ static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
 static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
 {
        int i;
+
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
        WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
@@ -679,9 +681,8 @@ static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
                tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
        }
 
-       if (!time) {
+       if (!time)
                DRM_WARN("failed to wait for GRBM(EA) idle\n");
-       }
 }
 
 const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
index c53147f9c9fc0b2653fad912558a1d8848510fa9..e1c76c070ba97d3c42126b9735945d292441b884 100644 (file)
@@ -30,7 +30,7 @@
 #include "navi10_enum.h"
 #include "soc15_common.h"
 
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
        "CB/DB",
        "Reserved",
        "GE1",
@@ -340,7 +340,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned i;
+       unsigned int i;
 
        for (i = 0 ; i < 18; ++i) {
                WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
index ae777487d72efc3ef5871e037af03f02765e4ed8..07f369c7a1ed2784c5732e569b433b1e74f85944 100644 (file)
@@ -33,7 +33,7 @@
 #define regGCVM_L2_CNTL4_DEFAULT               0x000000c1
 #define regGCVM_L2_CNTL5_DEFAULT               0x00003fe0
 
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
        "CB/DB",
        "Reserved",
        "GE1",
@@ -345,7 +345,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-       unsigned i;
+       unsigned int i;
 
        for (i = 0 ; i < 18; ++i) {
                WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
index 0c8a479895761e46bc2563da43a9fe318a9b5b4d..fa87a85e1017e78d0bdc4e9be090a53732210b16 100644 (file)
 
 #include "amdgpu_reset.h"
 
-#if 0
-static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
-{
-       /* TODO add golden setting for hdp */
-};
-#endif
-
 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *src,
-                                        unsigned type,
+                                        unsigned int type,
                                         enum amdgpu_interrupt_state state)
 {
        return 0;
@@ -70,7 +63,7 @@ static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
 
 static int
 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
-                                  struct amdgpu_irq_src *src, unsigned type,
+                                  struct amdgpu_irq_src *src, unsigned int type,
                                   enum amdgpu_interrupt_state state)
 {
        switch (state) {
@@ -109,9 +102,11 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
 {
+       uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
+                              AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+       struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        bool write_fault = !!(entry->src_data[1] & 0x20);
-       struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        struct amdgpu_task_info task_info;
        uint32_t status = 0;
        u64 addr;
@@ -164,8 +159,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 
        dev_err(adev->dev,
-               "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
-               "for process %s pid %d thread %s pid %d)\n",
+               "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
                entry->vmid_src ? "mmhub" : "gfxhub",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
@@ -244,7 +238,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
-       const unsigned eng = 17;
+       const unsigned int eng = 17;
        unsigned int i;
        unsigned char hub_ip = 0;
 
@@ -346,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_domain->sem)) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-               const unsigned eng = 17;
+               const unsigned int eng = 17;
                u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
                u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
@@ -477,12 +471,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 }
 
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                            unsigned vmid, uint64_t pd_addr)
+                                            unsigned int vmid, uint64_t pd_addr)
 {
        bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
-       unsigned eng = ring->vm_inv_eng;
+       unsigned int eng = ring->vm_inv_eng;
 
        /*
         * It may lose gpuvm invalidate acknowldege state across power-gating
@@ -524,8 +518,8 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        return pd_addr;
 }
 
-static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-                                        unsigned pasid)
+static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+                                        unsigned int pasid)
 {
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;
@@ -645,10 +639,10 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
                         AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 }
 
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
@@ -751,6 +745,7 @@ static int gmc_v10_0_early_init(void *handle)
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
        return 0;
 }
@@ -972,7 +967,7 @@ static int gmc_v10_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
-               printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
                return r;
        }
 
@@ -1081,7 +1076,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
                gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->gmc.gart_size >> 20),
+                (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
        return 0;
@@ -1255,8 +1250,7 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
        .get_clockgating_state = gmc_v10_0_get_clockgating_state,
 };
 
-const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 10,
        .minor = 0,
index c571f0d95994628916fd02e7db562f7a01cc98e9..e3b76fd28d158ca3633078050458576b12fffe75 100644 (file)
@@ -50,7 +50,7 @@
 
 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *src,
-                                        unsigned type,
+                                        unsigned int type,
                                         enum amdgpu_interrupt_state state)
 {
        return 0;
@@ -58,7 +58,7 @@ static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
 
 static int
 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
-                                  struct amdgpu_irq_src *src, unsigned type,
+                                  struct amdgpu_irq_src *src, unsigned int type,
                                   enum amdgpu_interrupt_state state)
 {
        switch (state) {
@@ -97,7 +97,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
 {
-       struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+       uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
+                              AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+       struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
        uint32_t status = 0;
        u64 addr;
 
@@ -124,8 +126,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
                amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 
                dev_err(adev->dev,
-                       "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
-                       "for process %s pid %d thread %s pid %d)\n",
+                       "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
                        entry->vmid_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vmid,
                        entry->pasid, task_info.process_name, task_info.tgid,
@@ -198,7 +199,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
-       const unsigned eng = 17;
+       const unsigned int eng = 17;
        unsigned int i;
        unsigned char hub_ip = 0;
 
@@ -296,7 +297,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-               const unsigned eng = 17;
+               const unsigned int eng = 17;
                u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
                u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
@@ -309,7 +310,6 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        mutex_lock(&adev->mman.gtt_window_lock);
        gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
        mutex_unlock(&adev->mman.gtt_window_lock);
-       return;
 }
 
 /**
@@ -379,12 +379,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 }
 
 static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                            unsigned vmid, uint64_t pd_addr)
+                                            unsigned int vmid, uint64_t pd_addr)
 {
        bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
-       unsigned eng = ring->vm_inv_eng;
+       unsigned int eng = ring->vm_inv_eng;
 
        /*
         * It may lose gpuvm invalidate acknowldege state across power-gating
@@ -426,8 +426,8 @@ static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        return pd_addr;
 }
 
-static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-                                        unsigned pasid)
+static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+                                        unsigned int pasid)
 {
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;
@@ -547,10 +547,10 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
                         AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 }
 
-static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
@@ -651,6 +651,7 @@ static int gmc_v11_0_early_init(void *handle)
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
        return 0;
 }
@@ -727,9 +728,9 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
        /* set the gart size */
-       if (amdgpu_gart_size == -1) {
+       if (amdgpu_gart_size == -1)
                adev->gmc.gart_size = 512ULL << 20;
-       } else
+       else
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 
        gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
@@ -823,7 +824,7 @@ static int gmc_v11_0_sw_init(void *handle)
 
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
-               printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
                return r;
        }
 
@@ -926,7 +927,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
        gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->gmc.gart_size >> 20),
+                (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
        return 0;
index aa754c95a0b3aaee667e16d0381ae1714f5e0fc1..5b837a65fad20cabba7a2a7b99b8843878d8da3d 100644 (file)
@@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_HAINAN:
                chip_name = "hainan";
                break;
-       default: BUG();
+       default:
+               BUG();
        }
 
        /* this memory configuration requires special firmware */
@@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
                        WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
                }
                /* load the MC ucode */
-               for (i = 0; i < ucode_size; i++) {
+               for (i = 0; i < ucode_size; i++)
                        WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
-               }
 
                /* put the engine back into the active state */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
@@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
        base <<= 24;
 
        amdgpu_gmc_vram_location(adev, mc, base);
@@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-       if (gmc_v6_0_wait_for_idle((void *)adev)) {
+       if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
 
        if (adev->mode_info.num_crtc) {
                u32 tmp;
@@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 
-       if (gmc_v6_0_wait_for_idle((void *)adev)) {
+       if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
        int r;
 
        tmp = RREG32(mmMC_ARB_RAMCFG);
-       if (tmp & (1 << 11)) {
+       if (tmp & (1 << 11))
                chansize = 16;
-       } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
+       else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
                chansize = 64;
-       } else {
+       else
                chansize = 32;
-       }
+
        tmp = RREG32(mmMC_SHARED_CHMAP);
        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
@@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 }
 
 static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                           unsigned vmid, uint64_t pd_addr)
+                                           unsigned int vmid, uint64_t pd_addr)
 {
        uint32_t reg;
 
@@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 }
 
  /**
-   + * gmc_v8_0_set_prt - set PRT VM fault
-   + *
-   + * @adev: amdgpu_device pointer
-   + * @enable: enable/disable VM fault handling for PRT
-   +*/
+  * gmc_v8_0_set_prt() - set PRT VM fault
+  *
+  * @adev: amdgpu_device pointer
+  * @enable: enable/disable VM fault handling for PRT
+  */
 static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
        u32 tmp;
@@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->gmc.gart_size >> 20),
+                (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
        return 0;
 }
@@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle)
                return 0;
 }
 
-static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
                size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);
@@ -814,6 +814,7 @@ static int gmc_v6_0_sw_init(void *handle)
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
                tmp &= MC_SEQ_MISC0__MT__MASK;
                adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
        }
@@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle)
 
 static int gmc_v6_0_wait_for_idle(void *handle)
 {
-       unsigned i;
+       unsigned int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->usec_timeout; i++) {
@@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v6_0_mc_stop(adev);
-               if (gmc_v6_0_wait_for_idle(adev)) {
+               if (gmc_v6_0_wait_for_idle(adev))
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-               }
-
 
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
@@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle)
 
 static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
-                                            unsigned type,
+                                            unsigned int type,
                                             enum amdgpu_interrupt_state state)
 {
        u32 tmp;
@@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }
 
-const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 6,
        .minor = 0,
index acd2b407860f7bc084250e35476100482c053e66..6a6929ac27482d76ca5eefff06e8410572182f28 100644 (file)
@@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
 MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 
-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
        mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 };
 
-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
@@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_KABINI:
        case CHIP_MULLINS:
                return 0;
-       default: BUG();
+       default:
+               return -EINVAL;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
@@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
 {
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
        base <<= 24;
 
        amdgpu_gmc_vram_location(adev, mc, base);
@@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-       if (gmc_v7_0_wait_for_idle((void *)adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
+
        if (adev->mode_info.num_crtc) {
                /* Lockout access through VGA aperture*/
                tmp = RREG32(mmVGA_HDP_CONTROL);
@@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (gmc_v7_0_wait_for_idle((void *)adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
 
        WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
@@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 
                /* Get VRAM informations */
                tmp = RREG32(mmMC_ARB_RAMCFG);
-               if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+               if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
                        chansize = 64;
-               } else {
+               else
                        chansize = 32;
-               }
+
                tmp = RREG32(mmMC_SHARED_CHMAP);
                switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
                case 0:
@@ -472,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 }
 
 static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                           unsigned vmid, uint64_t pd_addr)
+                                           unsigned int vmid, uint64_t pd_addr)
 {
        uint32_t reg;
 
@@ -488,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        return pd_addr;
 }
 
-static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-                                       unsigned pasid)
+static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+                                       unsigned int pasid)
 {
        amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 }
@@ -700,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->gmc.gart_size >> 20),
+                (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
        return 0;
 }
@@ -761,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  * Print human readable fault information (CIK).
  */
 static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
-                                    u32 addr, u32 mc_client, unsigned pasid)
+                                    u32 addr, u32 mc_client, unsigned int pasid)
 {
        u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
        u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -942,6 +941,7 @@ static int gmc_v7_0_early_init(void *handle)
                adev->gmc.shared_aperture_end + 1;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
        return 0;
 }
@@ -956,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle)
                return 0;
 }
 
-static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
                size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);
@@ -984,6 +985,7 @@ static int gmc_v7_0_sw_init(void *handle)
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
                tmp &= MC_SEQ_MISC0__MT__MASK;
                adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
        }
@@ -1152,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle)
 
 static int gmc_v7_0_wait_for_idle(void *handle)
 {
-       unsigned i;
+       unsigned int i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -1190,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v7_0_mc_stop(adev);
-               if (gmc_v7_0_wait_for_idle((void *)adev)) {
+               if (gmc_v7_0_wait_for_idle((void *)adev))
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-               }
-
 
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
@@ -1219,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 
 static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
-                                            unsigned type,
+                                            unsigned int type,
                                             enum amdgpu_interrupt_state state)
 {
        u32 tmp;
@@ -1383,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
 }
 
-const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 7,
        .minor = 0,
@@ -1392,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
        .funcs = &gmc_v7_0_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 7,
        .minor = 4,
index 85dead2a57021cf98ae6901465f781f3f4a13ffe..5af235202513227a962315be9cae7f5861d336e3 100644 (file)
@@ -64,8 +64,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
 
-static const u32 golden_settings_tonga_a11[] =
-{
+static const u32 golden_settings_tonga_a11[] = {
        mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
        mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
        mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
@@ -75,34 +74,29 @@ static const u32 golden_settings_tonga_a11[] =
        mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 };
 
-static const u32 tonga_mgcg_cgcg_init[] =
-{
+static const u32 tonga_mgcg_cgcg_init[] = {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_fiji_a10[] =
-{
+static const u32 golden_settings_fiji_a10[] = {
        mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 };
 
-static const u32 fiji_mgcg_cgcg_init[] =
-{
+static const u32 fiji_mgcg_cgcg_init[] = {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_polaris11_a11[] =
-{
+static const u32 golden_settings_polaris11_a11[] = {
        mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 };
 
-static const u32 golden_settings_polaris10_a11[] =
-{
+static const u32 golden_settings_polaris10_a11[] = {
        mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
        mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
        mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -110,19 +104,16 @@ static const u32 golden_settings_polaris10_a11[] =
        mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 };
 
-static const u32 cz_mgcg_cgcg_init[] =
-{
+static const u32 cz_mgcg_cgcg_init[] = {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 stoney_mgcg_cgcg_init[] =
-{
+static const u32 stoney_mgcg_cgcg_init[] = {
        mmATC_MISC_CG, 0xffffffff, 0x000c0200,
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_stoney_common[] =
-{
+static const u32 golden_settings_stoney_common[] = {
        mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
        mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
 };
@@ -260,7 +251,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_STONEY:
        case CHIP_VEGAM:
                return 0;
-       default: BUG();
+       default:
+               return -EINVAL;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
@@ -448,9 +440,9 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-       if (gmc_v8_0_wait_for_idle((void *)adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
+
        if (adev->mode_info.num_crtc) {
                /* Lockout access through VGA aperture*/
                tmp = RREG32(mmVGA_HDP_CONTROL);
@@ -483,9 +475,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (gmc_v8_0_wait_for_idle((void *)adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-       }
 
        WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
@@ -517,11 +508,11 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 
                /* Get VRAM informations */
                tmp = RREG32(mmMC_ARB_RAMCFG);
-               if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+               if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
                        chansize = 64;
-               } else {
+               else
                        chansize = 32;
-               }
+
                tmp = RREG32(mmMC_SHARED_CHMAP);
                switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
                case 0:
@@ -671,7 +662,7 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 }
 
 static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                           unsigned vmid, uint64_t pd_addr)
+                                           unsigned int vmid, uint64_t pd_addr)
 {
        uint32_t reg;
 
@@ -687,8 +678,8 @@ static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        return pd_addr;
 }
 
-static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-                                       unsigned pasid)
+static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+                                       unsigned int pasid)
 {
        amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 }
@@ -759,11 +750,11 @@ static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_set_prt - set PRT VM fault
+ * gmc_v8_0_set_prt() - set PRT VM fault
  *
  * @adev: amdgpu_device pointer
  * @enable: enable/disable VM fault handling for PRT
-*/
+ */
 static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
        u32 tmp;
@@ -940,7 +931,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
        gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-                (unsigned)(adev->gmc.gart_size >> 20),
+                (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
        return 0;
 }
@@ -1001,7 +992,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  * Print human readable fault information (VI).
  */
 static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
-                                    u32 addr, u32 mc_client, unsigned pasid)
+                                    u32 addr, u32 mc_client, unsigned int pasid)
 {
        u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
        u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1056,6 +1047,7 @@ static int gmc_v8_0_early_init(void *handle)
                adev->gmc.shared_aperture_end + 1;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
        return 0;
 }
@@ -1070,15 +1062,16 @@ static int gmc_v8_0_late_init(void *handle)
                return 0;
 }
 
-static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
                size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);
@@ -1282,7 +1275,7 @@ static bool gmc_v8_0_is_idle(void *handle)
 
 static int gmc_v8_0_wait_for_idle(void *handle)
 {
-       unsigned i;
+       unsigned int i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -1318,13 +1311,15 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
                        srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }
+
        if (srbm_soft_reset) {
                adev->gmc.srbm_soft_reset = srbm_soft_reset;
                return true;
-       } else {
-               adev->gmc.srbm_soft_reset = 0;
-               return false;
        }
+
+       adev->gmc.srbm_soft_reset = 0;
+
+       return false;
 }
 
 static int gmc_v8_0_pre_soft_reset(void *handle)
@@ -1335,9 +1330,8 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
                return 0;
 
        gmc_v8_0_mc_stop(adev);
-       if (gmc_v8_0_wait_for_idle(adev)) {
+       if (gmc_v8_0_wait_for_idle(adev))
                dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-       }
 
        return 0;
 }
@@ -1386,7 +1380,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 
 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
-                                            unsigned type,
+                                            unsigned int type,
                                             enum amdgpu_interrupt_state state)
 {
        u32 tmp;
@@ -1747,8 +1741,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
 
-const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 8,
        .minor = 0,
@@ -1756,8 +1749,7 @@ const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
        .funcs = &gmc_v8_0_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 8,
        .minor = 1,
@@ -1765,8 +1757,7 @@ const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
        .funcs = &gmc_v8_0_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 8,
        .minor = 5,
index 67e669e0141cc0378a8620fb5f344b11ad368e6f..880460cd323980cb8c20f2cfa205ded89e5e9b41 100644 (file)
@@ -81,7 +81,7 @@
 
 #define MAX_MEM_RANGES 8
 
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
        "CB",
        "DB",
        "IA",
@@ -332,14 +332,12 @@ static const char *mmhub_client_ids_aldebaran[][2] = {
        [384+0][1] = "OSS",
 };
 
-static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
-{
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
 };
 
-static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
-{
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
 };
@@ -416,13 +414,14 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
 
 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *src,
-               unsigned type,
+               unsigned int type,
                enum amdgpu_interrupt_state state)
 {
        u32 bits, i, tmp, reg;
 
        /* Devices newer then VEGA10/12 shall have these programming
-            sequences performed by PSP BL */
+        * sequences performed by PSP BL
+        */
        if (adev->asic_type >= CHIP_VEGA20)
                return 0;
 
@@ -466,7 +465,7 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
-                                       unsigned type,
+                                       unsigned int type,
                                        enum amdgpu_interrupt_state state)
 {
        struct amdgpu_vmhub *hub;
@@ -631,8 +630,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 
        dev_err(adev->dev,
-               "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
-               "pasid:%u, for process %s pid %d thread %s pid %d)\n",
+               "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
                hub_name, retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
@@ -816,7 +814,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
 {
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
-       const unsigned eng = 17;
+       const unsigned int eng = 17;
        u32 j, inv_req, inv_req2, tmp;
        struct amdgpu_vmhub *hub;
 
@@ -1033,13 +1031,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 }
 
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-                                           unsigned vmid, uint64_t pd_addr)
+                                           unsigned int vmid, uint64_t pd_addr)
 {
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
-       unsigned eng = ring->vm_inv_eng;
+       unsigned int eng = ring->vm_inv_eng;
 
        /*
         * It may lose gpuvm invalidate acknowldege state across power-gating
@@ -1081,8 +1079,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
        return pd_addr;
 }
 
-static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-                                       unsigned pasid)
+static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+                                       unsigned int pasid)
 {
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;
@@ -1373,10 +1371,10 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
        }
 }
 
-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-       unsigned size;
+       unsigned int size;
 
        /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
 
@@ -1622,6 +1620,7 @@ static int gmc_v9_0_early_init(void *handle)
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+       adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
        return 0;
 }
@@ -2150,7 +2149,7 @@ static int gmc_v9_0_sw_init(void *handle)
        dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
        if (r) {
-               printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
@@ -2304,7 +2303,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
                return r;
 
        DRM_INFO("PCIE GART of %uM enabled.\n",
-                (unsigned)(adev->gmc.gart_size >> 20));
+                (unsigned int)(adev->gmc.gart_size >> 20));
        if (adev->gmc.pdb0_bo)
                DRM_INFO("PDB0 located at 0x%016llX\n",
                                (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
@@ -2490,8 +2489,7 @@ const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,
 };
 
-const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 9,
        .minor = 0,
index b02e1cef78a768ba8a7cbfe7d2d5196b49a29008..980b241200803b1f711405c1335ae559587ebce8 100644 (file)
@@ -535,7 +535,7 @@ static int ih_v6_0_sw_init(void *handle)
         * use bus address for ih ring by psp bl */
        use_bus_addr =
                (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
        if (r)
                return r;
 
@@ -548,7 +548,7 @@ static int ih_v6_0_sw_init(void *handle)
        /* initialize ih control register offset */
        ih_v6_0_init_register_offset(adev);
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
        if (r)
                return r;
 
index 36a123e6c8ee752e7b1633506743044f24abdfc2..eb06d749876f27cda8337a2ae1a3ab2a03d1be9c 100644 (file)
@@ -909,10 +909,12 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
 
        /* prepare MQD backup */
        adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
-       if (!adev->mes.mqd_backup[pipe])
+       if (!adev->mes.mqd_backup[pipe]) {
                dev_warn(adev->dev,
                         "no memory to create MQD backup for ring %s\n",
                         ring->name);
+               return -ENOMEM;
+       }
 
        return 0;
 }
index 1bdaa00c0b466ff2aabfcf77ae41f781ad4a69ef..11fda318064fbc91a4c4b0e49ee627542ebc7b4c 100644 (file)
@@ -210,9 +210,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
        mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
        mes_add_queue_pkt.gds_size = input->queue_size;
 
-       /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
-       mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
-       mes_add_queue_pkt.gds_size = input->queue_size;
+       mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;
 
        return mes_v11_0_submit_pkt_and_poll_completion(mes,
                        &mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
@@ -1019,10 +1017,12 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
 
        /* prepare MQD backup */
        adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
-       if (!adev->mes.mqd_backup[pipe])
+       if (!adev->mes.mqd_backup[pipe]) {
                dev_warn(adev->dev,
                         "no memory to create MQD backup for ring %s\n",
                         ring->name);
+               return -ENOMEM;
+       }
 
        return 0;
 }
index eec13cb5bf75828e45c88c7715b0afb157d7605d..b6a8478dabf43cf29a428e6c0e42085fa5770001 100644 (file)
@@ -565,7 +565,7 @@ static int navi10_ih_sw_init(void *handle)
                use_bus_addr = false;
        else
                use_bus_addr = true;
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
        if (r)
                return r;
 
@@ -578,7 +578,7 @@ static int navi10_ih_sw_init(void *handle)
        /* initialize ih control registers offset */
        navi10_ih_init_register_offset(adev);
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
        if (r)
                return r;
 
index f9cb0d2c89d15bf2b36e72e73392ad446e929faa..d23827d3d8ccaad2941df86c07bc43fec7f62334 100644 (file)
@@ -49,6 +49,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
 
 /* For large FW files the time to complete can be very long */
 #define USBC_PD_POLLING_LIMIT_S 240
@@ -93,6 +96,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
        case IP_VERSION(13, 0, 5):
        case IP_VERSION(13, 0, 8):
        case IP_VERSION(13, 0, 11):
+       case IP_VERSION(14, 0, 0):
                err = psp_init_toc_microcode(psp, ucode_prefix);
                if (err)
                        return err;
index 5c4d4df9cf94ccb075c605ddf85ac714bd8df397..1cc34efb455bb83e8b5ca7dd730415b72ce962f5 100644 (file)
@@ -237,17 +237,15 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
 // emulation only, won't work on real chip
 // navi10 real chip need to use PSP to load firmware
 static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
-{      int ret, i;
-
-       if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
-               return 0;
+{
+       int ret, i;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ret = amdgpu_sdma_init_microcode(adev, i, false);
                if (ret)
                        return ret;
        }
-       
+
        return ret;
 }
 
index 96948a59f8dd556455d1f6ab0bd484285852b6e7..da683afa0222f188dda74788863fd82ec5a35bcd 100644 (file)
 #define SOC15_REG_OFFSET1(ip, inst, reg, offset) \
        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset))
 
-#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
+#define __WREG32_SOC15_RLC__(reg, value, flag, hwip, inst) \
        ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
-        amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \
+        amdgpu_sriov_wreg(adev, reg, value, flag, hwip, inst) : \
         WREG32(reg, value))
 
-#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
+#define __RREG32_SOC15_RLC__(reg, flag, hwip, inst) \
        ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
-        amdgpu_sriov_rreg(adev, reg, flag, hwip) : \
+        amdgpu_sriov_rreg(adev, reg, flag, hwip, inst) : \
         RREG32(reg))
 
 #define WREG32_FIELD15(ip, idx, reg, field, val)       \
         __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,   \
                                (__RREG32_SOC15_RLC__( \
                                        adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
-                                       0, ip##_HWIP) & \
+                                       0, ip##_HWIP, idx) & \
                                ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
-                             0, ip##_HWIP)
+                             0, ip##_HWIP, idx)
 
 #define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val)        \
        __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name,   \
                        (__RREG32_SOC15_RLC__( \
                                        adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
-                                       0, ip##_HWIP) & \
+                                       0, ip##_HWIP, idx) & \
                                        ~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \
-                       0, ip##_HWIP)
+                       0, ip##_HWIP, idx)
 
 #define RREG32_SOC15(ip, inst, reg) \
        __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
-                        0, ip##_HWIP)
+                        0, ip##_HWIP, inst)
 
-#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
+#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0)
 
-#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
 
 #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
        __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
-                        AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+                        AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
 
 #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
         __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \
-                        (offset), 0, ip##_HWIP)
+                        (offset), 0, ip##_HWIP, inst)
 
 #define WREG32_SOC15(ip, inst, reg, value) \
         __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
-                         value, 0, ip##_HWIP)
+                         value, 0, ip##_HWIP, inst)
 
 #define WREG32_SOC15_IP(ip, reg, value) \
-        __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
+        __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0)
 
 #define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
-        __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+        __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
 
 #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
        __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
-                            value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+                            value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
 
 #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
         __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \
-                         value, 0, ip##_HWIP)
+                         value, 0, ip##_HWIP, inst)
 
 #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask)      \
        amdgpu_device_wait_on_rreg(adev, inst,                       \
        #reg, expected_value, mask)
 
 #define WREG32_RLC(reg, value) \
-       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP)
+       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP, 0)
 
-#define WREG32_RLC_EX(prefix, reg, value) \
+#define WREG32_RLC_EX(prefix, reg, value, inst) \
        do {                                                    \
                if (amdgpu_sriov_fullaccess(adev)) {    \
                        uint32_t i = 0; \
                        uint32_t retries = 50000;       \
-                       uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0;       \
-                       uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1;       \
-                       uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT;      \
+                       uint32_t r0 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0;    \
+                       uint32_t r1 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1;    \
+                       uint32_t spare_int = adev->reg_offset[GC_HWIP][inst][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT;   \
                        WREG32(r0, value);      \
                        WREG32(r1, (reg | 0x80000000)); \
                        WREG32(spare_int, 0x1); \
 
 /* shadow the registers in the callback function */
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
-       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP)
+       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP, inst)
 
 /* for GC only */
 #define RREG32_RLC(reg) \
        __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
 
 #define WREG32_RLC_NO_KIQ(reg, value, hwip) \
-       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
+       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
 
 #define RREG32_RLC_NO_KIQ(reg, hwip) \
-       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
+       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
 
 #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
        do {                                                    \
        } while (0)
 
 #define RREG32_SOC15_RLC(ip, inst, reg) \
-       __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP)
+       __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP, inst)
 
 #define WREG32_SOC15_RLC(ip, inst, reg, value) \
        do {                                                    \
                uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
-               __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \
+               __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP, inst); \
        } while (0)
 
 #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
        do {                                                    \
                        uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\
-                       WREG32_RLC_EX(prefix, target_reg, value); \
+                       WREG32_RLC_EX(prefix, target_reg, value, inst); \
        } while (0)
 
 #define WREG32_FIELD15_RLC(ip, idx, reg, field, val)   \
        __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
                             (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
-                                                  AMDGPU_REGS_RLC, ip##_HWIP) & \
+                                                  AMDGPU_REGS_RLC, ip##_HWIP, idx) & \
                              ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
-                            AMDGPU_REGS_RLC, ip##_HWIP)
+                            AMDGPU_REGS_RLC, ip##_HWIP, idx)
 
 #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
-       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP)
+       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP, inst)
 
 #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
-       __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP)
+       __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP, inst)
 
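
All of these wrappers now thread the hardware-IP instance down into __WREG32_SOC15_RLC__()/__RREG32_SOC15_RLC__() instead of hardcoding instance 0. A minimal sketch of a call site under the new signatures (the register name is illustrative, not taken from this patch):

	/* Illustrative only: write/read a per-instance GC register through
	 * the RLC-safe path; `inst` now selects the row of
	 * adev->reg_offset[GC_HWIP][inst][...] rather than being fixed to 0. */
	WREG32_SOC15_RLC(GC, inst, regSCRATCH_REG0, 0x1);
	val = RREG32_SOC15_RLC(GC, inst, regSCRATCH_REG0);
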
 /* inst equals ext for some IPs */
 #define RREG32_SOC15_EXT(ip, inst, reg, ext) \
index c975aed2f6c7845f480e0a9b17db92a7110ba217..18794394c5a052b26ef009647616a8aba92efd9a 100644 (file)
@@ -881,9 +881,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
        if (indirect)
-               psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
-                                   (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
-                                              (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
+               amdgpu_vcn_psp_update_sram(adev, 0, 0);
 
        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
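
The VCN DPG start paths carried identical pointer arithmetic to size the indirect SRAM image; the new amdgpu_vcn_psp_update_sram() helper centralizes it. A hedged sketch of what such a wrapper plausibly does, reconstructed only from the removed call sites (the third argument seen at the vcn_v4_0_3 call site, a ucode id, is omitted here):

	/* Sketch under stated assumptions: fold the duplicated DPG SRAM size
	 * computation into one place and forward to the existing
	 * psp_update_vcn_sram() interface shown in the removed lines. */
	static int vcn_psp_update_sram_sketch(struct amdgpu_device *adev,
					      int inst_idx)
	{
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[inst_idx];
		uint32_t size = (uint32_t)((uintptr_t)vinst->dpg_sram_curr_addr -
					   (uintptr_t)vinst->dpg_sram_cpu_addr);

		return psp_update_vcn_sram(adev, inst_idx,
					   vinst->dpg_sram_gpu_addr, size);
	}
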
index bb1875f926f19d78d16a08538df7232ed0f1c5be..6fbea38f4d3e1bd3caeddcff9527f47c281adfc5 100644 (file)
@@ -912,9 +912,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
        if (indirect)
-               psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
-                                   (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
-                                              (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+               amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
 
        ring = &adev->vcn.inst[inst_idx].ring_dec;
        /* force RBC into idle state */
index c8f63b3c6f69d945388187d41c65482686e3b354..b76ba21b5a896c9a5ca8614c73b685064527b0ff 100644 (file)
@@ -1037,9 +1037,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
                VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
 
        if (indirect)
-               psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
-                       (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
-                               (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+               amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
 
        ring = &adev->vcn.inst[inst_idx].ring_dec;
        /* force RBC into idle state */
index 259795098173ac5cb7800959536bb51e1342abeb..6089c7deba8ac810762c833ed49ade0e1eb80998 100644 (file)
@@ -169,6 +169,12 @@ static int vcn_v4_0_sw_init(void *handle)
                fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
                        AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
 
+               if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) {
+                       fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+                       fw_shared->drm_key_wa.method =
+                               AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+               }
+
                if (amdgpu_sriov_vf(adev))
                        fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
 
@@ -993,9 +999,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 
 
        if (indirect)
-               psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
-                       (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
-                               (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+               amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
 
        ring = &adev->vcn.inst[inst_idx].ring_enc[0];
 
@@ -1800,7 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
        return 0;
 }
 
-static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
+static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
@@ -1845,7 +1849,11 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
-               adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs;
+               if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2))
+                       vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;
+
+               adev->vcn.inst[i].ring_enc[0].funcs =
+                      (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
                adev->vcn.inst[i].ring_enc[0].me = i;
 
                DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
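
Dropping const from the ops table lets it be patched once during init, here to flag secure submission on VCN 4.0.2, before being handed back out through a const pointer. The pattern in miniature (the predicate name is hypothetical):

	/* Mutate the static table during early init, then publish it
	 * read-only; the cast documents the narrowing back to const. */
	static struct amdgpu_ring_funcs funcs = { /* ... */ };

	if (needs_secure_submission)		/* hypothetical predicate */
		funcs.secure_submission_supported = true;
	ring->funcs = (const struct amdgpu_ring_funcs *)&funcs;

One trade-off worth noting: the table is file-scope static, so once set the flag is visible to every device the driver has probed, which is presumably acceptable since the check keys on the same IP version everywhere.
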
index 5d67b8b8a3d6bc03a566c62dd56cb9b785df3dc7..550ac040b4be85b06af2bebe5d556ea413fc5645 100644 (file)
@@ -778,9 +778,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
        if (indirect)
-               psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
-                       (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
-                               (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+               amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
 
        ring = &adev->vcn.inst[inst_idx].ring_enc[0];
 
index 1e83db0c5438d0c07b27bf1540ad6b4bdf2bc61e..d364c6dd152c33b7fc1fbc614668b2dd4ffe223a 100644 (file)
@@ -485,7 +485,7 @@ static int vega10_ih_sw_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, true);
        if (r)
                return r;
 
@@ -510,7 +510,7 @@ static int vega10_ih_sw_init(void *handle)
        /* initialize ih control registers offset */
        vega10_ih_init_register_offset(adev);
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
        if (r)
                return r;
 
index 4d719df376a7224dba72c2cf880cd18e5e8d2044..544ee55a22da662b17b53b49ecee926f41d5f71f 100644 (file)
@@ -539,7 +539,7 @@ static int vega20_ih_sw_init(void *handle)
            (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)))
                use_bus_addr = false;
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
        if (r)
                return r;
 
@@ -565,7 +565,7 @@ static int vega20_ih_sw_init(void *handle)
        /* initialize ih control registers offset */
        vega20_ih_init_register_offset(adev);
 
-       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, use_bus_addr);
+       r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr);
        if (r)
                return r;
 
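
Replacing the bare 256 * 1024 and PAGE_SIZE with IH_RING_SIZE/IH_SW_RING_SIZE keeps vega10 and vega20 (and any other users) in lockstep. Presumably the constants live once in the shared IH header, shaped roughly like this (the hardware-ring value mirrors the literal removed above; the soft-ring value is an assumption, since it was previously just PAGE_SIZE):

	/* Sketch of the presumed definitions in the IH header. */
	#define IH_RING_SIZE	(256 * 1024)
	#define IH_SW_RING_SIZE	(8 * 1024)	/* value is an assumption */
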
index 6a27b000a246e7d04dca0a8d42b92a5a0ff96d8e..aef8e12df61f6689c684acf51620ef997533183c 100644 (file)
@@ -1487,7 +1487,8 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
                goto out_unlock;
        }
 
-       if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) {
+       if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
+                                     kfd_dbg_has_cwsr_workaround(dev))) {
                retval = -EBUSY;
                goto out_unlock;
        }
@@ -1845,22 +1846,21 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
                idr_for_each_entry(&pdd->alloc_idr, mem, id) {
                        struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
 
-                       if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
+                       if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
                                num_of_bos++;
                }
        }
        return num_of_bos;
 }
 
-static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
+static int criu_get_prime_handle(struct kgd_mem *mem, int flags,
                                      u32 *shared_fd)
 {
        struct dma_buf *dmabuf;
        int ret;
 
-       dmabuf = amdgpu_gem_prime_export(gobj, flags);
-       if (IS_ERR(dmabuf)) {
-               ret = PTR_ERR(dmabuf);
+       ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+       if (ret) {
                pr_err("dmabuf export failed for the BO\n");
                return ret;
        }
@@ -1918,7 +1918,11 @@ static int criu_checkpoint_bos(struct kfd_process *p,
                        kgd_mem = (struct kgd_mem *)mem;
                        dumper_bo = kgd_mem->bo;
 
-                       if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
+                       /* Skip checkpointing BOs that are used for trap handler
+                        * code and state. Currently, these BOs have a VA that
+                        * is less than the GPUVM base.
+                        */
+                       if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
                                continue;
 
                        bo_bucket = &bo_buckets[bo_index];
@@ -1940,7 +1944,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
                        }
                        if (bo_bucket->alloc_flags
                            & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
-                               ret = criu_get_prime_handle(&dumper_bo->tbo.base,
+                               ret = criu_get_prime_handle(kgd_mem,
                                                bo_bucket->alloc_flags &
                                                KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
                                                &bo_bucket->dmabuf_fd);
@@ -2402,7 +2406,7 @@ static int criu_restore_bo(struct kfd_process *p,
        /* create the dmabuf object and export the bo */
        if (bo_bucket->alloc_flags
            & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
-               ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
+               ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
                                            &bo_bucket->dmabuf_fd);
                if (ret)
                        return ret;
@@ -2755,6 +2759,16 @@ static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
 
                if (pdd->qpd.queue_count)
                        return -EEXIST;
+
+               /*
+                * Set up TTMPs by default.
+                * Note that this call must remain here so that MES ADD QUEUE
+                * can use skip_process_ctx_clear unconditionally, as the first call to
+                * SET_SHADER_DEBUGGER clears any stale process context data
+                * saved in MES.
+                */
+               if (pdd->dev->kfd->shared_resources.enable_mes)
+                       kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
        }
 
        p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
@@ -2848,7 +2862,8 @@ static int runtime_disable(struct kfd_process *p)
                        if (!pdd->dev->kfd->shared_resources.enable_mes)
                                debug_refresh_runlist(pdd->dev->dqm);
                        else
-                               kfd_dbg_set_mes_debug_mode(pdd);
+                               kfd_dbg_set_mes_debug_mode(pdd,
+                                                          !kfd_dbg_has_cwsr_workaround(pdd->dev));
                }
        }
 
index fff3ccc04fa94f13042e1ddcb69c1ee7c709bc7e..9ec750666382fe9bfebbc7708144f67a3fe480e6 100644 (file)
@@ -302,8 +302,7 @@ static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
        if (!q)
                return 0;
 
-       if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-           KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
+       if (!kfd_dbg_has_cwsr_workaround(q->device))
                return 0;
 
        if (enable && q->properties.is_user_cu_masked)
@@ -345,11 +344,10 @@ unwind:
        return r;
 }
 
-int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
 {
        uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
        uint32_t flags = pdd->process->dbg_flags;
-       bool sq_trap_en = !!spi_dbg_cntl;
 
        if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
                return 0;
@@ -433,7 +431,7 @@ int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
        if (!pdd->dev->kfd->shared_resources.enable_mes)
                r = debug_map_and_unlock(pdd->dev->dqm);
        else
-               r = kfd_dbg_set_mes_debug_mode(pdd);
+               r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
        kfd_dbg_clear_dev_watch_id(pdd, watch_id);
 
@@ -446,7 +444,8 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
                                        uint32_t *watch_id,
                                        uint32_t watch_mode)
 {
-       int r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
+       int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
+       uint32_t xcc_mask = pdd->dev->xcc_mask;
 
        if (r)
                return r;
@@ -460,19 +459,21 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
        }
 
        amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
-       pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
+       for_each_inst(xcc_id, xcc_mask)
+               pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
                                pdd->dev->adev,
                                watch_address,
                                watch_address_mask,
                                *watch_id,
                                watch_mode,
-                               pdd->dev->vm_info.last_vmid_kfd);
+                               pdd->dev->vm_info.last_vmid_kfd,
+                               xcc_id);
        amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
 
        if (!pdd->dev->kfd->shared_resources.enable_mes)
                r = debug_map_and_unlock(pdd->dev->dqm);
        else
-               r = kfd_dbg_set_mes_debug_mode(pdd);
+               r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
        /* HWS is broken so no point in HW rollback but release the watchpoint anyway */
        if (r)
@@ -514,7 +515,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
                if (!pdd->dev->kfd->shared_resources.enable_mes)
                        r = debug_refresh_runlist(pdd->dev->dqm);
                else
-                       r = kfd_dbg_set_mes_debug_mode(pdd);
+                       r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
                if (r) {
                        target->dbg_flags = prev_flags;
@@ -537,7 +538,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
                        if (!pdd->dev->kfd->shared_resources.enable_mes)
                                debug_refresh_runlist(pdd->dev->dqm);
                        else
-                               kfd_dbg_set_mes_debug_mode(pdd);
+                               kfd_dbg_set_mes_debug_mode(pdd, true);
                }
        }
 
@@ -599,7 +600,7 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind
                if (!pdd->dev->kfd->shared_resources.enable_mes)
                        debug_refresh_runlist(pdd->dev->dqm);
                else
-                       kfd_dbg_set_mes_debug_mode(pdd);
+                       kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
        }
 
        kfd_dbg_set_workaround(target, false);
@@ -715,7 +716,7 @@ int kfd_dbg_trap_activate(struct kfd_process *target)
                if (!pdd->dev->kfd->shared_resources.enable_mes)
                        r = debug_refresh_runlist(pdd->dev->dqm);
                else
-                       r = kfd_dbg_set_mes_debug_mode(pdd);
+                       r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
                if (r) {
                        target->runtime_info.runtime_state =
@@ -751,7 +752,8 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
                if (!KFD_IS_SOC15(pdd->dev))
                        return -ENODEV;
 
-               if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws)
+               if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||
+                                        kfd_dbg_has_cwsr_workaround(pdd->dev)))
                        return -EBUSY;
        }
 
@@ -848,7 +850,7 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
                if (!pdd->dev->kfd->shared_resources.enable_mes)
                        r = debug_refresh_runlist(pdd->dev->dqm);
                else
-                       r = kfd_dbg_set_mes_debug_mode(pdd);
+                       r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
                if (r)
                        break;
@@ -880,7 +882,7 @@ int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
                if (!pdd->dev->kfd->shared_resources.enable_mes)
                        r = debug_refresh_runlist(pdd->dev->dqm);
                else
-                       r = kfd_dbg_set_mes_debug_mode(pdd);
+                       r = kfd_dbg_set_mes_debug_mode(pdd, true);
 
                if (r)
                        break;
index a289e59ceb797525be1879a8366cdcb2a09a0fbd..fd0ff64d4184a63094ccb6ad257451c84ce3bde8 100644 (file)
@@ -76,8 +76,9 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
 
 static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
 {
-       return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
-              KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0);
+       return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+               KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+               KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
 }
 
 void debug_event_write_work_handler(struct work_struct *work);
@@ -100,6 +101,12 @@ static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
                 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
 }
 
+static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
+{
+       return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+              KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
+}
+
 static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
 {
        if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
@@ -119,5 +126,14 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
        return true;
 }
 
-int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd);
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en);
+
+static inline bool kfd_dbg_has_ttmps_always_setup(struct kfd_node *dev)
+{
+       return (KFD_GC_VERSION(dev) < IP_VERSION(11, 0, 0) &&
+                       KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 2)) ||
+              (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+                       KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0) &&
+                       (dev->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 70);
+}
 #endif
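
These range checks work because IP_VERSION() packs the (major, minor, revision) triplet into a single monotonically ordered integer, so a version window such as the CWSR workaround above reduces to two plain comparisons. The upstream macro is shaped like:

	/* 11.0.0 <= v <= 11.0.3 becomes simple integer arithmetic. */
	#define IP_VERSION(ver, maj, rev) (((ver) << 16) | ((maj) << 8) | (rev))
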
index 0b3dc754e06ba264bf25835f33c7607a6fae4dfe..ebc9674d3ce133921898e0b871abccab617f19f5 100644 (file)
@@ -508,6 +508,7 @@ static int kfd_gws_init(struct kfd_node *node)
 {
        int ret = 0;
        struct kfd_dev *kfd = node->kfd;
+       uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
 
        if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
                return 0;
@@ -524,7 +525,10 @@ static int kfd_gws_init(struct kfd_node *node)
                (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
                (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
                        && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
-                       && kfd->mec2_fw_version >= 0x6b))))
+                       && kfd->mec2_fw_version >= 0x6b) ||
+               (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
+                       && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
+                       && mes_rev >= 68))))
                ret = amdgpu_amdkfd_alloc_gws(node->adev,
                                node->adev->gds.gws_size, &node->gws);
 
index f515cb8f30caf01858901a4138dcfc52ca663fda..ccaf85fc12c25b4a562aeec59c8a2abbeab85486 100644 (file)
@@ -226,9 +226,10 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
        queue_input.paging = false;
        queue_input.tba_addr = qpd->tba_addr;
        queue_input.tma_addr = qpd->tma_addr;
-       queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-                             KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3);
+       queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
-       queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;
+       queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled ||
+                                            kfd_dbg_has_ttmps_always_setup(q->device);
 
        queue_type = convert_to_mes_queue_type(q->properties.type);
        if (queue_type < 0) {
@@ -238,10 +239,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
        }
        queue_input.queue_type = (uint32_t)queue_type;
 
-       if (q->gws) {
-               queue_input.gws_base = 0;
-               queue_input.gws_size = qpd->num_gws;
-       }
+       queue_input.exclusively_scheduled = q->properties.is_gws;
 
        amdgpu_mes_lock(&adev->mes);
        r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
@@ -251,7 +249,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
                        q->properties.doorbell_off);
                pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
                kfd_hws_hang(dqm);
-}
+       }
 
        return r;
 }
@@ -1621,7 +1619,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
        if (dqm->dev->kfd2kgd->get_iq_wait_times)
                dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
-                                       &dqm->wait_times);
+                                       &dqm->wait_times,
+                                       ffs(dqm->dev->xcc_mask) - 1);
        return 0;
 }
 
@@ -1663,6 +1662,26 @@ static int start_cpsch(struct device_queue_manager *dqm)
 
        if (!dqm->dev->kfd->shared_resources.enable_mes)
                execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
+
+       /* Set CWSR grace period to 1x1000 cycles for GFX 9.4.3 APUs */
+       if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
+           (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
+               uint32_t reg_offset = 0;
+               uint32_t grace_period = 1;
+
+               retval = pm_update_grace_period(&dqm->packet_mgr,
+                                               grace_period);
+               if (retval)
+                       pr_err("Setting grace timeout failed\n");
+               else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
+                       /* Update dqm->wait_times maintained in software */
+                       dqm->dev->kfd2kgd->build_grace_period_packet_info(
+                                       dqm->dev->adev, dqm->wait_times,
+                                       grace_period, &reg_offset,
+                                       &dqm->wait_times,
+                                       ffs(dqm->dev->xcc_mask) - 1);
+       }
+
        dqm_unlock(dqm);
 
        return 0;
@@ -1806,8 +1825,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
         */
        q->properties.is_evicted = !!qpd->evicted;
        q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
-                       KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) &&
-                       KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3);
+                                 kfd_dbg_has_cwsr_workaround(q->device);
 
        if (qd)
                mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
index 94c0fc2e57b7f8e6afafe438e1d75ee4f05eae75..83699392c8089cd12d8bafa274c72e6e0d8bcca3 100644 (file)
@@ -318,6 +318,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 }
 
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+                       enum kfd_preempt_type type, unsigned int timeout,
+                       uint32_t pipe_id, uint32_t queue_id)
+{
+       int err;
+       struct v10_compute_mqd *m;
+       u32 doorbell_off;
+
+       m = get_mqd(mqd);
+
+       doorbell_off = m->cp_hqd_pq_doorbell_control >>
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+       err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+       if (err)
+               pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+       return err;
+}
+
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
@@ -460,7 +480,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = kfd_hiq_load_mqd_kiq;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->destroy_mqd = destroy_hiq_mqd;
                mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
                mqd->mqd_stride = kfd_mqd_stride;
index 31fec5e70d13523a36781a8f78b0d32df5b2b537..2319467d2d9575cb365055a4ee4fab83f3cfda3a 100644 (file)
@@ -335,6 +335,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 }
 
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+                       enum kfd_preempt_type type, unsigned int timeout,
+                       uint32_t pipe_id, uint32_t queue_id)
+{
+       int err;
+       struct v11_compute_mqd *m;
+       u32 doorbell_off;
+
+       m = get_mqd(mqd);
+
+       doorbell_off = m->cp_hqd_pq_doorbell_control >>
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+       err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+       if (err)
+               pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+       return err;
+}
+
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
@@ -449,7 +469,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = kfd_hiq_load_mqd_kiq;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->destroy_mqd = destroy_hiq_mqd;
                mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v11_compute_mqd);
 #if defined(CONFIG_DEBUG_FS)
index 601bb9f68048ccfc13f7cc5dcab641e7ca727e73..e23d32f356077225f06b76008b8b75cc6ab22a87 100644 (file)
@@ -405,6 +405,25 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
 }
 
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+                       enum kfd_preempt_type type, unsigned int timeout,
+                       uint32_t pipe_id, uint32_t queue_id)
+{
+       int err;
+       struct v9_mqd *m;
+       u32 doorbell_off;
+
+       m = get_mqd(mqd);
+
+       doorbell_off = m->cp_hqd_pq_doorbell_control >>
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+       err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+       if (err)
+               pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+       return err;
+}
+
 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
@@ -548,16 +567,19 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
 {
        uint32_t xcc_mask = mm->dev->xcc_mask;
        int xcc_id, err, inst = 0;
-       void *xcc_mqd;
        uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+       struct v9_mqd *m;
+       u32 doorbell_off;
 
        for_each_inst(xcc_id, xcc_mask) {
-               xcc_mqd = mqd + hiq_mqd_size * inst;
-               err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
-                                                   type, timeout, pipe_id,
-                                                   queue_id, xcc_id);
+               m = get_mqd(mqd + hiq_mqd_size * inst);
+
+               doorbell_off = m->cp_hqd_pq_doorbell_control >>
+                               CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+               err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
                if (err) {
-                       pr_debug("Destroy MQD failed for xcc: %d\n", inst);
+                       pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst);
                        break;
                }
                ++inst;
@@ -846,7 +868,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                } else {
                        mqd->init_mqd = init_mqd_hiq;
                        mqd->load_mqd = kfd_hiq_load_mqd_kiq;
-                       mqd->destroy_mqd = kfd_destroy_mqd_cp;
+                       mqd->destroy_mqd = destroy_hiq_mqd;
                }
                break;
        case KFD_MQD_TYPE_DIQ:
index 29a2d0499b674f293127830f69a238be599061ba..8fda16e6fee6cb925ad5ada0c7b32a7da9aab667 100644 (file)
@@ -298,7 +298,8 @@ static int pm_set_grace_period_v9(struct packet_manager *pm,
                        pm->dqm->wait_times,
                        grace_period,
                        &reg_offset,
-                       &reg_data);
+                       &reg_data,
+                       0);
 
        if (grace_period == USE_DEFAULT_GRACE_PERIOD)
                reg_data = pm->dqm->wait_times;
index ba9d690541193a2579c4a2aefd9e76ddc0073f11..60e6b37b43badaab41ad0f63ac9263a5798c6071 100644 (file)
@@ -123,7 +123,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
        if (!gws && pdd->qpd.num_gws == 0)
                return -EINVAL;
 
-       if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) {
+       if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) {
                if (gws)
                        ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
                                gws, &mem);
@@ -136,7 +136,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
        } else {
                /*
                 * Intentionally set GWS to a non-NULL value
-                * for GFX 9.4.3.
+                * for devices that do not use GWS for global wave
+                * synchronization but require the formality
+                * of setting GWS for cooperative groups.
                 */
                pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
        }
@@ -173,7 +175,8 @@ void pqm_uninit(struct process_queue_manager *pqm)
 
        list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
                if (pqn->q && pqn->q->gws &&
-                   KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
+                   KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+                   !pqn->q->device->kfd->shared_resources.enable_mes)
                        amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
                                pqn->q->gws);
                kfd_procfs_del_queue(pqn->q);
@@ -455,7 +458,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
                }
 
                if (pqn->q->gws) {
-                       if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
+                       if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+                           !dev->kfd->shared_resources.enable_mes)
                                amdgpu_amdkfd_remove_gws_from_process(
                                                pqm->process->kgd_process_info,
                                                pqn->q->gws);
index 5ff1a5a89d96818d643e625b9ab594b8a742db32..01c7de2d6e1991619bd4d26b9e5e391e97ee5a40 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/types.h>
 #include <linux/sched/task.h>
 #include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
+
 #include "amdgpu_sync.h"
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
@@ -46,6 +48,8 @@
  * page table is updated.
  */
 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING   (2UL * NSEC_PER_MSEC)
+#define dynamic_svm_range_dump(svms) \
+       _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
 
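
The dump is now a dynamic-debug callsite named "svm_range_dump", so the potentially expensive range walk runs only when that site is switched on. A sketch of how it would be enabled at runtime, assuming CONFIG_DYNAMIC_DEBUG (the control-file syntax is inferred from the dynamic_debug interface, so treat it as an assumption):

	/* _dynamic_func_call_no_desc() registers a descriptor keyed by the
	 * "svm_range_dump" string; with dynamic debug built in, enable via:
	 *
	 *   echo 'format "svm_range_dump" +p' \
	 *        > /sys/kernel/debug/dynamic_debug/control
	 *
	 * When dynamic debug is compiled out, the wrapped call is elided. */
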
 /* Giant svm range split into smaller ranges based on this, it is decided using
  * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to
@@ -1455,37 +1459,34 @@ struct svm_validate_context {
        struct svm_range *prange;
        bool intr;
        DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
-       struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
-       struct list_head validate_list;
-       struct ww_acquire_ctx ticket;
+       struct drm_exec exec;
 };
 
-static int svm_range_reserve_bos(struct svm_validate_context *ctx)
+static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
 {
        struct kfd_process_device *pdd;
        struct amdgpu_vm *vm;
        uint32_t gpuidx;
        int r;
 
-       INIT_LIST_HEAD(&ctx->validate_list);
-       for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
-               pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
-               if (!pdd) {
-                       pr_debug("failed to find device idx %d\n", gpuidx);
-                       return -EINVAL;
-               }
-               vm = drm_priv_to_vm(pdd->drm_priv);
-
-               ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
-               ctx->tv[gpuidx].num_shared = 4;
-               list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
-       }
+       drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
+       drm_exec_until_all_locked(&ctx->exec) {
+               for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
+                       pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
+                       if (!pdd) {
+                               pr_debug("failed to find device idx %d\n", gpuidx);
+                               r = -EINVAL;
+                               goto unreserve_out;
+                       }
+                       vm = drm_priv_to_vm(pdd->drm_priv);
 
-       r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
-                                  ctx->intr, NULL);
-       if (r) {
-               pr_debug("failed %d to reserve bo\n", r);
-               return r;
+                       r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+                       drm_exec_retry_on_contention(&ctx->exec);
+                       if (unlikely(r)) {
+                               pr_debug("failed %d to reserve bo\n", r);
+                               goto unreserve_out;
+                       }
+               }
        }
 
        for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
@@ -1508,13 +1509,13 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
        return 0;
 
 unreserve_out:
-       ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
+       drm_exec_fini(&ctx->exec);
        return r;
 }
 
 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
 {
-       ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
+       drm_exec_fini(&ctx->exec);
 }
 
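
Taken together, the reserve/unreserve pair now follows drm_exec's transactional pattern: everything inside drm_exec_until_all_locked() may be replayed on contention, and drm_exec_fini() releases whatever was locked. The skeleton, lifted from the code above:

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 2);	/* room for 2 fences */
		drm_exec_retry_on_contention(&exec);	/* replay on eviction races */
		if (unlikely(r))
			goto out;
	}
	/* ... work with the locked objects ... */
out:
	drm_exec_fini(&exec);	/* drops all locks taken in the loop */
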
 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
@@ -1522,6 +1523,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
        struct kfd_process_device *pdd;
 
        pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+       if (!pdd)
+               return NULL;
 
        return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
 }
@@ -1596,12 +1599,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
        }
 
        if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
-               if (!prange->mapped_to_gpu) {
+               bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+               if (!prange->mapped_to_gpu ||
+                   bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
                        r = 0;
                        goto free_ctx;
                }
-
-               bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
        }
 
        if (prange->actual_loc && !prange->ttm_res) {
@@ -1613,7 +1616,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                goto free_ctx;
        }
 
-       svm_range_reserve_bos(ctx);
+       svm_range_reserve_bos(ctx, intr);
 
        p = container_of(prange->svms, struct kfd_process, svms);
        owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
@@ -3561,7 +3564,7 @@ out_unlock_range:
                        break;
        }
 
-       svm_range_debug_dump(svms);
+       dynamic_svm_range_dump(svms);
 
        mutex_unlock(&svms->lock);
        mmap_read_unlock(mm);
index 61fc62f3e0034a1b129ddb4ad298cb2667fa556d..3b07493903885650cd4042dfea4ce856edb143f2 100644 (file)
@@ -38,6 +38,7 @@
 #include "kfd_device_queue_manager.h"
 #include "kfd_iommu.h"
 #include "kfd_svm.h"
+#include "kfd_debug.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_ras.h"
 #include "amdgpu.h"
@@ -1931,23 +1932,27 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
                        HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
                        HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
 
-       if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
-               dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
-                                               HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+       if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
+               dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
 
-               if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2))
+       if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
+               if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3))
                        dev->node_props.debug_prop |=
-                               HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+                               HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 |
+                               HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3;
                else
+                       dev->node_props.debug_prop |=
+                               HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
+                               HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+
+               if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2))
                        dev->node_props.capability |=
                                HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
        } else {
                dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
                                        HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
 
-               if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0))
-                       dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
-               else
+               if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
                        dev->node_props.capability |=
                                HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
        }
index cba2cd5ed9d19cbda41dcfd07cd0eb44ed8f9e48..dea32a9e55060e8deb983e888290b19cdf58e2fd 100644 (file)
 #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
 
 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9    6
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 7
 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10   7
 #define HSA_DBG_WATCH_ADDR_MASK_HI_BIT  \
                        (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3 \
+                       (30 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
 
 struct kfd_node_properties {
        uint64_t hive_id;
index bf0a655d009e60e78235ca542706ca3f23440df9..901d1961b73927c7662fb1761cbfb36cf2ffaa7c 100644 (file)
@@ -5,7 +5,7 @@ menu "Display Engine Configuration"
 config DRM_AMD_DC
        bool "AMD DC - Enable new display engine"
        default y
-       depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
+       depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
        select SND_HDA_COMPONENT if SND_HDA_CORE
        # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
        select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
index ff0a217b9d567b8cb768e7d9e42f2c663e3536dd..d96b6eda332090585b687219ede673fb851432e0 100644 (file)
@@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
  */
 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
+       struct amdgpu_crtc *acrtc = NULL;
+
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
-       else {
-               struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 
-               if (acrtc->dm_irq_params.stream == NULL) {
-                       DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
-                                 crtc);
-                       return 0;
-               }
+       acrtc = adev->mode_info.crtcs[crtc];
 
-               return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+       if (!acrtc->dm_irq_params.stream) {
+               DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+                         crtc);
+               return 0;
        }
+
+       return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
 }
 
 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
 {
        u32 v_blank_start, v_blank_end, h_position, v_position;
+       struct amdgpu_crtc *acrtc = NULL;
 
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
-       else {
-               struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 
-               if (acrtc->dm_irq_params.stream ==  NULL) {
-                       DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
-                                 crtc);
-                       return 0;
-               }
-
-               /*
-                * TODO rework base driver to use values directly.
-                * for now parse it back into reg-format
-                */
-               dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
-                                        &v_blank_start,
-                                        &v_blank_end,
-                                        &h_position,
-                                        &v_position);
+       acrtc = adev->mode_info.crtcs[crtc];
 
-               *position = v_position | (h_position << 16);
-               *vbl = v_blank_start | (v_blank_end << 16);
+       if (!acrtc->dm_irq_params.stream) {
+               DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+                         crtc);
+               return 0;
        }
 
+       /*
+        * TODO rework base driver to use values directly.
+        * for now parse it back into reg-format
+        */
+       dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+                                &v_blank_start,
+                                &v_blank_end,
+                                &h_position,
+                                &v_position);
+
+       *position = v_position | (h_position << 16);
+       *vbl = v_blank_start | (v_blank_end << 16);
+
        return 0;
 }
 
@@ -424,12 +425,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
-                                                amdgpu_crtc->pflip_status,
-                                                AMDGPU_FLIP_SUBMITTED,
-                                                amdgpu_crtc->crtc_id,
-                                                amdgpu_crtc);
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+                            amdgpu_crtc->pflip_status,
+                            AMDGPU_FLIP_SUBMITTED,
+                            amdgpu_crtc->crtc_id,
+                            amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }
@@ -883,7 +884,7 @@ static int dm_set_powergating_state(void *handle,
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1283,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-       pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+       pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 
@@ -1347,6 +1348,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        if (amdgpu_in_reset(adev))
                goto skip;
 
+       if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+               offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+               dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+               spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+               offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+               spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+               goto skip;
+       }
+
        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
                dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1375,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
                DP_TEST_RESPONSE,
                &test_response.raw,
                sizeof(test_response));
-       }
-       else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+       } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                /* offload_work->data is from handle_hpd_rx_irq->
@@ -1554,7 +1563,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
 
-       if(amdgpu_dm_irq_init(adev)) {
+       if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }
@@ -1696,9 +1705,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;
 
-       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;
-       }
 
        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1950,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
        mutex_destroy(&adev->dm.dpia_aux_lock);
-
-       return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1958,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;
 
-       switch(adev->asic_type) {
+       switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
@@ -2709,7 +2715,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
-       } * bundle;
+       } *bundle;
        int k, m;
 
        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2745,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 
 cleanup:
        kfree(bundle);
-
-       return;
 }
 
 static int dm_resume(void *handle)
@@ -2954,8 +2958,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
@@ -3000,9 +3003,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
 
-       if (caps->ext_caps->bits.oled == 1 /*||
-           caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-           caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+       if (caps->ext_caps->bits.oled == 1
+           /*
+            * ||
+            * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+            * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+            */)
                caps->aux_support = true;
 
        if (amdgpu_backlight == 0)
@@ -3236,86 +3242,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-       u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-       u8 dret;
-       bool new_irq_handled = false;
-       int dpcd_addr;
-       int dpcd_bytes_to_read;
-
-       const int max_process_count = 30;
-       int process_count = 0;
-
-       const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-       if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-               dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-               /* DPCD 0x200 - 0x201 for downstream IRQ */
-               dpcd_addr = DP_SINK_COUNT;
-       } else {
-               dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-               /* DPCD 0x2002 - 0x2005 for downstream IRQ */
-               dpcd_addr = DP_SINK_COUNT_ESI;
-       }
-
-       dret = drm_dp_dpcd_read(
-               &aconnector->dm_dp_aux.aux,
-               dpcd_addr,
-               esi,
-               dpcd_bytes_to_read);
-
-       while (dret == dpcd_bytes_to_read &&
-               process_count < max_process_count) {
-               u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
-               u8 retry;
-               dret = 0;
-
-               process_count++;
-
-               DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-               /* handle HPD short pulse irq */
-               if (aconnector->mst_mgr.mst_state)
-                       drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
-                                                       esi,
-                                                       ack,
-                                                       &new_irq_handled);
-
-               if (new_irq_handled) {
-                       /* ACK at DPCD to notify down stream */
-                       for (retry = 0; retry < 3; retry++) {
-                               ssize_t wret;
-
-                               wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
-                                                         dpcd_addr + 1,
-                                                         ack[1]);
-                               if (wret == 1)
-                                       break;
-                       }
-
-                       if (retry == 3) {
-                               DRM_ERROR("Failed to ack MST event.\n");
-                               return;
-                       }
-
-                       drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
-                       /* check if there is new irq to be handled */
-                       dret = drm_dp_dpcd_read(
-                               &aconnector->dm_dp_aux.aux,
-                               dpcd_addr,
-                               esi,
-                               dpcd_bytes_to_read);
-
-                       new_irq_handled = false;
-               } else {
-                       break;
-               }
-       }
-
-       if (process_count == max_process_count)
-               DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
                                                        union hpd_irq_data hpd_irq_data)
 {
@@ -3377,7 +3303,23 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
                        hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-                       dm_handle_mst_sideband_msg(aconnector);
+                       bool skip = false;
+
+                       /*
+                        * DOWN_REP_MSG_RDY is also handled by the polling method
+                        * mgr->cbs->poll_hpd_irq()
+                        */
+                       spin_lock(&offload_wq->offload_lock);
+                       skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+                       if (!skip)
+                               offload_wq->is_handling_mst_msg_rdy_event = true;
+
+                       spin_unlock(&offload_wq->offload_lock);
+
+                       if (!skip)
+                               schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
                        goto out;
                }
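
The hunk above is a test-and-set under a spinlock: only the first short-pulse interrupt that observes the flag clear schedules the offload work, and the handler clears the flag once the message-ready event has been processed. A minimal sketch of the same dedup pattern, with illustrative names (struct offload_queue, try_schedule) rather than the driver's actual types:

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct offload_queue {
		spinlock_t lock;
		bool busy;			/* mirrors is_handling_mst_msg_rdy_event */
		struct work_struct work;
	};

	static void try_schedule(struct offload_queue *q)
	{
		bool skip;

		spin_lock(&q->lock);
		skip = q->busy;
		if (!skip)
			q->busy = true;		/* claim the slot before unlocking */
		spin_unlock(&q->lock);

		if (!skip)
			schedule_work(&q->work);
	}

	static void offload_worker(struct work_struct *work)
	{
		struct offload_queue *q = container_of(work, struct offload_queue, work);

		/* ... drain the MST sideband messages ... */

		spin_lock(&q->lock);
		q->busy = false;	/* allow the next event to schedule work */
		spin_unlock(&q->lock);
	}
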
 
@@ -3468,7 +3410,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                aconnector = to_amdgpu_dm_connector(connector);
                dc_link = aconnector->dc_link;
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+               if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
                        int_params.irq_source = dc_link->irq_source_hpd;
 
@@ -3477,7 +3419,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                                        (void *) aconnector);
                }
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+               if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 
                        /* Also register for DP short pulse (hpd_rx). */
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3486,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                        handle_hpd_rx_irq,
                                        (void *) aconnector);
-
-                       if (adev->dm.hpd_rx_offload_wq)
-                               adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
-                                       aconnector;
                }
+
+               if (adev->dm.hpd_rx_offload_wq)
+                       adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                               aconnector;
        }
 }
 
@@ -3503,7 +3445,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
-       unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+       unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3459,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-        *    for acknowledging and handling. */
+        *    for acknowledging and handling.
+        */
 
        /* Use VBLANK interrupt */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
@@ -3529,7 +3472,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 
                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
-                       dc_interrupt_to_irq_source(dc, i+1 , 0);
+                       dc_interrupt_to_irq_source(dc, i + 1, 0);
 
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 
@@ -3585,7 +3528,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
-       unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+       unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
        if (adev->family >= AMDGPU_FAMILY_AI)
                client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3545,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-        *    for acknowledging and handling. */
+        *    for acknowledging and handling.
+        */
 
        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4049,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 }
 
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
-                               unsigned *min, unsigned *max)
+                               unsigned int *min, unsigned int *max)
 {
        if (!caps)
                return 0;
@@ -4069,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
                                        uint32_t brightness)
 {
-       unsigned min, max;
+       unsigned int min, max;
 
        if (!get_brightness_range(caps, &min, &max))
                return brightness;
@@ -4082,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
                                      uint32_t brightness)
 {
-       unsigned min, max;
+       unsigned int min, max;
 
        if (!get_brightness_range(caps, &min, &max))
                return brightness;
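
get_brightness_range() only extracts the panel's min/max from the caps; the two converters rescale between the 0-255 user range and that window. A sketch of the linear rescale these helpers are assumed to perform (the exact rounding in the driver may differ):

	#include <linux/math.h>
	#include <linux/types.h>

	#define USER_BL_MAX 0xFF	/* assumed user-visible brightness range */

	static u32 user_to_panel(unsigned int min, unsigned int max, u32 user)
	{
		return min + DIV_ROUND_CLOSEST((max - min) * user, USER_BL_MAX);
	}

	static u32 panel_to_user(unsigned int min, unsigned int max, u32 panel)
	{
		if (panel <= min)
			return 0;
		return DIV_ROUND_CLOSEST((panel - min) * USER_BL_MAX, max - min);
	}
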
@@ -4148,6 +4092,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
                                         int bl_idx)
 {
+       int ret;
        struct amdgpu_dm_backlight_caps caps;
        struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
 
@@ -4162,13 +4107,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
                if (!rc)
                        return dm->brightness[bl_idx];
                return convert_brightness_to_user(&caps, avg);
-       } else {
-               int ret = dc_link_get_backlight_level(link);
-
-               if (ret == DC_ERROR_UNEXPECTED)
-                       return dm->brightness[bl_idx];
-               return convert_brightness_to_user(&caps, ret);
        }
+
+       ret = dc_link_get_backlight_level(link);
+
+       if (ret == DC_ERROR_UNEXPECTED)
+               return dm->brightness[bl_idx];
+
+       return convert_brightness_to_user(&caps, ret);
 }
 
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4562,7 +4508,6 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
        drm_atomic_private_obj_fini(&dm->atomic_obj);
-       return;
 }
 
 /******************************************************************************
@@ -5394,6 +5339,7 @@ static bool adjust_colour_depth_from_display_info(
 {
        enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
+
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5555,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
        struct dc_sink_init_data sink_init_data = { 0 };
        struct dc_sink *sink = NULL;
+
        sink_init_data.link = aconnector->dc_link;
        sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
 
@@ -5732,7 +5679,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
                return &aconnector->freesync_vid_base;
 
        /* Find the preferred mode */
-       list_for_each_entry (m, list_head, head) {
+       list_for_each_entry(m, list_head, head) {
                if (m->type & DRM_MODE_TYPE_PREFERRED) {
                        m_pref = m;
                        break;
@@ -5756,7 +5703,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
         * For some monitors, preferred mode is not the mode with highest
         * supported refresh rate.
         */
-       list_for_each_entry (m, list_head, head) {
+       list_for_each_entry(m, list_head, head) {
                current_refresh  = drm_mode_vrefresh(m);
 
                if (m->hdisplay == m_pref->hdisplay &&
@@ -5849,6 +5796,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                edp_min_bpp_x16, edp_max_bpp_x16,
                                dsc_caps,
                                &stream->timing,
+                               dc_link_get_highest_encoding_format(aconnector->dc_link),
                                &bw_range)) {
 
                if (bw_range.max_kbps < link_bw_in_kbps) {
@@ -5857,6 +5805,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                        &dsc_options,
                                        0,
                                        &stream->timing,
+                                       dc_link_get_highest_encoding_format(aconnector->dc_link),
                                        &dsc_cfg)) {
                                stream->timing.dsc_cfg = dsc_cfg;
                                stream->timing.flags.DSC = 1;
@@ -5871,6 +5820,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                &dsc_options,
                                link_bw_in_kbps,
                                &stream->timing,
+                               dc_link_get_highest_encoding_format(aconnector->dc_link),
                                &dsc_cfg)) {
                stream->timing.dsc_cfg = dsc_cfg;
                stream->timing.flags.DSC = 1;
@@ -5914,12 +5864,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                                &dsc_options,
                                                link_bandwidth_kbps,
                                                &stream->timing,
+                                               dc_link_get_highest_encoding_format(aconnector->dc_link),
                                                &stream->timing.dsc_cfg)) {
                                stream->timing.flags.DSC = 1;
                                DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
                        }
                } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-                       timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+                       timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+                                       dc_link_get_highest_encoding_format(aconnector->dc_link));
                        max_supported_bw_in_kbps = link_bandwidth_kbps;
                        dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
 
@@ -5931,6 +5883,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                                &dsc_options,
                                                dsc_max_supported_bw_in_kbps,
                                                &stream->timing,
+                                               dc_link_get_highest_encoding_format(aconnector->dc_link),
                                                &stream->timing.dsc_cfg)) {
                                        stream->timing.flags.DSC = 1;
                                        DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
@@ -6028,7 +5981,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 * This may not be an error, the use case is when we have no
                 * usermode calls to reset and set mode upon hotplug. In this
                 * case, we call set mode ourselves to restore the previous mode
-                * and the modelist may not be filled in in time.
+                * and the modelist may not be filled in time.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
@@ -6051,9 +6004,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                drm_mode_set_crtcinfo(&mode, 0);
 
        /*
-       * If scaling is enabled and refresh rate didn't change
-       * we copy the vic and polarities of the old timings
-       */
+        * If scaling is enabled and refresh rate didn't change
+        * we copy the vic and polarities of the old timings
+        */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6770,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
+
                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                          aconnector->force_yuv420_output;
                color_depth = convert_color_depth_from_display_info(connector,
@@ -7135,7 +7089,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 {
        struct drm_display_mode *m;
 
-       list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+       list_for_each_entry(m, &aconnector->base.probed_modes, head) {
                if (drm_mode_equal(m, mode))
                        return true;
        }
@@ -7295,6 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
        memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
        mutex_init(&aconnector->hpd_lock);
+       mutex_init(&aconnector->handle_mst_msg_ready);
 
        /*
         * configure support for HPD hot plug; connector->polled default value is 0
@@ -7454,7 +7409,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
        link->priv = aconnector;
 
-       DRM_DEBUG_DRIVER("%s()\n", __func__);
 
        i2c = create_i2c(link->ddc, link->link_index, &res);
        if (!i2c) {
@@ -7982,7 +7936,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
-                                   struct dc_state *dc_state,
                                    struct drm_device *dev,
                                    struct amdgpu_display_manager *dm,
                                    struct drm_crtc *pcrtc,
@@ -8125,7 +8078,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * Only allow immediate flips for fast updates that don't
                 * change memory domain, FB pitch, DCC state, rotation or
                 * mirroring.
+                *
+                * dm_crtc_helper_atomic_check() only accepts async flips with
+                * fast updates.
                 */
+               if (crtc->state->async_flip &&
+                   acrtc_state->update_type != UPDATE_TYPE_FAST)
+                       drm_warn_once(state->dev,
+                                     "[PLANE:%d:%s] async flip with non-fast update\n",
+                                     plane->base.id, plane->name);
                bundle->flip_addrs[planes_count].flip_immediate =
                        crtc->state->async_flip &&
                        acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8129,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                         * DRI3/Present extension with defined target_msc.
                         */
                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-               }
-               else {
+               } else {
                        /* For variable refresh rate mode only:
                         * Get vblank of last completed flip to avoid > 1 vrr
                         * flips per video frame by use of throttling, but allow
@@ -8455,55 +8415,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
 }
 
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+                                       struct dc_state *dc_state)
 {
        struct drm_device *dev = state->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_display_manager *dm = &adev->dm;
-       struct dm_atomic_state *dm_state;
-       struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
-       u32 i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-       unsigned long flags;
-       bool wait_for_vblank = true;
-       struct drm_connector *connector;
-       struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-       int crtc_disable_count = 0;
        bool mode_set_reset_required = false;
-       int r;
-
-       trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
-       r = drm_atomic_helper_wait_for_fences(dev, state, false);
-       if (unlikely(r))
-               DRM_ERROR("Waiting for fences timed out!");
-
-       drm_atomic_helper_update_legacy_modeset_state(dev, state);
-       drm_dp_mst_atomic_wait_for_dependencies(state);
-
-       dm_state = dm_atomic_get_new_state(state);
-       if (dm_state && dm_state->context) {
-               dc_state = dm_state->context;
-       } else {
-               /* No state changes, retain current state. */
-               dc_state_temp = dc_create_state(dm->dc);
-               ASSERT(dc_state_temp);
-               dc_state = dc_state_temp;
-               dc_resource_state_copy_construct_current(dm->dc, dc_state);
-       }
+       u32 i;
 
-       for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-                                      new_crtc_state, i) {
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+                                     new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8451,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
                drm_dbg_state(state->dev,
-                       "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-                       "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-                       "connectors_changed:%d\n",
+                       "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
@@ -8601,24 +8524,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                }
        } /* for_each_crtc_in_state() */
 
-       if (dc_state) {
-               /* if there mode set or reset, disable eDP PSR */
-               if (mode_set_reset_required) {
-                       if (dm->vblank_control_workqueue)
-                               flush_workqueue(dm->vblank_control_workqueue);
+       /* if there is a mode set or reset, disable eDP PSR */
+       if (mode_set_reset_required) {
+               if (dm->vblank_control_workqueue)
+                       flush_workqueue(dm->vblank_control_workqueue);
 
-                       amdgpu_dm_psr_disable_all(dm);
-               }
+               amdgpu_dm_psr_disable_all(dm);
+       }
 
-               dm_enable_per_frame_crtc_master_sync(dc_state);
-               mutex_lock(&dm->dc_lock);
-               WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+       dm_enable_per_frame_crtc_master_sync(dc_state);
+       mutex_lock(&dm->dc_lock);
+       WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
 
-               /* Allow idle optimization when vblank count is 0 for display off */
-               if (dm->active_vblank_irq_count == 0)
-                       dc_allow_idle_optimizations(dm->dc, true);
-               mutex_unlock(&dm->dc_lock);
-       }
+       /* Allow idle optimization when vblank count is 0 for display off */
+       if (dm->active_vblank_irq_count == 0)
+               dc_allow_idle_optimizations(dm->dc, true);
+       mutex_unlock(&dm->dc_lock);
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8638,6 +8559,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct amdgpu_display_manager *dm = &adev->dm;
+       struct dm_atomic_state *dm_state;
+       struct dc_state *dc_state = NULL;
+       u32 i, j;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       unsigned long flags;
+       bool wait_for_vblank = true;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_con_state, *new_con_state;
+       struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       int crtc_disable_count = 0;
+
+       trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+       drm_atomic_helper_update_legacy_modeset_state(dev, state);
+       drm_dp_mst_atomic_wait_for_dependencies(state);
+
+       dm_state = dm_atomic_get_new_state(state);
+       if (dm_state && dm_state->context) {
+               dc_state = dm_state->context;
+               amdgpu_dm_commit_streams(state, dc_state);
+       }
+
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8760,13 +8719,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-               struct dc_surface_update dummy_updates[MAX_SURFACES];
+               struct dc_surface_update *dummy_updates;
                struct dc_stream_update stream_update;
                struct dc_info_packet hdr_packet;
                struct dc_stream_status *status = NULL;
                bool abm_changed, hdr_changed, scaling_changed;
 
-               memset(&dummy_updates, 0, sizeof(dummy_updates));
                memset(&stream_update, 0, sizeof(stream_update));
 
                if (acrtc) {
@@ -8825,6 +8783,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                 * Here we create an empty update on each plane.
                 * To fix this, DC should permit updating only stream properties.
                 */
+               dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];
 
@@ -8836,6 +8795,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                            dm_new_crtc_state->stream,
                                            &stream_update);
                mutex_unlock(&dm->dc_lock);
+               kfree(dummy_updates);
        }
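
Moving dummy_updates off the stack avoids a MAX_SURFACES-sized dc_surface_update array in the commit-tail frame, at the price of a GFP_ATOMIC allocation. A defensive variant of the same change would check the allocation before dereferencing it; the hunk above proceeds without a check:

	/* Sketch only: kcalloc variant with a failure check (editorial). */
	dummy_updates = kcalloc(MAX_SURFACES, sizeof(*dummy_updates), GFP_ATOMIC);
	if (!dummy_updates) {
		DRM_ERROR("Failed to allocate surface updates\n");
		continue;	/* skip the stream update for this connector */
	}
	for (j = 0; j < status->plane_count; j++)
		dummy_updates[j].surface = status->plane_states[0];
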
 
        /**
@@ -8914,8 +8874,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
                if (dm_new_crtc_state->stream)
-                       amdgpu_dm_commit_planes(state, dc_state, dev,
-                                               dm, crtc, wait_for_vblank);
+                       amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
        }
 
        /* Update audio instances for each connector. */
@@ -8970,9 +8929,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        for (i = 0; i < crtc_disable_count; i++)
                pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
-
-       if (dc_state_temp)
-               dc_release_state(dc_state_temp);
 }
 
 static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -9104,8 +9060,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
                                        &commit->flip_done, 10*HZ);
 
                if (ret == 0)
-                       DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-                                 "timed out\n", crtc->base.id, crtc->name);
+                       DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+                                 crtc->base.id, crtc->name);
 
                drm_crtc_commit_put(commit);
        }
@@ -9190,7 +9146,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
        return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
        u64 num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
@@ -9312,9 +9269,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                goto skip_modeset;
 
        drm_dbg_state(state->dev,
-               "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-               "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-               "connectors_changed:%d\n",
+               "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
@@ -9343,8 +9298,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER(
-                               "Mode change not required for front porch change, "
-                               "setting mode_changed to %d",
+                               "Mode change not required for front porch change, setting mode_changed to %d",
                                new_crtc_state->mode_changed);
 
                        set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9310,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                        struct drm_display_mode *high_mode;
 
                        high_mode = get_highest_refresh_rate_mode(aconnector, false);
-                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
                                set_freesync_fixed_config(dm_new_crtc_state);
-                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9479,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
         */
        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;
+
                if (other->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
@@ -9624,11 +9578,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
        }
 
        /* Core DRM takes care of checking FB modifiers, so we only need to
-        * check tiling flags when the FB doesn't have a modifier. */
+        * check tiling flags when the FB doesn't have a modifier.
+        */
        if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
                if (adev->family < AMDGPU_FAMILY_AI) {
                        linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
-                                AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+                                AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
                                 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
                } else {
                        linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9762,8 +9717,8 @@ static int dm_update_plane_state(struct dc *dc,
                if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
                        if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
                                return -EINVAL;
-                       else
-                               *is_top_most_overlay = false;
+
+                       *is_top_most_overlay = false;
                }
 
                DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -9850,12 +9805,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
        /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe but it's going to inherit the scaling and
         * positioning from the underlying pipe. Check the cursor plane's
-        * blending properties match the underlying planes'. */
+        * blending properties match the underlying planes'.
+        */
 
        new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-       if (!new_cursor_state || !new_cursor_state->fb) {
+       if (!new_cursor_state || !new_cursor_state->fb)
                return 0;
-       }
 
        dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
        cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9855,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
        struct drm_connector_state *conn_state, *old_conn_state;
        struct amdgpu_dm_connector *aconnector = NULL;
        int i;
+
        for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
                if (!conn_state->crtc)
                        conn_state = old_conn_state;
@@ -10334,7 +10290,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        }
 
        /* Store the overall update type for use later in atomic check. */
-       for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct dm_crtc_state *dm_new_crtc_state =
                        to_dm_crtc_state(new_crtc_state);
 
@@ -10356,7 +10312,7 @@ fail:
        else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
                DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
        else
-               DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+               DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
 
        trace_amdgpu_dm_atomic_check_finish(state, ret);
 
index 4561f55afa99d157874207d84c9bee7a2f8e3b0b..9fb5bb3a75a777b619b41eb36ff74d461a3dbee3 100644 (file)
@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
         * we're handling link loss
         */
        bool is_handling_link_loss;
+       /**
+        * @is_handling_mst_msg_rdy_event: Used to prevent queueing another MST
+        * message ready event while one is already being handled
+        */
+       bool is_handling_mst_msg_rdy_event;
        /**
         * @aconnector: The aconnector that this work queue is attached to
         */
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
        struct drm_dp_mst_port *mst_output_port;
        struct amdgpu_dm_connector *mst_root;
        struct drm_dp_aux *dsc_aux;
+       struct mutex handle_mst_msg_ready;
+
        /* TODO see if we can merge with ddc_bus or make a dm_connector */
        struct amdgpu_i2c_adapter *i2c;
 
index 0802f8e8fac5f07f1f53d9fc85986154e499a45a..52ecfa746b54dd23b4bdc5bfa6d48d3714a31f6b 100644 (file)
@@ -123,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
        secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
        crtc = secure_display_ctx->crtc;
 
-       if (!crtc) {
+       if (!crtc)
                return;
-       }
 
        psp = &drm_to_adev(crtc->dev)->psp;
 
@@ -151,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
        ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
 
        if (!ret) {
-               if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+               if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
                        psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
-               }
        }
 
        mutex_unlock(&psp->securedisplay_context.mutex);
index 440fc0869a34ba631c84df42df35318ed37aeabb..30d4c6fd95f531de021e54657662c605f8543663 100644 (file)
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
+       /*
+        * Only allow async flips for fast updates that don't change the FB
+        * pitch, the DCC state, rotation, etc.
+        */
+       if (crtc_state->async_flip &&
+           dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+               drm_dbg_atomic(crtc->dev,
+                              "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+                              crtc->base.id, crtc->name);
+               return -EINVAL;
+       }
+
        /* In some use cases, like reset, no stream is attached */
        if (!dm_crtc_state->stream)
                return 0;
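
Together with the drm_warn_once() added in amdgpu_dm_commit_planes(), this turns an async flip that needs a full update from a silent demotion into an -EINVAL at check time. A compositor can react by retrying as a vsynced flip; an illustrative libdrm sketch (names and flow are an example, not a prescribed client):

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drmMode.h>

	static int flip_with_fallback(int fd, uint32_t crtc_id, uint32_t fb_id,
				      void *user_data)
	{
		/* try an immediate (tearing) flip first */
		int ret = drmModePageFlip(fd, crtc_id, fb_id,
					  DRM_MODE_PAGE_FLIP_EVENT |
					  DRM_MODE_PAGE_FLIP_ASYNC, user_data);

		/* async rejected (e.g. non-fast update): fall back to vsync */
		if (ret == -EINVAL)
			ret = drmModePageFlip(fd, crtc_id, fb_id,
					      DRM_MODE_PAGE_FLIP_EVENT, user_data);
		return ret;
	}
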
index d63ee636483b86a1b6691d15766cd28a9b376a48..7c21e21bcc51a0e77d5cf18dd0c15dd90239214b 100644 (file)
@@ -1075,24 +1075,24 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
 
        switch (dm_crtc_state->stream->output_color_space) {
        case COLOR_SPACE_SRGB:
-               seq_printf(m, "sRGB");
+               seq_puts(m, "sRGB");
                break;
        case COLOR_SPACE_YCBCR601:
        case COLOR_SPACE_YCBCR601_LIMITED:
-               seq_printf(m, "BT601_YCC");
+               seq_puts(m, "BT601_YCC");
                break;
        case COLOR_SPACE_YCBCR709:
        case COLOR_SPACE_YCBCR709_LIMITED:
-               seq_printf(m, "BT709_YCC");
+               seq_puts(m, "BT709_YCC");
                break;
        case COLOR_SPACE_ADOBERGB:
-               seq_printf(m, "opRGB");
+               seq_puts(m, "opRGB");
                break;
        case COLOR_SPACE_2020_RGB_FULLRANGE:
-               seq_printf(m, "BT2020_RGB");
+               seq_puts(m, "BT2020_RGB");
                break;
        case COLOR_SPACE_2020_YCBCR:
-               seq_printf(m, "BT2020_YCC");
+               seq_puts(m, "BT2020_YCC");
                break;
        default:
                goto unlock;
@@ -3022,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused)
                        seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
                }
        } else {
-               seq_printf(m, "ILR is not supported by this eDP panel.\n");
+               seq_puts(m, "ILR is not supported by this eDP panel.\n");
        }
 
        return 0;
index 5536d17306d00d6fa4abf285b873eff838355ae6..8db47f66eac06ebadd1473db37372b3083b7450c 100644 (file)
 static bool
 lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
 {
-
        struct dc_link *link = handle;
        struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
-       struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+       struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
+                                 link->dc->caps.i2c_speed_in_khz};
 
        return dm_helpers_submit_i2c(link->ctx, link, &cmd);
 }
@@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3
 {
        struct dc_link *link = handle;
 
-       struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
-       struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+       struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
+                                            {false, address, size, data} };
+       struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
+                                 link->dc->caps.i2c_speed_in_khz};
 
        return dm_helpers_submit_i2c(link->ctx, link, &cmd);
 }
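
lp_read_i2c() follows the usual register-read shape: a one-byte write payload carrying the offset, then a read payload for the data, submitted as a single transaction. For comparison, the same pattern with the generic kernel I2C API (not the dm_helpers path used here):

	#include <linux/i2c.h>

	static int reg_read(struct i2c_adapter *adap, u16 addr, u8 offset,
			    u8 *data, u16 len)
	{
		struct i2c_msg msgs[2] = {
			{ .addr = addr, .flags = 0,        .len = 1,   .buf = &offset },
			{ .addr = addr, .flags = I2C_M_RD, .len = len, .buf = data },
		};

		/* i2c_transfer() returns the number of messages completed */
		return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
	}
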
@@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
 
 static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
 {
-
        struct ta_hdcp_shared_memory *hdcp_cmd;
 
        if (!psp->hdcp_context.context.initialized) {
@@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
        *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
        *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
 
-
        return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
 }
 
-static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+static int psp_set_srm(struct psp_context *psp,
+                      u8 *srm, uint32_t srm_size, uint32_t *srm_version)
 {
-
        struct ta_hdcp_shared_memory *hdcp_cmd;
 
        if (!psp->hdcp_context.context.initialized) {
@@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
 
        psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+       if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+           hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
            hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
                return -EINVAL;
 
@@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
 
 static void link_lock(struct hdcp_workqueue *work, bool lock)
 {
-
        int i = 0;
 
        for (i = 0; i < work->max_link; i++) {
@@ -160,10 +160,11 @@ static void link_lock(struct hdcp_workqueue *work, bool lock)
                        mutex_unlock(&work[i].mutex);
        }
 }
+
 void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
                         unsigned int link_index,
                         struct amdgpu_dm_connector *aconnector,
-                        uint8_t content_type,
+                        u8 content_type,
                         bool enable_encryption)
 {
        struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -178,18 +179,19 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
        query.display = NULL;
        mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
 
-       if (query.display != NULL) {
+       if (query.display) {
                memcpy(display, query.display, sizeof(struct mod_hdcp_display));
                mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
 
                hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
 
                if (enable_encryption) {
-                       /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
-                        * (s3 resume case)
+                       /* Explicitly set the saved SRM as the sysfs call will come
+                        * after we already enabled hdcp (s3 resume case)
                         */
                        if (hdcp_work->srm_size > 0)
-                               psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+                               psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
+                                           hdcp_work->srm_size,
                                            &hdcp_work->srm_version);
 
                        display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
@@ -219,7 +221,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 }
 
 static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
-                        unsigned int link_index,
+                               unsigned int link_index,
                         struct amdgpu_dm_connector *aconnector)
 {
        struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -238,7 +240,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
                conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
                DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
-                        aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms);
+                                aconnector->base.index, conn_state->hdcp_content_type,
+                                aconnector->base.dpms);
        }
 
        mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
@@ -246,6 +249,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
        process_output(hdcp_w);
        mutex_unlock(&hdcp_w->mutex);
 }
+
 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
 {
        struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -274,15 +278,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index
        schedule_work(&hdcp_w->cpirq_work);
 }
 
-
-
-
 static void event_callback(struct work_struct *work)
 {
        struct hdcp_workqueue *hdcp_work;
 
        hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
-                                     callback_dwork);
+                                callback_dwork);
 
        mutex_lock(&hdcp_work->mutex);
 
@@ -294,13 +295,12 @@ static void event_callback(struct work_struct *work)
        process_output(hdcp_work);
 
        mutex_unlock(&hdcp_work->mutex);
-
-
 }
 
 static void event_property_update(struct work_struct *work)
 {
-       struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+       struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
+                                                       property_update_work);
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_device *dev;
        long ret;
@@ -334,11 +334,10 @@ static void event_property_update(struct work_struct *work)
                mutex_lock(&hdcp_work->mutex);
 
                if (conn_state->commit) {
-                       ret = wait_for_completion_interruptible_timeout(
-                               &conn_state->commit->hw_done, 10 * HZ);
+                       ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
+                                                                       10 * HZ);
                        if (ret == 0) {
-                               DRM_ERROR(
-                                       "HDCP state unknown! Setting it to DESIRED");
+                               DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
                                hdcp_work->encryption_status[conn_index] =
                                        MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
                        }
@@ -349,24 +348,20 @@ static void event_property_update(struct work_struct *work)
                                DRM_MODE_HDCP_CONTENT_TYPE0 &&
                                hdcp_work->encryption_status[conn_index] <=
                                MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
-
                                DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
-                               drm_hdcp_update_content_protection(
-                                       connector,
-                                       DRM_MODE_CONTENT_PROTECTION_ENABLED);
+                               drm_hdcp_update_content_protection(connector,
+                                                                  DRM_MODE_CONTENT_PROTECTION_ENABLED);
                        } else if (conn_state->hdcp_content_type ==
                                        DRM_MODE_HDCP_CONTENT_TYPE1 &&
                                        hdcp_work->encryption_status[conn_index] ==
                                        MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
-                               drm_hdcp_update_content_protection(
-                                       connector,
-                                       DRM_MODE_CONTENT_PROTECTION_ENABLED);
+                               drm_hdcp_update_content_protection(connector,
+                                                                  DRM_MODE_CONTENT_PROTECTION_ENABLED);
                        }
                } else {
                        DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
-                       drm_hdcp_update_content_protection(
-                               connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
-
+                       drm_hdcp_update_content_protection(connector,
+                                                          DRM_MODE_CONTENT_PROTECTION_DESIRED);
                }
                mutex_unlock(&hdcp_work->mutex);
                drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -402,7 +397,7 @@ static void event_property_validate(struct work_struct *work)
                                       &query);
 
                DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
-                       aconnector->base.index,
+                                aconnector->base.index,
                        aconnector->base.state->content_protection,
                        query.encryption_status,
                        hdcp_work->encryption_status[conn_index]);
@@ -410,7 +405,8 @@ static void event_property_validate(struct work_struct *work)
                if (query.encryption_status !=
                    hdcp_work->encryption_status[conn_index]) {
                        DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
-                               hdcp_work->encryption_status[conn_index], query.encryption_status);
+                                        hdcp_work->encryption_status[conn_index],
+                                        query.encryption_status);
 
                        hdcp_work->encryption_status[conn_index] =
                                query.encryption_status;
@@ -429,7 +425,7 @@ static void event_watchdog_timer(struct work_struct *work)
        struct hdcp_workqueue *hdcp_work;
 
        hdcp_work = container_of(to_delayed_work(work),
-                                     struct hdcp_workqueue,
+                                struct hdcp_workqueue,
                                      watchdog_timer_dwork);
 
        mutex_lock(&hdcp_work->mutex);
@@ -443,7 +439,6 @@ static void event_watchdog_timer(struct work_struct *work)
        process_output(hdcp_work);
 
        mutex_unlock(&hdcp_work->mutex);
-
 }
 
 static void event_cpirq(struct work_struct *work)
@@ -459,10 +454,8 @@ static void event_cpirq(struct work_struct *work)
        process_output(hdcp_work);
 
        mutex_unlock(&hdcp_work->mutex);
-
 }
 
-
 void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
 {
        int i = 0;
@@ -478,10 +471,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
        kfree(hdcp_work);
 }
 
-
 static bool enable_assr(void *handle, struct dc_link *link)
 {
-
        struct hdcp_workqueue *hdcp_work = handle;
        struct mod_hdcp hdcp = hdcp_work->hdcp;
        struct psp_context *psp = hdcp.config.psp.handle;
@@ -499,7 +490,8 @@ static bool enable_assr(void *handle, struct dc_link *link)
        memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
        dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
-       dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+       dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
+               link->link_enc_hw_inst;
        dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
 
        psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -541,7 +533,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        else if (aconnector->dc_em_sink)
                sink = aconnector->dc_em_sink;
 
-       if (sink != NULL)
+       if (sink)
                link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
 
        display->controller = CONTROLLER_ID_D0 + config->otg_inst;
@@ -567,16 +559,20 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        conn_state = aconnector->base.state;
 
        DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
-                       (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
-                       (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+                        (!!aconnector->base.state) ?
+                        aconnector->base.state->content_protection : -1,
+                        (!!aconnector->base.state) ?
+                        aconnector->base.state->hdcp_content_type : -1);
 
        if (conn_state)
                hdcp_update_display(hdcp_work, link_index, aconnector,
-                       conn_state->hdcp_content_type, false);
+                                   conn_state->hdcp_content_type, false);
 }
 
-
-/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel
+/**
+ * DOC: Add sysfs interface for set/get srm
+ *
+ * NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
  *      will automatically call once or twice depending on the size
  *
  * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
@@ -587,23 +583,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
  * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on
  * the last call we will send the full SRM. PSP will fail on every call before the last.
  *
- * This means we don't know if the SRM is good until the last call. And because of this limitation we
- * cannot throw errors early as it will stop the kernel from writing to sysfs
+ * This means we don't know if the SRM is good until the last call. And because of this
+ * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs
  *
  * Example 1:
- *     Good SRM size = 5096
- *     first call to write 4096 -> PSP fails
- *     Second call to write 1000 -> PSP Pass -> SRM is set
+ *     Good SRM size = 5096
+ *     first call to write 4096 -> PSP fails
+ *     Second call to write 1000 -> PSP Pass -> SRM is set
  *
  * Example 2:
- *     Bad SRM size = 4096
- *     first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
- *     is the last call)
+ *     Bad SRM size = 4096
+ *     first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ *     is the last call)
  *
  * Solution?:
- *     1: Parse the SRM? -> It is signed so we don't know the EOF
- *     2: We can have another sysfs that passes the size before calling set. -> simpler solution
- *     below
+ *     1: Parse the SRM? -> It is signed so we don't know the EOF
+ *     2: We can have another sysfs that passes the size before calling set. -> simpler solution
+ *     below
  *
  * Easy Solution:
  * Always call get after Set to verify if set was successful.
@@ -612,20 +608,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
  * +----------------------+
 * PSP will only update its srm if it is older than the one we are trying to load.
 * Always do set first, then get.
- *     -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
- *     version and save it
+ *     -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer
+ *     version and save it
  *
- *     -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
- *     same(newer) version back and save it
+ *     -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
+ *     same(newer) version back and save it
  *
- *     -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
- *     incorrect/corrupted and we should correct our SRM by getting it from PSP
+ *     -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
+ *     incorrect/corrupted and we should correct our SRM by getting it from PSP
  */
-static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
+                             struct bin_attribute *bin_attr, char *buffer,
                              loff_t pos, size_t count)
 {
        struct hdcp_workqueue *work;
-       uint32_t srm_version = 0;
+       u32 srm_version = 0;
 
        work = container_of(bin_attr, struct hdcp_workqueue, attr);
        link_lock(work, true);
@@ -639,19 +636,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi
                work->srm_version = srm_version;
        }
 
-
        link_lock(work, false);
 
        return count;
 }
 
-static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
+                            struct bin_attribute *bin_attr, char *buffer,
                             loff_t pos, size_t count)
 {
        struct hdcp_workqueue *work;
-       uint8_t *srm = NULL;
-       uint32_t srm_version;
-       uint32_t srm_size;
+       u8 *srm = NULL;
+       u32 srm_version;
+       u32 srm_size;
        size_t ret = count;
 
        work = container_of(bin_attr, struct hdcp_workqueue, attr);
@@ -684,12 +681,12 @@ ret:
 /* From the hdcp spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
  *
  * For example,
- *     if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
- *     needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
- *     across boot/reboots/suspend/resume/shutdown
+ *     if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
+ *     needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
+ *     across boot/reboots/suspend/resume/shutdown
  *
- * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
- * to make the SRM persistent.
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
+ * we need to make the SRM persistent.
  *
 * -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
 * -The kernel cannot write to the filesystem.
@@ -699,8 +696,8 @@ ret:
  *
  * Usermode can read/write to/from PSP using the sysfs interface
  * For example:
- *     to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
- *     to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ *     to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ *     to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
  */
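For illustration, a minimal userspace sketch of the "set then get" verification flow the comments above recommend. The sysfs path matches the usage example in the comment; the helper name, buffer size, chunked-write loop, and single-read assumption are illustrative assumptions, not part of this patch:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	#define SRM_NODE "/sys/class/drm/card0/device/hdcp_srm"

	/* Write the SRM, then read it back and compare: if PSP rejected
	 * the blob, the readback will not match what we wrote.
	 */
	static int srm_set_and_verify(const unsigned char *srm, size_t len)
	{
		unsigned char readback[8192];	/* assumed large enough */
		size_t off = 0;
		ssize_t got;
		int fd;

		fd = open(SRM_NODE, O_WRONLY);
		if (fd < 0)
			return -1;

		/* sysfs may take the blob in chunks (see the comment above) */
		while (off < len) {
			ssize_t put = write(fd, srm + off, len - off);

			if (put <= 0) {
				close(fd);
				return -1;
			}
			off += put;
		}
		close(fd);

		/* "2. GET": PSP keeps the SRM only if it validated it */
		fd = open(SRM_NODE, O_RDONLY);
		if (fd < 0)
			return -1;
		got = read(fd, readback, sizeof(readback));
		close(fd);

		return (got == (ssize_t)len && !memcmp(srm, readback, len)) ? 0 : -1;
	}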
 static const struct bin_attribute data_attr = {
        .attr = {.name = "hdcp_srm", .mode = 0664},
@@ -709,10 +706,9 @@ static const struct bin_attribute data_attr = {
        .read = srm_data_read,
 };
 
-
-struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
+                                            struct cp_psp *cp_psp, struct dc *dc)
 {
-
        int max_caps = dc->caps.max_links;
        struct hdcp_workqueue *hdcp_work;
        int i = 0;
@@ -721,14 +717,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
        if (ZERO_OR_NULL_PTR(hdcp_work))
                return NULL;
 
-       hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+       hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+                                sizeof(*hdcp_work->srm), GFP_KERNEL);
 
-       if (hdcp_work->srm == NULL)
+       if (!hdcp_work->srm)
                goto fail_alloc_context;
 
-       hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+       hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+                                     sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
 
-       if (hdcp_work->srm_temp == NULL)
+       if (!hdcp_work->srm_temp)
                goto fail_alloc_context;
 
        hdcp_work->max_link = max_caps;
@@ -781,10 +779,5 @@ fail_alloc_context:
        kfree(hdcp_work);
 
        return NULL;
-
-
-
 }
 
-
-
index d9a482908380dfd06bbac67b42ca6b5795c52997..e94eeeb9768870b6e060002d0c9bd1684c6fa0e3 100644 (file)
@@ -68,15 +68,15 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
        }
 }
 
-/* dm_helpers_parse_edid_caps
- *
- * Parse edid caps
+/**
+ * dm_helpers_parse_edid_caps() - Parse edid caps
  *
+ * @link: current detected link
  * @edid:      [in] pointer to edid
- *  edid_caps: [in] pointer to edid caps
- * @return
- *     void
- * */
+ * @edid_caps: [in] pointer to edid caps
+ *
+ * Return: void
+ */
 enum dc_edid_status dm_helpers_parse_edid_caps(
                struct dc_link *link,
                const struct dc_edid *edid,
@@ -255,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
        /* Accessing the connector state is required for vcpi_slots allocation
         * and directly relies on behaviour in commit check
         * that blocks before commit guaranteeing that the state
-        * is not gonna be swapped while still in use in commit tail */
+        * is not going to be swapped while still in use in commit tail
+        */
 
        if (!aconnector || !aconnector->mst_root)
                return false;
@@ -282,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
        /* mst_mgr->payloads are VC payloads that notify the MST branch using DPCD or
         * AUX message. Slots 1-63 are allocated in sequence for each
         * stream. AMD ASIC stream slot allocation should follow the same
-        * sequence. copy DRM MST allocation to dc */
+        * sequence. Copy the DRM MST allocation to dc
+        */
        fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
 
        return true;
@@ -426,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx,
        total = log_ctx->pos + n + 1;
 
        if (total > log_ctx->size) {
-               char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
+               char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
 
                if (buf) {
                        memcpy(buf, log_ctx->buf, log_ctx->pos);
@@ -633,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
        ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
 
        if (ret < 0) {
-               DRM_ERROR("     execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
+               DRM_ERROR("%s: write cmd ..., err = %d\n",  __func__, ret);
                return false;
        }
 
@@ -655,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
                drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
        }
 
-       DC_LOG_DC("     execute_synaptics_rc_command - success = %d\n", success);
+       DC_LOG_DC("%s: success = %d\n", __func__, success);
 
        return success;
 }
@@ -664,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
 {
        unsigned char data[16] = {0};
 
-       DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+       DC_LOG_DC("Start %s\n", __func__);
 
        // Step 2
        data[0] = 'P';
@@ -722,7 +724,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
        if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
                return;
 
-       DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
+       DC_LOG_DC("Done %s\n", __func__);
 }
 
 /* MST Dock */
@@ -995,9 +997,8 @@ void dm_helpers_override_panel_settings(
        struct dc_panel_config *panel_config)
 {
        // Feature DSC
-       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                panel_config->dsc.disable_dsc_edp = true;
-       }
 }
 
 void *dm_helpers_allocate_gpu_mem(
index 19f543ba7205e99124b44be029216720b2244652..51467f132c2604a81ea5cc2e9e30e408d9c2cb38 100644 (file)
@@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work)
 
        /* Call a DAL subcomponent which registered for interrupt notification
         * at INTERRUPT_LOW_IRQ_CONTEXT.
-        * (The most common use is HPD interrupt) */
+        * (The most common use is HPD interrupt)
+        */
 }
 
 /*
@@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
 
        if (handler_removed == false) {
                /* Not necessarily an error - caller may not
-                * know the context. */
+                * know the context.
+                */
                return NULL;
        }
 
@@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params,
 static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
                                               irq_handler_idx handler_idx)
 {
-       if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+       if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
                DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
                return false;
        }
@@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
        /* This pointer will be stored by code which requested interrupt
         * registration.
         * The same pointer will be needed in order to unregister the
-        * interrupt. */
+        * interrupt.
+        */
 
        DRM_DEBUG_KMS(
                "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
@@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 
        if (handler_list == NULL) {
                /* If we got here, it means we searched all irq contexts
-                * for this irq source, but the handler was not found. */
+                * for this irq source, but the handler was not found.
+                */
                DRM_ERROR(
                "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
                        ih, irq_source);
@@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                /* The handler was removed from the table,
                 * it means it is safe to flush all the 'work'
-                * (because no code can schedule a new one). */
+                * (because no code can schedule a new one).
+                */
                lh = &adev->dm.irq_handler_list_low_tab[src];
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 
@@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 
                if (!list_empty(hnd_list_l)) {
-                       list_for_each_safe (entry, tmp, hnd_list_l) {
+                       list_for_each_safe(entry, tmp, hnd_list_l) {
                                handler = list_entry(
                                        entry,
                                        struct amdgpu_dm_irq_handler_data,
@@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
        if (list_empty(handler_list))
                return;
 
-       list_for_each_entry (handler_data, handler_list, list) {
+       list_for_each_entry(handler_data, handler_list, list) {
                if (queue_work(system_highpri_wq, &handler_data->work)) {
                        work_queued = true;
                        break;
@@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
                            &adev->dm.irq_handler_list_high_tab[irq_source],
                            list) {
                /* Call a subcomponent which registered for immediate
-                * interrupt notification */
+                * interrupt notification
+                */
                handler_data->handler(handler_data->handler_arg);
        }
 
@@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
        return 0;
 }
 
-static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
 {
        switch (type) {
        case AMDGPU_HPD_1:
@@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
 
 static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
-                                      unsigned type,
+                                      unsigned int type,
                                       enum amdgpu_interrupt_state state)
 {
        enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
@@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
 
 static inline int dm_irq_state(struct amdgpu_device *adev,
                               struct amdgpu_irq_src *source,
-                              unsigned crtc_id,
+                              unsigned int crtc_id,
                               enum amdgpu_interrupt_state state,
                               const enum irq_type dal_irq_type,
                               const char *func)
@@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 
 static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *source,
-                                        unsigned crtc_id,
+                                        unsigned int crtc_id,
                                         enum amdgpu_interrupt_state state)
 {
        return dm_irq_state(
@@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
 
 static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
-                                       unsigned crtc_id,
+                                       unsigned int crtc_id,
                                        enum amdgpu_interrupt_state state)
 {
        return dm_irq_state(
@@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 
                const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+               if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd,
                                        true);
                }
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+               if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd_rx,
                                        true);
@@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
                                to_amdgpu_dm_connector(connector);
                const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+               if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd,
                                        false);
                }
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+               if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
                        dc_interrupt_set(adev->dm.dc,
                                        dc_link->irq_source_hpd_rx,
                                        false);
index 46d0a8f57e552b87ef1f8f7159b19af6641b21f2..943959012d04cfd7c5b139526aa0a6d4e98476f6 100644 (file)
@@ -296,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
        if (!aconnector->edid) {
                struct edid *edid;
+
                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
 
                if (!edid) {
@@ -619,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+       struct drm_dp_mst_topology_mgr *mgr,
+       enum mst_msg_ready_type msg_rdy_type)
+{
+       uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+       uint8_t dret;
+       bool new_irq_handled = false;
+       int dpcd_addr;
+       uint8_t dpcd_bytes_to_read;
+       const uint8_t max_process_count = 30;
+       uint8_t process_count = 0;
+       u8 retry;
+       struct amdgpu_dm_connector *aconnector =
+                       container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+       const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+       if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+               dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+               /* DPCD 0x200 - 0x201 for downstream IRQ */
+               dpcd_addr = DP_SINK_COUNT;
+       } else {
+               dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+               /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+               dpcd_addr = DP_SINK_COUNT_ESI;
+       }
+
+       mutex_lock(&aconnector->handle_mst_msg_ready);
+
+       while (process_count < max_process_count) {
+               u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+               process_count++;
+
+               dret = drm_dp_dpcd_read(
+                       &aconnector->dm_dp_aux.aux,
+                       dpcd_addr,
+                       esi,
+                       dpcd_bytes_to_read);
+
+               if (dret != dpcd_bytes_to_read) {
+                       DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+                       break;
+               }
+
+               DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+               switch (msg_rdy_type) {
+               case DOWN_REP_MSG_RDY_EVENT:
+                       /* Only handle the DOWN_REP_MSG_RDY case */
+                       esi[1] &= DP_DOWN_REP_MSG_RDY;
+                       break;
+               case UP_REQ_MSG_RDY_EVENT:
+                       /* Only handle the UP_REQ_MSG_RDY case */
+                       esi[1] &= DP_UP_REQ_MSG_RDY;
+                       break;
+               default:
+                       /* Handle both cases */
+                       esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+                       break;
+               }
+
+               if (!esi[1])
+                       break;
+
+               /* handle MST irq */
+               if (aconnector->mst_mgr.mst_state)
+                       drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+                                                esi,
+                                                ack,
+                                                &new_irq_handled);
+
+               if (new_irq_handled) {
+                       /* ACK at DPCD to notify downstream */
+                       for (retry = 0; retry < 3; retry++) {
+                               ssize_t wret;
+
+                               wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+                                                         dpcd_addr + 1,
+                                                         ack[1]);
+                               if (wret == 1)
+                                       break;
+                       }
+
+                       if (retry == 3) {
+                               DRM_ERROR("Failed to ack MST event.\n");
+                               break;
+                       }
+
+                       drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+                       new_irq_handled = false;
+               } else {
+                       break;
+               }
+       }
+
+       mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+       if (process_count == max_process_count)
+               DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+       dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
+       .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@@ -717,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
                                        &dsc_options,
                                        0,
                                        params[i].timing,
+                                       dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
                                        &params[i].timing->dsc_cfg)) {
                        params[i].timing->flags.DSC = 1;
 
@@ -767,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
                        param.sink->ctx->dc->res_pool->dscs[0],
                        &param.sink->dsc_caps.dsc_dec_caps,
                        &dsc_options,
-                       (int) kbps, param.timing, &dsc_config);
+                       (int) kbps, param.timing,
+                       dc_link_get_highest_encoding_format(param.aconnector->dc_link),
+                       &dsc_config);
 
        return dsc_config.bits_per_pixel;
 }
@@ -1005,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                dsc_policy.min_target_bpp * 16,
                                dsc_policy.max_target_bpp * 16,
                                &stream->sink->dsc_caps.dsc_dec_caps,
-                               &stream->timing, &params[count].bw_range))
-                       params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+                               &stream->timing,
+                               dc_link_get_highest_encoding_format(dc_link),
+                               &params[count].bw_range))
+                       params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+                                       dc_link_get_highest_encoding_format(dc_link));
 
                count++;
        }
@@ -1466,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
                                       dsc_policy.min_target_bpp * 16,
                                       dsc_policy.max_target_bpp * 16,
                                       &stream->sink->dsc_caps.dsc_dec_caps,
-                                      &stream->timing, bw_range);
+                                      &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
 
        return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
 }
index 1e4ede1e57abd3f83399f5cb16e2fa42818e9779..37c820ab0fdbc42da5d299cdaf6a974774dc71fc 100644 (file)
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B     1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B  1000
 
+enum mst_msg_ready_type {
+       NONE_MSG_RDY_EVENT = 0,
+       DOWN_REP_MSG_RDY_EVENT = 1,
+       UP_REQ_MSG_RDY_EVENT = 2,
+       DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
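+
+/* DOWN_OR_UP_MSG_RDY_EVENT selects both message types;
+ * dm_handle_mst_sideband_msg_ready_event() masks the ESI byte with
+ * DP_DOWN_REP_MSG_RDY, DP_UP_REQ_MSG_RDY, or both based on this value.
+ */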
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+       struct drm_dp_mst_topology_mgr *mgr,
+       enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
        int pbn;
        bool dsc_enabled;
index 32266897374792b30b6e474d1586dc9c413efd01..2198df96ed6ff02f4294c6de97cd64bfe4f0415a 100644 (file)
@@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
                        DRM_FORMAT_ARGB8888,
                        DRM_FORMAT_RGBA8888,
                        DRM_FORMAT_ABGR8888,
+                       DRM_FORMAT_ARGB2101010,
+                       DRM_FORMAT_ABGR2101010,
+                       DRM_FORMAT_ARGB16161616,
+                       DRM_FORMAT_ABGR16161616,
+                       DRM_FORMAT_ARGB16161616F,
                };
                uint32_t format = plane_state->fb->format->format;
                unsigned int i;
@@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier)
        return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
 }
 
-static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
 {
        if (modifier == DRM_FORMAT_MOD_LINEAR)
                return 0;
@@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev,
        int pkrs = 0;
        u32 gb_addr_config;
        u8 i = 0;
-       unsigned swizzle_r_x;
+       unsigned int swizzle_r_x;
        uint64_t modifier_r_x;
        uint64_t modifier_dcc_best;
        uint64_t modifier_dcc_4k;
@@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane,
         * caps list.
         */
 
-       switch (plane->type) {
-       case DRM_PLANE_TYPE_PRIMARY:
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
+               (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
                for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;
@@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane,
                        formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
                        formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
                }
-               break;
+       } else {
+               switch (plane->type) {
+               case DRM_PLANE_TYPE_OVERLAY:
+                       for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+                               if (num_formats >= max_formats)
+                                       break;
 
-       case DRM_PLANE_TYPE_OVERLAY:
-               for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
-                       if (num_formats >= max_formats)
-                               break;
+                               formats[num_formats++] = overlay_formats[i];
+                       }
+                       break;
 
-                       formats[num_formats++] = overlay_formats[i];
-               }
-               break;
+               case DRM_PLANE_TYPE_CURSOR:
+                       for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+                               if (num_formats >= max_formats)
+                                       break;
 
-       case DRM_PLANE_TYPE_CURSOR:
-               for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
-                       if (num_formats >= max_formats)
-                               break;
+                               formats[num_formats++] = cursor_formats[i];
+                       }
+                       break;
 
-                       formats[num_formats++] = cursor_formats[i];
+               default:
+                       break;
                }
-               break;
        }
 
        return num_formats;
@@ -1459,6 +1468,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                drm_plane_create_blend_mode_property(plane, blend_caps);
        }
 
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+               drm_plane_create_zpos_immutable_property(plane, 0);
+       } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+               unsigned int zpos = 1 + drm_plane_index(plane);
+               drm_plane_create_zpos_property(plane, zpos, 1, 254);
+       } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+               drm_plane_create_zpos_immutable_property(plane, 255);
+       }
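+       /* Resulting layering: the primary plane is pinned to the bottom
+        * (zpos 0), overlays get a mutable zpos starting at 1 + plane index,
+        * and the cursor is pinned to the top (zpos 255).
+        */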
+
        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
            plane_cap &&
            (plane_cap->pixel_format_support.nv12 ||
index 75284e2cec747b3aa9b1de6504db2524c75988ed..848c5b4bb301a5045e80ab2f4e67084015c2cb61 100644 (file)
@@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type(
                        if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
                                /* This clock is higher than the validation clock.
                                 * That means the previous one is the highest
-                                * non-boosted one. */
+                                * non-boosted one.
+                                */
                                DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
                                                dc_clks->num_levels, i);
                                dc_clks->num_levels = i > 0 ? i : 1;
@@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes(
         * TODO: expand this to other ASICs
         */
        if ((adev->asic_type >= CHIP_POLARIS10) &&
-            (adev->asic_type <= CHIP_VEGAM) &&
-            !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
-                                               (void *)wm_with_clock_ranges))
-                       return true;
+           (adev->asic_type <= CHIP_VEGAM) &&
+           !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
+                                                        (void *)wm_with_clock_ranges))
+               return true;
 
        return false;
 }
index 4f61d4f257cd7a908fcbfaeae9540451ec6111b0..08ce3bb8f640d9dc24de199526383b18b1cb4469 100644 (file)
@@ -166,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
         */
        if (vsync_rate_hz != 0) {
                unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+
                num_frames_static = (30000 / frame_time_microsec) + 1;
        }
 
index 352e9afb85c6d67354eb47204b36c8dbc4e9da83..e295a839ab4761337475e58ff0264115eded2830 100644 (file)
@@ -24,7 +24,7 @@
  */
 
 #include "dm_services.h"
-#include "conversion.h"
+#include "basics/conversion.h"
 
 #define DIVIDER 10000
 
index 84aeccf36b4bedf70ea97f056b7f5afd6d7ea271..6d2924114a3e8b8c70fbb2c69ee7f07c0aad3b85 100644 (file)
@@ -50,12 +50,11 @@ bool dal_vector_construct(
        return true;
 }
 
-static bool dal_vector_presized_costruct(
-       struct vector *vector,
-       struct dc_context *ctx,
-       uint32_t count,
-       void *initial_value,
-       uint32_t struct_size)
+static bool dal_vector_presized_costruct(struct vector *vector,
+                                        struct dc_context *ctx,
+                                        uint32_t count,
+                                        void *initial_value,
+                                        uint32_t struct_size)
 {
        uint32_t i;
 
index 27af9d3c2b73d66a96094cf35cb569181f5bac56..4f005ae1516c47f9d1455609e79c15240f899159 100644 (file)
@@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info(
        return NULL;
 }
 
-static enum bp_result update_slot_layout_info(
-       struct dc_bios *dcb,
-       unsigned int i,
-       struct slot_layout_info *slot_layout_info,
-       unsigned int record_offset)
+static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
+                                             unsigned int i,
+                                             struct slot_layout_info *slot_layout_info,
+                                             unsigned int record_offset)
 {
        unsigned int j;
        struct bios_parser *bp;
@@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info(
 }
 
 
-static enum bp_result get_bracket_layout_record(
-       struct dc_bios *dcb,
-       unsigned int bracket_layout_id,
-       struct slot_layout_info *slot_layout_info)
+static enum bp_result get_bracket_layout_record(struct dc_bios *dcb,
+                                               unsigned int bracket_layout_id,
+                                               struct slot_layout_info *slot_layout_info)
 {
        unsigned int i;
        unsigned int record_offset;
index cce47d3f1a13988d97b6206d6ff3fe422c3de589..540d19efad8f6af9a0c3d2ff43a394cff8dec3e1 100644 (file)
@@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object(
 }
 
 /* from graphics_object_id, find display path which includes the object_id */
-static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(
-       struct bios_parser *bp,
-       struct graphics_object_id id)
+static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp,
+                                                                       struct graphics_object_id id)
 {
        unsigned int i;
        struct graphics_object_id obj_id = {0};
@@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info(
        return BP_RESULT_OK;
 }
 
-static struct atom_hpd_int_record *get_hpd_record_for_path_v3(
-       struct bios_parser *bp,
-       struct atom_display_object_path_v3 *object)
+static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp,
+                                                             struct atom_display_object_path_v3 *object)
 {
        struct atom_common_record_header *header;
        uint32_t offset;
@@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record(
        return NULL;
 }
 
-static struct atom_connector_caps_record *get_connector_caps_record(
-       struct bios_parser *bp,
-       struct atom_display_object_path_v3 *object)
+static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp,
+                                                                   struct atom_display_object_path_v3 *object)
 {
        struct atom_common_record_header *header;
        uint32_t offset;
@@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
        return BP_RESULT_OK;
 }
 
-static struct atom_connector_speed_record *get_connector_speed_cap_record(
-       struct bios_parser *bp,
-       struct atom_display_object_path_v3 *object)
+static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp,
+                                                                         struct atom_display_object_path_v3 *object)
 {
        struct atom_common_record_header *header;
        uint32_t offset;
index 7ccd96959256d06aa9ec1f70e74784362c103e4e..3db4ef564b997a54880fcfff1aa28c7efa0d4b51 100644 (file)
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
                                stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
                                stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
                        tmds_present = true;
+
+               /* Check stream/link detection to ensure that the PHY is active */
+               if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+                       display_count++;
+
        }
 
        for (i = 0; i < dc->link_count; i++) {
index 2f7c8996b19d33b4785fa721de3f4d82f1421fa7..3ba2e13d691d96e17115835124023439e3c0b14d 100644 (file)
@@ -87,6 +87,14 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
 #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK     0x0000F000L
 #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK     0xFFFF0000L
 
+#define regCLK1_CLK2_BYPASS_CNTL                       0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX      0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT  0x0
+#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV__SHIFT   0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK            0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV_MASK             0x000F0000L
+
 #define REG(reg_name) \
        (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
 
@@ -436,6 +444,11 @@ static DpmClocks314_t dummy_clocks;
 
 static struct dcn314_watermarks dummy_wms = { 0 };
 
+static struct dcn314_ss_info_table ss_info_table = {
+       .ss_divider = 1000,
+       .ss_percentage = {0, 0, 375, 375, 375}
+};
+
 static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
 {
        int i, num_valid_sets;
@@ -715,6 +728,20 @@ static struct clk_mgr_funcs dcn314_funcs = {
 };
 extern struct clk_mgr_funcs dcn3_fpga_funcs;
 
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+       uint32_t clock_source;
+
+       REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+
+       clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+       if (clk_mgr->dprefclk_ss_percentage != 0) {
+               clk_mgr->ss_on_dprefclk = true;
+               clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+       }
+}
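+
+/* Example lookup with ss_info_table above: if CLK2_BYPASS_SEL reads
+ * back 2, dprefclk_ss_percentage becomes ss_percentage[2] = 375;
+ * since that is non-zero, ss_on_dprefclk is set and
+ * dprefclk_ss_divider is set to 1000.
+ */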
+
 void dcn314_clk_mgr_construct(
                struct dc_context *ctx,
                struct clk_mgr_dcn314 *clk_mgr,
@@ -781,9 +808,11 @@ void dcn314_clk_mgr_construct(
 
        clk_mgr->base.base.dprefclk_khz = 600000;
        clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
-       dce_clock_read_ss_info(&clk_mgr->base);
+
+       dcn314_read_ss_info_from_lut(&clk_mgr->base);
        /* if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios */
-       //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
+       clk_mgr->base.base.dprefclk_khz =
+               dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
 
        clk_mgr->base.base.bw_params = &dcn314_bw_params;
 
index 171f84340eb2fb1d532776ac348cc1fbfad858f5..e0670dafe2600de68729a0e2b8b04b9b5ab163e7 100644 (file)
@@ -28,6 +28,8 @@
 #define __DCN314_CLK_MGR_H__
 #include "clk_mgr_internal.h"
 
+#define NUM_CLOCK_SOURCES   5
+
 struct dcn314_watermarks;
 
 struct dcn314_smu_watermark_set {
@@ -40,6 +42,11 @@ struct clk_mgr_dcn314 {
        struct dcn314_smu_watermark_set smu_wm_set;
 };
 
+struct dcn314_ss_info_table {
+       uint32_t ss_divider;
+       uint32_t ss_percentage[NUM_CLOCK_SOURCES];
+};
+
 bool dcn314_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b);
 
index d7de756301cf7346e19b84a330a46a88de28ebf9..0349631991b84ed437fbe56d4edbb21ce1307d9c 100644 (file)
@@ -55,14 +55,6 @@ struct IP_BASE
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 };
 
-static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } },
-                                        { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } },
-                                        { { 0x00017000, 0x02402000, 0, 0, 0, 0 } },
-                                        { { 0x00017200, 0x02402400, 0, 0, 0, 0 } },
-                                        { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } },
-                                        { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } },
-                                        { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } };
-
 #define regCLK1_CLK_PLL_REQ                                            0x0237
 #define regCLK1_CLK_PLL_REQ_BASE_IDX                   0
 
@@ -73,9 +65,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
 #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK             0x0000F000L
 #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK             0xFFFF0000L
 
-#define REG(reg_name) \
-       (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-
 #define TO_CLK_MGR_DCN316(clk_mgr)\
        container_of(clk_mgr, struct clk_mgr_dcn316, base)
 
@@ -577,36 +566,6 @@ static struct clk_mgr_funcs dcn316_funcs = {
 };
 extern struct clk_mgr_funcs dcn3_fpga_funcs;
 
-static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
-{
-       /* get FbMult value */
-       struct fixed31_32 pll_req;
-       unsigned int fbmult_frac_val = 0;
-       unsigned int fbmult_int_val = 0;
-
-       /*
-        * Register value of fbmult is in 8.16 format, we are converting to 31.32
-        * to leverage the fix point operations available in driver
-        */
-
-       REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
-       REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
-
-       pll_req = dc_fixpt_from_int(fbmult_int_val);
-
-       /*
-        * since fractional part is only 16 bit in register definition but is 32 bit
-        * in our fix point definiton, need to shift left by 16 to obtain correct value
-        */
-       pll_req.value |= fbmult_frac_val << 16;
-
-       /* multiply by REFCLK period */
-       pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
-
-       /* integer part is now VCO frequency in kHz */
-       return dc_fixpt_floor(pll_req);
-}
-
 void dcn316_clk_mgr_construct(
                struct dc_context *ctx,
                struct clk_mgr_dcn316 *clk_mgr,
@@ -660,7 +619,8 @@ void dcn316_clk_mgr_construct(
                clk_mgr->base.smu_present = true;
 
        // Skip this for now as it did not work on DCN315, re-enable during bring-up
-       clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+       //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+       clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
 
        /* in case we don't get a value from the register, use default */
        if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
index cb992aca760dc4690da4b6c348a045be6cd0733e..0701d03b88a9cee10612c751d2e6c235e67f4cc6 100644 (file)
@@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
 
        clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
        for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
-               int dpp_inst, dppclk_khz, prev_dppclk_khz;
+               int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
 
                dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
 
index d133e4186a52017615f2d430261a2721d9a190bf..7cac14f493f6e0c5be9a0707e77065c76c87b26c 100644 (file)
@@ -1047,8 +1047,10 @@ static void disable_all_writeback_pipes_for_stream(
                stream->writeback_info[i].wb_enabled = false;
 }
 
-static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
-                                         struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc,
+                                         struct dc_state *context,
+                                         struct dc_stream_state *stream,
+                                         bool lock)
 {
        int i;
 
@@ -3582,9 +3584,9 @@ static void commit_planes_for_stream_fast(struct dc *dc,
                        context->block_sequence_steps);
        /* Clear update flags so next flip doesn't have redundant programming
         * (if there's no stream update, the update flags are not cleared).
+        * Surface updates are cleared unconditionally at the beginning of each flip,
+        * so no need to clear here.
         */
-       if (top_pipe_to_program->plane_state)
-               top_pipe_to_program->plane_state->update_flags.raw = 0;
        if (top_pipe_to_program->stream)
                top_pipe_to_program->stream->update_flags.raw = 0;
 }
@@ -4088,9 +4090,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
                struct dc_state *transition_base_context)
 {
        struct dc_state *transition_context = dc_create_state(dc);
-       enum pipe_split_policy tmp_mpc_policy;
-       bool temp_dynamic_odm_policy;
-       bool temp_subvp_policy;
+       enum pipe_split_policy tmp_mpc_policy = 0;
+       bool temp_dynamic_odm_policy = 0;
+       bool temp_subvp_policy = 0;
        enum dc_status ret = DC_ERROR_UNEXPECTED;
        unsigned int i, j;
        unsigned int pipe_in_use = 0;
@@ -4284,7 +4286,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c
        return false;
 }
 
-static bool full_update_required(struct dc_surface_update *srf_updates,
+static bool full_update_required(struct dc *dc,
+               struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_update *stream_update,
                struct dc_stream_state *stream)
@@ -4292,6 +4295,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
 
        int i;
        struct dc_stream_status *stream_status;
+       const struct dc_state *context = dc->current_state;
 
        for (i = 0; i < surface_count; i++) {
                if (srf_updates &&
@@ -4302,7 +4306,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
                                srf_updates[i].in_transfer_func ||
                                srf_updates[i].func_shaper ||
                                srf_updates[i].lut3d_func ||
-                               srf_updates[i].blend_tf))
+                               srf_updates[i].blend_tf ||
+                               srf_updates[i].surface->force_full_update ||
+                               (srf_updates[i].flip_addr &&
+                               srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+                               !is_surface_in_context(context, srf_updates[i].surface)))
                        return true;
        }
 
@@ -4340,18 +4348,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
                if (stream_status == NULL || stream_status->plane_count != surface_count)
                        return true;
        }
+       if (dc->idle_optimizations_allowed)
+               return true;
 
        return false;
 }
 
-static bool fast_update_only(struct dc_fast_update *fast_update,
+static bool fast_update_only(struct dc *dc,
+               struct dc_fast_update *fast_update,
                struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_update *stream_update,
                struct dc_stream_state *stream)
 {
        return fast_updates_exist(fast_update, surface_count)
-                       && !full_update_required(srf_updates, surface_count, stream_update, stream);
+                       && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
 }
 
 bool dc_update_planes_and_stream(struct dc *dc,
@@ -4369,8 +4380,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
         * cause underflow. Apply stream configuration with minimal pipe
         * split first to avoid unsupported transitions for active pipes.
         */
-       bool force_minimal_pipe_splitting;
-       bool is_plane_addition;
+       bool force_minimal_pipe_splitting = 0;
+       bool is_plane_addition = 0;
 
        populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
        force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
@@ -4423,7 +4434,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
        }
 
        update_seamless_boot_flags(dc, context, surface_count, stream);
-       if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+       if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
                        !dc->debug.enable_legacy_fast_update) {
                commit_planes_for_stream_fast(dc,
                                srf_updates,
@@ -4569,7 +4580,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
        TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
 
        update_seamless_boot_flags(dc, context, surface_count, stream);
-       if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+       if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
                        !dc->debug.enable_legacy_fast_update) {
                commit_planes_for_stream_fast(dc,
                                srf_updates,
@@ -5273,3 +5284,56 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
        if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
                pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
 }
+
+/*****************************************************************************
+ *  dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
+ *                          ABM
+ *  @dc: dc structure
+ *     @stream: stream where vsync int state changed
+ *  @pData: abm hw states
+ *
+ ****************************************************************************/
+bool dc_abm_save_restore(
+               struct dc *dc,
+               struct dc_stream_state *stream,
+               struct abm_save_restore *pData)
+{
+       int i;
+       int edp_num;
+       struct pipe_ctx *pipe = NULL;
+       struct dc_link *link = stream->sink->link;
+       struct dc_link *edp_links[MAX_NUM_EDP];
+
+
+       /* find primary pipe associated with stream */
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream == stream && pipe->stream_res.tg)
+                       break;
+       }
+
+       if (i == MAX_PIPES) {
+               ASSERT(0);
+               return false;
+       }
+
+       dc_get_edp_links(dc, edp_links, &edp_num);
+
+       /* Determine panel inst */
+       for (i = 0; i < edp_num; i++)
+               if (edp_links[i] == link)
+                       break;
+
+       if (i == edp_num)
+               return false;
+
+       if (pipe->stream_res.abm &&
+               pipe->stream_res.abm->funcs->save_restore)
+               return pipe->stream_res.abm->funcs->save_restore(
+                               pipe->stream_res.abm,
+                               i,
+                               pData);
+       return false;
+}
+
index cb2bf9a466f5fba52111b29abeaeb489b4ad582d..f99ec1b0efaffb8666bc77caee5347ba3dbe6725 100644 (file)
@@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type(
                ret = true;
        return ret;
 }
+
 static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
 {
        enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
index 18e098568cb468bd0d1806e1533376bd427a3e96..0d19d4cd191659eae0a62453f6db614e3808471f 100644 (file)
@@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
        return link->dc->link_srv->dp_get_verified_link_cap(link);
 }
 
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link)
+{
+       if (dc_is_dp_signal(link->connector_signal)) {
+               if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE &&
+                               link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE)
+                       return DC_LINK_ENCODING_HDMI_TMDS;
+               else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+                               DP_8b_10b_ENCODING)
+                       return DC_LINK_ENCODING_DP_8b_10b;
+               else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+                               DP_128b_132b_ENCODING)
+                       return DC_LINK_ENCODING_DP_128b_132b;
+       } else if (dc_is_hdmi_signal(link->connector_signal)) {
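+               /* empty branch: HDMI links fall through to DC_LINK_ENCODING_UNSPECIFIED */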
+       }
+
+       return DC_LINK_ENCODING_UNSPECIFIED;
+}
+
 bool dc_link_is_dp_sink_present(struct dc_link *link)
 {
        return link->dc->link_srv->dp_is_sink_present(link);
index 2f3d9a698486d2d4461bb2da749a64f082c1a5f9..d0f4b86cadf1f259a1f0390dbe4add94564675fa 100644 (file)
 #include "../dcn32/dcn32_resource.h"
 #include "../dcn321/dcn321_resource.h"
 
+#define VISUAL_CONFIRM_BASE_DEFAULT 3
+#define VISUAL_CONFIRM_BASE_MIN 1
+#define VISUAL_CONFIRM_BASE_MAX 10
+#define VISUAL_CONFIRM_DPP_OFFSET 3
 
 #define DC_LOGGER_INIT(logger)
 
+#define HEAD_NOT_IN_ODM -2
+#define UNABLE_TO_SPLIT -1
+
 enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 {
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -740,7 +747,12 @@ int get_num_mpc_splits(struct pipe_ctx *pipe)
 int get_num_odm_splits(struct pipe_ctx *pipe)
 {
        int odm_split_count = 0;
-       struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+       struct pipe_ctx *next_pipe = NULL;
+
+       while (pipe->top_pipe)
+               pipe = pipe->top_pipe;
+
+       next_pipe = pipe->next_odm_pipe;
        while (next_pipe) {
                odm_split_count++;
                next_pipe = next_pipe->next_odm_pipe;
@@ -753,32 +765,35 @@ int get_num_odm_splits(struct pipe_ctx *pipe)
        return odm_split_count;
 }
 
-static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+static int get_odm_split_index(struct pipe_ctx *pipe_ctx)
 {
-       *split_count = get_num_odm_splits(pipe_ctx);
-       *split_idx = 0;
-       if (*split_count == 0) {
-               /*Check for mpc split*/
-               struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
-
-               *split_count = get_num_mpc_splits(pipe_ctx);
-               while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
-                       (*split_idx)++;
-                       split_pipe = split_pipe->top_pipe;
-               }
+       struct pipe_ctx *split_pipe = NULL;
+       int index = 0;
 
-               /* MPO window on right side of ODM split */
-               if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
-                       (*split_idx)++;
-       } else {
-               /*Get odm split index*/
-               struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+       while (pipe_ctx->top_pipe)
+               pipe_ctx = pipe_ctx->top_pipe;
 
-               while (split_pipe) {
-                       (*split_idx)++;
-                       split_pipe = split_pipe->prev_odm_pipe;
-               }
+       split_pipe = pipe_ctx->prev_odm_pipe;
+
+       while (split_pipe) {
+               index++;
+               split_pipe = split_pipe->prev_odm_pipe;
        }
+
+       return index;
+}
+
+static int get_mpc_split_index(struct pipe_ctx *pipe_ctx)
+{
+       struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+       int index = 0;
+
+       while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+               index++;
+               split_pipe = split_pipe->top_pipe;
+       }
+
+       return index;
 }
 
 /*
@@ -800,82 +815,357 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
        }
 }
 
-static void calculate_recout(struct pipe_ctx *pipe_ctx)
+static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
 {
-       const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-       const struct dc_stream_state *stream = pipe_ctx->stream;
-       struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-       struct rect surf_clip = plane_state->clip_rect;
-       bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-       int split_count, split_idx;
+       struct rect rec;
+       int r0_x_end = r0->x + r0->width;
+       int r1_x_end = r1->x + r1->width;
+       int r0_y_end = r0->y + r0->height;
+       int r1_y_end = r1->y + r1->height;
+
+       rec.x = r0->x > r1->x ? r0->x : r1->x;
+       rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
+       rec.y = r0->y > r1->y ? r0->y : r1->y;
+       rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
+
+       /* in case that there is no intersection */
+       if (rec.width < 0 || rec.height < 0)
+               memset(&rec, 0, sizeof(rec));
+
+       return rec;
+}
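+
+/* For example, intersecting {x=0, y=0, w=100, h=100} with {x=50, y=50,
+ * w=100, h=100} yields {x=50, y=50, w=50, h=50}; disjoint inputs yield
+ * an all-zero rect via the memset above.
+ */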
 
-       calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
-       if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
-               split_idx = 0;
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+       struct rect rec_out = *rec_in;
+
+       rec_out.x += x;
+       rec_out.y += y;
+
+       return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+       const struct dc_stream_state *stream = pipe_ctx->stream;
+       int odm_slice_count = get_num_odm_splits(pipe_ctx) + 1;
+       int odm_slice_idx = get_odm_split_index(pipe_ctx);
+       bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+       int h_active = stream->timing.h_addressable +
+                       stream->timing.h_border_left +
+                       stream->timing.h_border_right;
+       int odm_slice_width = h_active / odm_slice_count;
+       struct rect odm_rec;
+
+       odm_rec.x = odm_slice_width * odm_slice_idx;
+       odm_rec.width = is_last_odm_slice ?
+                       /* last slice width is the remainder of h_active */
+                       h_active - odm_slice_width * (odm_slice_count - 1) :
+                       /* odm slice width is the floor of h_active / count */
+                       odm_slice_width;
+       odm_rec.y = 0;
+       odm_rec.height = stream->timing.v_addressable +
+                       stream->timing.v_border_bottom +
+                       stream->timing.v_border_top;
+
+       return odm_rec;
+}
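
A quick numeric check of the slice-width rule above, with a hypothetical
h_active that does not divide evenly:

    #include <assert.h>

    static void odm_slice_width_example(void)
    {
            int h_active = 3841, odm_slice_count = 2;
            int odm_slice_width = h_active / odm_slice_count;            /* 1920 */
            int last_slice_width =
                    h_active - odm_slice_width * (odm_slice_count - 1);  /* 1921 */

            /* slice 0: x = 0,    width = 1920
             * slice 1: x = 1920, width = 1921 (absorbs the remainder)
             */
            assert(odm_slice_width + last_slice_width == h_active);
    }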
 
+static struct rect calculate_plane_rec_in_timing_active(
+               struct pipe_ctx *pipe_ctx,
+               const struct rect *rec_in)
+{
        /*
-        * Only the leftmost ODM pipe should be offset by a nonzero distance
+        * The following diagram shows an example where we map a 1920x1200
+        * desktop to a 2560x1440 timing with a plane rect in the middle
+        * of the screen. To map a plane rect from Stream Source to Timing
+        * Active space, we first multiply the stream scaling ratios (i.e.
+        * 2304/1920 horizontal and 1440/1200 vertical) into the plane's x and
+        * y, then we add the stream destination offsets (i.e. 128 horizontal,
+        * 0 vertical). This gives us the plane rect's position in Timing
+        * Active. However we still have to remove the fractional part. The
+        * rule is that we compute the left/right and top/bottom edge positions
+        * and round each value to the nearest integer.
+        *
+        * Stream Source Space
+        * ------------
+        *        __________________________________________________
+        *       |Stream Source (1920 x 1200) ^                     |
+        *       |                            y                     |
+        *       |         <------- w --------|>                    |
+        *       |          __________________V                     |
+        *       |<-- x -->|Plane//////////////| ^                  |
+        *       |         |(pre scale)////////| |                  |
+        *       |         |///////////////////| |                  |
+        *       |         |///////////////////| h                  |
+        *       |         |///////////////////| |                  |
+        *       |         |///////////////////| |                  |
+        *       |         |///////////////////| V                  |
+        *       |                                                  |
+        *       |                                                  |
+        *       |__________________________________________________|
+        *
+        *
+        * Timing Active Space
+        * ---------------------------------
+        *
+        *       Timing Active (2560 x 1440)
+        *        __________________________________________________
+        *       |*****|  Stream Destination (2304 x 1440)    |*****|
+        *       |*****|                                      |*****|
+        *       |<128>|                                      |*****|
+        *       |*****|     __________________               |*****|
+        *       |*****|    |Plane/////////////|              |*****|
+        *       |*****|    |(post scale)//////|              |*****|
+        *       |*****|    |//////////////////|              |*****|
+        *       |*****|    |//////////////////|              |*****|
+        *       |*****|    |//////////////////|              |*****|
+        *       |*****|    |//////////////////|              |*****|
+        *       |*****|                                      |*****|
+        *       |*****|                                      |*****|
+        *       |*****|                                      |*****|
+        *       |*****|______________________________________|*****|
+        *
+        * So the resulting formulas are shown below:
+        *
+        * recout_x = 128 + round(plane_x * 2304 / 1920)
+        * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+        * recout_y = 0 + round(plane_y * 1440 / 1200)
+        * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+        *
+        * NOTE: fixed point division is not error free. To reduce errors
+        * introduced by fixed point division, we divide only after
+        * multiplication is complete.
         */
-       if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-               /* MPO window on right side of ODM split */
-               data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
-                               stream->dst.width / stream->src.width;
-       } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
-               data->recout.x = stream->dst.x;
-               if (stream->src.x < surf_clip.x)
-                       data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
-                                               / stream->src.width;
-       } else
-               data->recout.x = 0;
-
-       if (stream->src.x > surf_clip.x)
-               surf_clip.width -= stream->src.x - surf_clip.x;
-       data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
-       if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
-               data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
-       data->recout.y = stream->dst.y;
-       if (stream->src.y < surf_clip.y)
-               data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
-                                               / stream->src.height;
-       else if (stream->src.y > surf_clip.y)
-               surf_clip.height -= stream->src.y - surf_clip.y;
-
-       data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
-       if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
-               data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
-       /* Handle h & v split */
-       if (split_tb) {
-               ASSERT(data->recout.height % 2 == 0);
-               data->recout.height /= 2;
-       } else if (split_count) {
-               if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
-                       /* extra pixels in the division remainder need to go to pipes after
-                        * the extra pixel index minus one(epimo) defined here as:
-                        */
-                       int epimo = split_count - data->recout.width % (split_count + 1);
+       const struct dc_stream_state *stream = pipe_ctx->stream;
+       struct rect rec_out = {0};
+       struct fixed31_32 temp;
 
-                       data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
-                       if (split_idx > epimo)
-                               data->recout.x += split_idx - epimo - 1;
-                       ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
-                       data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
-               } else {
-                       /* odm */
-                       if (split_idx == split_count) {
-                               /* rightmost pipe is the remainder recout */
-                               data->recout.width -= data->h_active * split_count - data->recout.x;
-
-                               /* ODM combine cases with MPO we can get negative widths */
-                               if (data->recout.width < 0)
-                                       data->recout.width = 0;
-
-                               data->recout.x = 0;
-                       } else
-                               data->recout.width = data->h_active - data->recout.x;
-               }
+       temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+                       stream->src.width);
+       rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+       temp = dc_fixpt_from_fraction(
+                       (rec_in->x + rec_in->width) * stream->dst.width,
+                       stream->src.width);
+       rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+       temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+                       stream->src.height);
+       rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+       temp = dc_fixpt_from_fraction(
+                       (rec_in->y + rec_in->height) * stream->dst.height,
+                       stream->src.height);
+       rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+       return rec_out;
+}
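
Plugging hypothetical numbers from the diagram into these formulas: a plane at
x = 600, width = 400 in the 1920-wide Stream Source, mapped to the 2304-wide
Stream Destination at offset 128. The ratios divide evenly here, so plain
integer math reproduces the fixed-point result:

    #include <assert.h>

    static void plane_rec_mapping_example(void)
    {
            int dst_x = 128, dst_w = 2304, src_w = 1920;  /* from the diagram   */
            int plane_x = 600, plane_w = 400;             /* hypothetical plane */

            int recout_x = dst_x + plane_x * dst_w / src_w;               /* 848 */
            int recout_w = dst_x + (plane_x + plane_w) * dst_w / src_w
                            - recout_x;                                   /* 480 */

            assert(recout_x == 848 && recout_w == 480);
    }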
+
+static struct rect calculate_mpc_slice_in_timing_active(
+               struct pipe_ctx *pipe_ctx,
+               struct rect *plane_clip_rec)
+{
+       const struct dc_stream_state *stream = pipe_ctx->stream;
+       int mpc_slice_count = get_num_mpc_splits(pipe_ctx) + 1;
+       int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+       int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+       struct rect mpc_rec;
+
+       mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+       mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+       mpc_rec.height = plane_clip_rec->height;
+       mpc_rec.y = plane_clip_rec->y;
+       ASSERT(mpc_slice_count == 1 ||
+                       stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+                       mpc_rec.width % 2 == 0);
+
+       /* Extra pixels in the division remainder need to go to the pipes
+        * after the "extra pixel index minus one" (epimo) computed above.
+        */
+       if (mpc_slice_idx > epimo) {
+               mpc_rec.x += mpc_slice_idx - epimo - 1;
+               mpc_rec.width += 1;
        }
+
+       if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+               ASSERT(mpc_rec.height % 2 == 0);
+               mpc_rec.height /= 2;
+       }
+       return mpc_rec;
+}
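
The epimo rule guarantees that the per-slice widths still sum to the full clip
width when it does not divide evenly. A hypothetical check for a 1924-pixel
clip across three MPC slices:

    #include <assert.h>

    static void mpc_slice_width_example(void)
    {
            int clip_w = 1924, slice_count = 3;
            int epimo = slice_count - clip_w % slice_count - 1;  /* 3 - 1 - 1 = 1 */
            int idx, total = 0;

            for (idx = 0; idx < slice_count; idx++) {
                    int w = clip_w / slice_count;  /* 641 */

                    if (idx > epimo)               /* only slice 2 here */
                            w += 1;
                    total += w;
            }
            assert(total == clip_w);  /* 641 + 641 + 642 == 1924 */
    }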
+
+static void adjust_recout_for_visual_confirm(struct rect *recout,
+               struct pipe_ctx *pipe_ctx)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       int dpp_offset, base_offset;
+
+       if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+               return;
+
+       dpp_offset = pipe_ctx->plane_res.dpp->inst * VISUAL_CONFIRM_DPP_OFFSET;
+
+       if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
+                       dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+               base_offset = dc->debug.visual_confirm_rect_height;
+       else
+               base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
+       recout->height -= base_offset;
+       recout->height -= dpp_offset;
+}
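
The net effect is that each DPP instance trims a different number of lines off
the bottom of its recout, so the visual-confirm bars of pipes sharing a plane
do not overlap. With assumed constant values (mirroring the old DPP-level
defaults removed from dpp1_dscl_set_recout later in this series; the real
VISUAL_CONFIRM_* defines live elsewhere):

    /* Assumed values for illustration only. */
    enum { VC_BASE_DEFAULT = 3, VC_DPP_OFFSET = 3 };

    /* DPP inst 0: recout height shrinks by 3 + 0 * 3 = 3 lines
     * DPP inst 1: recout height shrinks by 3 + 1 * 3 = 6 lines
     * DPP inst 2: recout height shrinks by 3 + 2 * 3 = 9 lines
     */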
+
+/*
+ * The function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rect of the overlapping area between the MPC slice of
+ * the plane clip, the ODM slice associated with the pipe context, and the
+ * stream destination rect.
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
+{
+       /*
+        * A plane clip represents the desired plane size and position in Stream
+        * Source Space. Stream Source is the destination where all planes are
+        * blended (i.e. positioned, scaled and overlaid). It is a canvas where
+        * all planes associated with the current stream are drawn together.
+        * After Stream Source is completed, we will further scale and
+        * reposition the entire canvas of the stream source to Stream
+        * Destination in Timing Active Space. This could be due to display
+        * overscan adjustment where we will need to rescale and reposition all
+        * the planes so they can fit into a TV with overscan or downscale
+        * upscale features such as GPU scaling or VSR.
+        *
+        * This two step blending is a virtual procedure in software. In
+        * hardware there is no such thing as Stream Source: all planes are
+        * blended once in Timing Active Space. Software virtualizes a Stream
+        * Source space to decouple the math complexity so the scaling
+        * parameter calculation focuses on one step at a time.
+        *
+        * In the following two diagrams, the user applied a 10% overscan
+        * adjustment, so the Stream Source needs to be scaled down a little
+        * before mapping to Timing Active Space. As a result the Plane Clip is
+        * scaled down by the same ratio, and its position (i.e. x and y) with
+        * respect to Stream Source is scaled down with it. To map it into
+        * Timing Active Space, the additional x and y offsets from Stream
+        * Destination are added to the Plane Clip as well.
+        *
+        * Stream Source Space
+        * ------------
+        *        __________________________________________________
+        *       |Stream Source (3840 x 2160) ^                     |
+        *       |                            y                     |
+        *       |                            |                     |
+        *       |          __________________V                     |
+        *       |<-- x -->|Plane Clip/////////|                    |
+        *       |         |(pre scale)////////|                    |
+        *       |         |///////////////////|                    |
+        *       |         |///////////////////|                    |
+        *       |         |///////////////////|                    |
+        *       |         |///////////////////|                    |
+        *       |         |///////////////////|                    |
+        *       |                                                  |
+        *       |                                                  |
+        *       |__________________________________________________|
+        *
+        *
+        * Timing Active Space (3840 x 2160)
+        * ---------------------------------
+        *
+        *       Timing Active
+        *        __________________________________________________
+        *       | y_____________________________________________   |
+        *       |x |Stream Destination (3456 x 1944)            |  |
+        *       |  |                                            |  |
+        *       |  |        __________________                  |  |
+        *       |  |       |Plane Clip////////|                 |  |
+        *       |  |       |(post scale)//////|                 |  |
+        *       |  |       |//////////////////|                 |  |
+        *       |  |       |//////////////////|                 |  |
+        *       |  |       |//////////////////|                 |  |
+        *       |  |       |//////////////////|                 |  |
+        *       |  |                                            |  |
+        *       |  |                                            |  |
+        *       |  |____________________________________________|  |
+        *       |__________________________________________________|
+        *
+        *
+        * In Timing Active Space a plane clip can be further sliced into
+        * pieces called MPC slices. Each Pipe Context is responsible for
+        * processing only one MPC slice, so the plane processing workload can
+        * be distributed to multiple DPP pipes. MPC slices are blended
+        * together into a single ODM slice. Each ODM slice is responsible for
+        * processing a horizontally divided portion of Timing Active, so the
+        * output pixel processing workload can be distributed to multiple OPP
+        * pipes. All ODM slices are mapped together in the ODM block, so MPC
+        * slices belonging to different ODM slices can be pieced together to
+        * form a single image in Timing Active. An MPC slice must belong to a
+        * single ODM slice. If an MPC slice crosses an ODM slice boundary, it
+        * needs to be divided into two MPC slices, one for each ODM slice.
+        *
+        * In the following diagram the output pixel processing workload is
+        * divided horizontally into two ODM slices, one for each OPP blend
+        * tree. The OPP0 blend tree is responsible for processing the left
+        * half of Timing Active, while the OPP2 blend tree is responsible for
+        * the right half.
+        *
+        * The plane has two MPC slices. However, since the right MPC slice
+        * crosses the ODM boundary, two DPP pipes are needed, one for each OPP
+        * blend tree (i.e. DPP1 for the OPP0 blend tree and DPP2 for the OPP2
+        * blend tree).
+        *
+        * Assume we have a Pipe Context associated with OPP0 and DPP1 working
+        * on the plane in the diagram. We want to know the width and height of
+        * the shaded rectangle and its position relative to ODM slice0. This
+        * is called the recout of the pipe context.
+        *
+        * Planes can have arbitrary size and position, and there can be an
+        * arbitrary number of MPC and ODM slices. The algorithm needs to take
+        * all scenarios into account.
+        *
+        * Timing Active Space (3840 x 2160)
+        * ---------------------------------
+        *
+        *       Timing Active
+        *        __________________________________________________
+        *       |OPP0(ODM slice0)^        |OPP2(ODM slice1)        |
+        *       |                y        |                        |
+        *       |                |  <- w ->                        |
+        *       |           _____V________|____                    |
+        *       |          |DPP0 ^  |DPP1 |DPP2|                   |
+        *       |<------ x |-----|->|/////|    |                   |
+        *       |          |     |  |/////|    |                   |
+        *       |          |     h  |/////|    |                   |
+        *       |          |     |  |/////|    |                   |
+        *       |          |_____V__|/////|____|                   |
+        *       |                         |                        |
+        *       |                         |                        |
+        *       |                         |                        |
+        *       |_________________________|________________________|
+        *
+        *
+        */
+       struct rect plane_clip;
+       struct rect mpc_slice_of_plane_clip;
+       struct rect odm_slice;
+       struct rect overlapping_area;
+
+       plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
+                       &pipe_ctx->plane_state->clip_rect);
+       /* guard plane clip from drawing beyond stream dst here */
+       plane_clip = intersect_rec(&plane_clip,
+                               &pipe_ctx->stream->dst);
+       mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
+                       pipe_ctx, &plane_clip);
+       odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
+       overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice);
+       /* shift the overlapping area so it is relative to the current ODM
+        * slice's position
+        */
+       pipe_ctx->plane_res.scl_data.recout = shift_rec(
+                       &overlapping_area,
+                       -odm_slice.x, -odm_slice.y);
+
+       adjust_recout_for_visual_confirm(&pipe_ctx->plane_res.scl_data.recout,
+                       pipe_ctx);
 }
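
Tying the steps together with hypothetical numbers: in a two-slice 3840 x 2160
timing with a single MPC slice, a 400 x 400 plane clip at (1800, 100) straddles
the ODM boundary. For the pipe serving ODM slice 1 the values flow as:

    /*
     * plane_clip              = { 1800, 100,  400,  400 }  (timing active)
     * mpc_slice_of_plane_clip = { 1800, 100,  400,  400 }  (1 slice: unchanged)
     * odm_slice               = { 1920,   0, 1920, 2160 }
     * overlapping_area        = { 1920, 100,  280,  400 }
     * recout (after shift)    = {    0, 100,  280,  400 }  (slice-relative)
     */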
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -997,34 +1287,25 @@ static void calculate_init_and_vp(
 static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-       const struct dc_stream_state *stream = pipe_ctx->stream;
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
        struct rect src = plane_state->src_rect;
+       struct rect recout_dst_in_active_timing;
+       struct rect recout_clip_in_active_timing;
+       struct rect recout_clip_in_recout_dst;
+       struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
        int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
                                || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
-       int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
        bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
 
-       calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
-       /*
-        * recout full is what the recout would have been if we didnt clip
-        * the source plane at all. We only care about left(ro_lb) and top(ro_tb)
-        * offsets of recout within recout full because those are the directions
-        * we scan from and therefore the only ones that affect inits.
-        */
-       recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
-                       * stream->dst.width / stream->src.width;
-       recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
-                       * stream->dst.height / stream->src.height;
-       if (pipe_ctx->prev_odm_pipe && split_idx)
-               ro_lb = data->h_active * split_idx - recout_full_x;
-       else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
-               ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
-       else
-               ro_lb = data->recout.x - recout_full_x;
-       ro_tb = data->recout.y - recout_full_y;
-       ASSERT(ro_lb >= 0 && ro_tb >= 0);
-
+       recout_clip_in_active_timing = shift_rec(
+                       &data->recout, odm_slice.x, odm_slice.y);
+       recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
+                       pipe_ctx, &plane_state->dst_rect);
+       recout_clip_in_recout_dst = shift_rec(&recout_clip_in_active_timing,
+                       -recout_dst_in_active_timing.x,
+                       -recout_dst_in_active_timing.y);
+       ASSERT(recout_clip_in_recout_dst.x >= 0 &&
+                       recout_clip_in_recout_dst.y >= 0);
        /*
         * Work in recout rotation since that requires less transformations
         */
@@ -1042,7 +1323,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
 
        calculate_init_and_vp(
                        flip_horz_scan_dir,
-                       ro_lb,
+                       recout_clip_in_recout_dst.x,
                        data->recout.width,
                        src.width,
                        data->taps.h_taps,
@@ -1052,7 +1333,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
                        &data->viewport.width);
        calculate_init_and_vp(
                        flip_horz_scan_dir,
-                       ro_lb,
+                       recout_clip_in_recout_dst.x,
                        data->recout.width,
                        src.width / vpc_div,
                        data->taps.h_taps_c,
@@ -1062,7 +1343,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
                        &data->viewport_c.width);
        calculate_init_and_vp(
                        flip_vert_scan_dir,
-                       ro_tb,
+                       recout_clip_in_recout_dst.y,
                        data->recout.height,
                        src.height,
                        data->taps.v_taps,
@@ -1072,7 +1353,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
                        &data->viewport.height);
        calculate_init_and_vp(
                        flip_vert_scan_dir,
-                       ro_tb,
+                       recout_clip_in_recout_dst.y,
                        data->recout.height,
                        src.height / vpc_div,
                        data->taps.v_taps_c,
@@ -1097,6 +1378,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+       const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
        bool res = false;
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
@@ -1121,30 +1403,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        pipe_ctx->stream->dst.y += timing->v_border_top;
 
        /* Calculate H and V active size */
-       pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
-                       timing->h_border_left + timing->h_border_right;
-       pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
-               timing->v_border_top + timing->v_border_bottom;
-       if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) {
-               pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
-
-               DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d   prev_odm_pipe:%d\n",
-                               __func__,
-                               pipe_ctx->pipe_idx,
-                               pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1,
-                               pipe_ctx->prev_odm_pipe ? pipe_ctx->prev_odm_pipe->pipe_idx : -1);
-       }       /* ODM + windows MPO, where window is on either right or left ODM half */
-       else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {
-
-               pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;
-
-               DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d   top_pipe->next_odm_pipe:%d   top_pipe->prev_odm_pipe:%d\n",
-                               __func__,
-                               pipe_ctx->pipe_idx,
-                               pipe_ctx->top_pipe->pipe_idx,
-                               pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
-                               pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
-       }
+       pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
+       pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+
        /* depends on h_active */
        calculate_recout(pipe_ctx);
        /* depends on pixel format */
@@ -1449,7 +1710,24 @@ static int acquire_first_split_pipe(
                        return i;
                } else if (split_pipe->prev_odm_pipe &&
                                split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+
+                       // Fix case where ODM slice has child planes
+                       // Re-attach child planes
+                       struct pipe_ctx *temp_head_pipe = resource_get_head_pipe_for_stream(res_ctx, split_pipe->stream);
+
+                       if (split_pipe->bottom_pipe && temp_head_pipe) {
+
+                               struct pipe_ctx *temp_tail_pipe = resource_get_tail_pipe(res_ctx, temp_head_pipe);
+
+                               if (temp_tail_pipe) {
+
+                                       split_pipe->bottom_pipe->top_pipe = temp_tail_pipe;
+                                       temp_tail_pipe->bottom_pipe = split_pipe->bottom_pipe;
+                               }
+                       }
+
                        split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+
                        if (split_pipe->next_odm_pipe)
                                split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
 
@@ -1457,6 +1735,11 @@ static int acquire_first_split_pipe(
                                resource_build_scaling_params(split_pipe->prev_odm_pipe);
 
                        memset(split_pipe, 0, sizeof(*split_pipe));
+
+                       // We cannot split if head pipe is not odm
+                       if (temp_head_pipe && !temp_head_pipe->next_odm_pipe && !temp_head_pipe->prev_odm_pipe)
+                               return HEAD_NOT_IN_ODM;
+
                        split_pipe->stream_res.tg = pool->timing_generators[i];
                        split_pipe->plane_res.hubp = pool->hubps[i];
                        split_pipe->plane_res.ipp = pool->ipps[i];
@@ -1469,7 +1752,7 @@ static int acquire_first_split_pipe(
                        return i;
                }
        }
-       return -1;
+       return UNABLE_TO_SPLIT;
 }
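
HEAD_NOT_IN_ODM and UNABLE_TO_SPLIT are not defined in this hunk; for the
pipe_idx >= 0 checks in dc_add_plane_to_context() below to distinguish them,
they must be distinct negative sentinels, presumably along these lines:

    /* Assumed shape only; the real definitions live elsewhere in dc. */
    enum {
            UNABLE_TO_SPLIT = -1,
            HEAD_NOT_IN_ODM = -2,
    };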
 
 bool dc_add_plane_to_context(
@@ -1521,6 +1804,10 @@ bool dc_add_plane_to_context(
                        int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
                        if (pipe_idx >= 0)
                                free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+                       else if (pipe_idx == HEAD_NOT_IN_ODM)
+                               break;
+                       else
+                               ASSERT(false);
                }
 
                if (!free_pipe) {
@@ -1677,12 +1964,14 @@ bool dc_add_plane_to_context(
                                                (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
                                                free_pipe->stream->src.x + free_pipe->stream->src.width/2))) {
                                        if (!free_pipe->next_odm_pipe &&
-                                               tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
+                                               tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe &&
+                                               tail_pipe->next_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) {
                                                free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
                                                tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
                                        }
                                        if (!free_pipe->prev_odm_pipe &&
-                                               tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
+                                               tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe &&
+                                               tail_pipe->prev_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) {
                                                free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
                                                tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
                                        }
index 6e11d2b701f82fc5e437eda80dbed72d6d21a998..ea3d4b328e8e5436fee0f63bebd0ad5d96ecc475 100644 (file)
@@ -306,6 +306,32 @@ bool dc_optimize_timing_for_fsft(
 }
 #endif
 
+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+       uint32_t refresh_rate;
+       struct dc *dc = stream->ctx->dc;
+
+       refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+               stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+       refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+       refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
+
+       /* If there's any stream that fits the SubVP high refresh criteria,
+        * we must return true. This is because cursor updates are asynchronous
+        * with full updates, so we could transition into a SubVP config and
+        * remain in HW cursor mode if there's no cursor update, which would
+        * then cause corruption.
+        */
+       if ((refresh_rate >= 120 && refresh_rate <= 165 &&
+                       stream->timing.v_addressable >= 1440 &&
+                       stream->timing.v_addressable <= 2160) &&
+                       (dc->current_state->stream_count > 1 ||
+                       (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+               return true;
+
+       return false;
+}
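
The refresh-rate computation is effectively a ceiling division of the pixel
clock (in Hz) by the total pixels per frame; adding v_total * h_total - 1
before dividing rounds up, so a nominal 143.99 Hz mode still qualifies as
144 Hz. A hypothetical 1440p/144 timing as a check:

    #include <assert.h>
    #include <stdint.h>

    static void refresh_rate_example(void)
    {
            /* Hypothetical timing: 2720 x 1525 total at 597.312 MHz. */
            uint64_t pix_clk_100hz = 5973120;
            uint64_t h_total = 2720, v_total = 1525;
            uint64_t refresh = (pix_clk_100hz * 100 + v_total * h_total - 1)
                            / v_total / h_total;

            assert(refresh == 144);
    }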
+
 /*
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
@@ -334,12 +360,13 @@ bool dc_stream_set_cursor_attributes(
 
        /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
         * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
-        * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
-        * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
-        *
-        * [< 120hz is a requirement for SubVP configs]
+        * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+        * 2. If not SubVP high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+        * 3. If not SubVP high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
         */
        if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+               if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+                       return false;
                if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
                                ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
                        return false;
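
The 16384-byte threshold in the condition above is exactly a 64 x 64 cursor at
4 bytes per pixel, matching the size limit named in the comment:

    /* 64 x 64 pixels x 4 bytes/pixel = 16384 bytes; anything larger cannot
     * stay a HW cursor under SubVP, hence the fallbacks above.
     */
    _Static_assert(64 * 64 * 4 == 16384, "SubVP HW cursor size limit");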
index 63948170fd6d9d78cb86abe7ddf8a59b5c4b5a25..eadb53853131074603819df1b4cce63eaf211b82 100644 (file)
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
+struct abm_save_restore;
+
 /* forward declaration */
 struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.241"
+#define DC_VER "3.2.244"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -506,7 +508,7 @@ enum dcn_zstate_support_state {
        DCN_ZSTATE_SUPPORT_DISALLOW,
 };
 
-/**
+/*
  * struct dc_clocks - DC pipe clocks
  *
  * For any clocks that may differ per pipe only the max is stored in this
@@ -728,7 +730,7 @@ struct resource_pool;
 struct dce_hwseq;
 struct link_service;
 
-/**
+/*
  * struct dc_debug_options - DC debug struct
  *
  * This struct provides a simple mechanism for developers to change some
@@ -756,7 +758,7 @@ struct dc_debug_options {
        bool use_max_lb;
        enum dcc_option disable_dcc;
 
-       /**
+       /*
         * @pipe_split_policy: Define which pipe split policy is used by the
         * display core.
         */
@@ -861,6 +863,7 @@ struct dc_debug_options {
        bool psr_skip_crtc_disable;
        union dpia_debug_options dpia_debug;
        bool disable_fixed_vs_aux_timeout_wa;
+       uint32_t fixed_vs_aux_delay_config_wa;
        bool force_disable_subvp;
        bool force_subvp_mclk_switch;
        bool allow_sw_cursor_fallback;
@@ -1334,7 +1337,7 @@ struct dc_validation_set {
        struct dc_stream_state *stream;
 
        /**
-        * @plane_state: Surface state
+        * @plane_states: Surface state
         */
        struct dc_plane_state *plane_states[MAX_SURFACES];
 
@@ -1409,10 +1412,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
 
 uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
 
+void dc_set_disable_128b_132b_stream_overhead(bool disable);
+
 /* The function returns minimum bandwidth required to drive a given timing
  * return - minimum required timing bandwidth in kbps.
  */
-uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+               const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding);
 
 /* Link Interfaces */
 /*
@@ -1514,6 +1521,7 @@ struct dc_link {
        enum edp_revision edp_revision;
        union dpcd_sink_ext_caps dpcd_sink_ext_caps;
 
+       struct backlight_settings backlight_settings;
        struct psr_settings psr_settings;
 
        /* Drive settings read from integrated info table */
@@ -1849,6 +1857,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
  */
 const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
 
+/* Get the highest encoding format that the link supports; highest meaning the
+ * encoding format which supports the maximum bandwidth.
+ *
+ * @link - a link with DP RX connection
+ * return - highest encoding format link supports.
+ */
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link);
+
 /* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
  * to a link with dp connector signal type.
  * @link - a link with dp connector signal type
@@ -2230,6 +2246,11 @@ void dc_z10_save_init(struct dc *dc);
 bool dc_is_dmub_outbox_supported(struct dc *dc);
 bool dc_enable_dmub_notifications(struct dc *dc);
 
+bool dc_abm_save_restore(
+               struct dc *dc,
+               struct dc_stream_state *stream,
+               struct abm_save_restore *pData);
+
 void dc_enable_dmub_outbox(struct dc *dc);
 
 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
index c753c6f30dd71445fc9476e41d20985b366f27ff..24433409d7def831a9098b70ae78e05c2edfe7f1 100644 (file)
@@ -381,6 +381,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
 {
        union dmub_rb_cmd cmd = { 0 };
 
+       if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
+               return;
+
        memset(&cmd, 0, sizeof(cmd));
 
        /* Prepare fw command */
index 9491b76d61f58f572a3c4a48100d082a5d3045bc..fe3078b8789ef142ea4e3eb9b1c6792ac3aea5cb 100644 (file)
@@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range(
                uint32_t max_bpp_x16,
                const struct dsc_dec_dpcd_caps *dsc_sink_caps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_bw_range *range);
 
 bool dc_dsc_compute_config(
@@ -81,6 +82,7 @@ bool dc_dsc_compute_config(
                const struct dc_dsc_config_options *options,
                uint32_t target_bandwidth_kbps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_config *dsc_cfg);
 
 uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
index 0ce7728a5a4be5671279de5300b1e569539490ee..14d7804b70b28733273c4e37128a2c131f1027e4 100644 (file)
@@ -189,7 +189,6 @@ struct dc_panel_patch {
        unsigned int disable_fams;
        unsigned int skip_avmute;
        unsigned int mst_start_top_delay;
-       unsigned int delay_disable_aux_intercept_ms;
 };
 
 struct dc_edid_caps {
@@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps {
        uint32_t branch_overall_throughput_0_mps; /* In MPs */
        uint32_t branch_overall_throughput_1_mps; /* In MPs */
        uint32_t branch_max_line_width;
-       bool is_dp;
+       bool is_dp; /* Decoded format */
 };
 
 struct dc_golden_table {
@@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type {
        DC_MEM_ALLOC_TYPE_AGP
 };
 
+enum dc_link_encoding_format {
+       DC_LINK_ENCODING_UNSPECIFIED = 0,
+       DC_LINK_ENCODING_DP_8b_10b,
+       DC_LINK_ENCODING_DP_128b_132b,
+       DC_LINK_ENCODING_HDMI_TMDS,
+       DC_LINK_ENCODING_HDMI_FRL
+};
+
 enum dc_psr_version {
        DC_PSR_VERSION_1                        = 0,
        DC_PSR_VERSION_SU_1                     = 1,
@@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table {
        struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
 };
 
+struct backlight_settings {
+       uint32_t backlight_millinits;
+};
+
 /* PSR feature flags */
 struct psr_settings {
        bool psr_feature_enabled;               // PSR is supported by sink
index 63009db8b5a7237476ea690032c7ad6004e775ba..b87bfecb7755ae5a55c7b1f8bedc2ec7836bfb4e 100644 (file)
@@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu)
 }
 
 static bool dce_dmcu_load_iram(struct dmcu *dmcu,
-               unsigned int start_offset,
-               const char *src,
-               unsigned int bytes)
+                              unsigned int start_offset,
+                              const char *src,
+                              unsigned int bytes)
 {
        struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
        unsigned int count = 0;
index 6d1b01c267b751c14a03b39289f0206f051a93ad..4f552c3e7663134b9705dcbb5270d43a7728b374 100644 (file)
@@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
        return dce_i2c_hw;
 }
 
-static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
-       struct dce_i2c_hw *dce_i2c_hw,
-       uint32_t timeout,
-       enum i2c_channel_operation_result expected_result)
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw,
+                                                                                   uint32_t timeout,
+                                                                                   enum i2c_channel_operation_result expected_result)
 {
        enum i2c_channel_operation_result result;
        uint32_t i = 0;
@@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw(
        return period_timeout * num_of_clock_stretches;
 }
 
-static bool dce_i2c_hw_engine_submit_payload(
-       struct dce_i2c_hw *dce_i2c_hw,
-       struct i2c_payload *payload,
-       bool middle_of_transaction,
-       uint32_t speed)
+static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
+                                            struct i2c_payload *payload,
+                                            bool middle_of_transaction,
+                                            uint32_t speed)
 {
 
        struct i2c_request_transaction_data request;
index f1aeb6d1967c330405bb8f6b1c58a783d0398c9b..e188447c8156b75344ed2d9c4ddab070f880ee9c 100644 (file)
@@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine(
 
        return true;
 }
+
 bool dce_i2c_engine_acquire_sw(
        struct dce_i2c_sw *dce_i2c_sw,
        struct ddc *ddc_handle)
@@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw(
        return result;
 }
 
-
-
-
-static void dce_i2c_sw_engine_submit_channel_request(
-       struct dce_i2c_sw *engine,
-       struct i2c_request_transaction_data *req)
+static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine,
+                                                    struct i2c_request_transaction_data *req)
 {
        struct ddc *ddc = engine->ddc;
        uint16_t clock_delay_div_4 = engine->clock_delay >> 2;
@@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request(
                I2C_CHANNEL_OPERATION_FAILED;
 }
 
-static bool dce_i2c_sw_engine_submit_payload(
-       struct dce_i2c_sw *engine,
-       struct i2c_payload *payload,
-       bool middle_of_transaction)
+static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
+                                            struct i2c_payload *payload,
+                                            bool middle_of_transaction)
 {
        struct i2c_request_transaction_data request;
 
index 2fb9572ce25dbb80f8a9e2432542a217f0bd415c..d3e6544022b787d3dff8827544d9e91e46470204 100644 (file)
@@ -27,6 +27,7 @@
 #include "dmub_abm_lcd.h"
 #include "dc.h"
 #include "core_types.h"
+#include "dmub_cmd.h"
 
 #define TO_DMUB_ABM(abm)\
        container_of(abm, struct dce_abm, base)
@@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane
        return ret;
 }
 
+/*****************************************************************************
+ *  dmub_abm_save_restore_ex() - calls dmub_abm_save_restore for preserving DMUB's
+ *                              Varibright states for LCD only. OLED is TBD
+ *  @abm: used to get the dc context
+ *  @panel_inst: panel instance index
+ *  @pData: contains command to pause/un-pause abm and abm parameters
+ *
+ *
+ ***************************************************************************/
+static bool dmub_abm_save_restore_ex(
+               struct abm *abm,
+               unsigned int panel_inst,
+               struct abm_save_restore *pData)
+{
+       bool ret = false;
+       unsigned int feature_support;
+       struct dc_context *dc = abm->ctx;
+
+       feature_support = abm_feature_support(abm, panel_inst);
+
+       if (feature_support == ABM_LCD_SUPPORT)
+               ret = dmub_abm_save_restore(dc, panel_inst, pData);
+
+       return ret;
+}
+
 static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
 {
        bool ret = false;
@@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = {
        .get_target_backlight = dmub_abm_get_target_backlight_ex,
        .init_abm_config = dmub_abm_init_config_ex,
        .set_abm_pause = dmub_abm_set_pause_ex,
+       .save_restore = dmub_abm_save_restore_ex,
        .set_pipe_ex = dmub_abm_set_pipe_ex,
        .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
 };
index 39da73eba86e503416c7fc90c975e2fad58d9414..592a8f7a1c6d00835eb33973e3460a3d805d0eef 100644 (file)
@@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
        return true;
 }
 
+
+/*****************************************************************************
+ *  dmub_abm_save_restore() - dmub interface for abm save+pause and restore+
+ *                           un-pause
+ *  @dc: dc context
+ *  @panel_inst: panel instance index
+ *  @pData: contains command to pause/un-pause abm and exchange abm parameters
+ *
+ *  On pause, the current abm data is read back and stored in pData; on
+ *  un-pause, the abm data stored in pData is set/applied.
+ *
+ *****************************************************************************/
+bool dmub_abm_save_restore(
+               struct dc_context *dc,
+               unsigned int panel_inst,
+               struct abm_save_restore *pData)
+{
+       union dmub_rb_cmd cmd;
+       uint8_t panel_mask = 0x01 << panel_inst;
+       unsigned int bytes = sizeof(struct abm_save_restore);
+
+       // TODO: Optimize by only reading back final 4 bytes
+       dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+       // Copy the save/restore data into the DMUB scratch buffer
+       memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes);
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.abm_save_restore.header.type = DMUB_CMD__ABM;
+       cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE;
+
+       cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+       cmd.abm_save_restore.abm_init_config_data.bytes = bytes;
+       cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+       cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
+
+       cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+
+       dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+       // Copy the data DMUB wrote back into the caller's structure
+       memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+       return true;
+}
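
A hypothetical caller pairs the two directions, since the pause/un-pause
command itself travels inside pData (the field carrying the command is assumed
here and not shown in this hunk):

    struct abm_save_restore state = { 0 };

    /* Caller sets the pause/save command inside 'state' (field name not
     * shown in this hunk), then:
     */
    dmub_abm_save_restore(dc_ctx, panel_inst, &state);
    /* 'state' now holds the Varibright data DMUB wrote back. */

    /* Later the caller sets the un-pause/restore command inside 'state'
     * and calls again; DMUB re-applies the stored data.
     */
    dmub_abm_save_restore(dc_ctx, panel_inst, &state);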
+
 bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
 {
        union dmub_rb_cmd cmd;
index 00b4e268768ef20f36eeaa5a10399b0a0b69a59f..853564d7f4714c7ccbaa78d21522f5e3c7e20fc9 100644 (file)
@@ -28,6 +28,8 @@
 
 #include "abm.h"
 
+struct abm_save_restore;
+
 void dmub_abm_init(struct abm *abm, uint32_t backlight);
 bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
 unsigned int dmub_abm_get_current_backlight(struct abm *abm);
@@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm,
        unsigned int inst);
 
 bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst);
+bool dmub_abm_save_restore(
+               struct dc_context *dc,
+               unsigned int panel_inst,
+               struct abm_save_restore *pData);
 bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
 bool dmub_abm_set_backlight_level(struct abm *abm,
                unsigned int backlight_pwm_u16_16,
index 6c9ca43d1040b3610699ec94615b40356cee3ceb..20d4d08a6a2f3c9d2190f3a5efd4d97db8dfe0c3 100644 (file)
@@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                        hws->funcs.edp_backlight_control(edp_link_with_sink, false);
                }
                /*resume from S3, no vbios posting, no need to power down again*/
+               clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
                power_down_all_hw_blocks(dc);
                disable_vga_and_power_gate_all_controllers(dc);
                if (edp_link_with_sink && !keep_edp_vdd_on)
                        dc->hwss.edp_power_control(edp_link_with_sink, false);
+               clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
        }
        bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
 }
index 3935fd455f0f9117fe4e6e8fa3f0c3126c904ce2..061221394ce0791d202438d289906cb85fb84133 100644 (file)
 #include "dce/dce_i2c.h"
 /* TODO remove this include */
 
-#include "dce80_resource.h"
-
 #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
 #include "gmc/gmc_7_1_d.h"
 #include "gmc/gmc_7_1_sh_mask.h"
 #endif
 
+#include "dce80/dce80_resource.h"
+
 #ifndef mmDP_DPHY_INTERNAL_CTRL
 #define mmDP_DPHY_INTERNAL_CTRL                         0x1CDE
 #define mmDP0_DP_DPHY_INTERNAL_CTRL                     0x1CDE
index b33955928bd0b8e0158219b3ca2f837533622af3..7e140c35a0ced724097c0f26b880b21d0d8d98a8 100644 (file)
@@ -39,9 +39,6 @@
 #define BLACK_OFFSET_RGB_Y 0x0
 #define BLACK_OFFSET_CBCR  0x8000
 
-#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
-#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
-#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
 
 #define REG(reg)\
        dpp->tf_regs->reg
@@ -591,18 +588,6 @@ static void dpp1_dscl_set_manual_ratio_init(
 static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
                                 const struct rect *recout)
 {
-       int visual_confirm_on = 0;
-       unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
-
-       if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
-               visual_confirm_on = 1;
-
-       /* Check bounds to ensure the VC bar height was set to a sane value */
-       if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
-                       (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
-               visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
-       }
-
        REG_SET_2(RECOUT_START, 0,
                  /* First pixel of RECOUT in the active OTG area */
                  RECOUT_START_X, recout->x,
@@ -613,8 +598,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
                  /* Number of RECOUT horizontal pixels */
                  RECOUT_WIDTH, recout->width,
                  /* Number of RECOUT vertical lines */
-                 RECOUT_HEIGHT, recout->height
-                        - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
+                 RECOUT_HEIGHT, recout->height);
 }
 
 /**
index a50309039d083dfa23a339761fa5652cd82d8d83..9834b75f1837bacf78ac47184f168e70fbe88b4f 100644 (file)
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
                if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
                        struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-                       if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+                       if (pipe_ctx->stream_res.tg &&
+                               pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
                                res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
                        pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
                        hubp->funcs->set_blank(hubp, true);
index ee08b545aaeaf353749fad791ef86ba0696faee9..377f1ba1a81b70cf05bb7e66763d40ca6ff2fe45 100644 (file)
@@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output(
        struct bp_transmitter_control cntl = { 0 };
        enum bp_result result;
 
-       if (!dcn10_is_dig_enabled(enc)) {
+       if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) {
                /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
        /* In the DP_Alt_No_Connect case we have already turned off the dig;
        after executing the PHY w/a sequence, do not touch the PHY any more */
index 4492bc2392b63c92078ac0a7b94636393ba7a48a..e32d3246e82a088793fd097adaaf26bbb1d33d6d 100644 (file)
@@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data(
        enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
        struct pipe_ctx *odm_pipe;
        int odm_cnt = 1;
-
-       int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-       int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+       int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+       int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+       int odm_slice_width, last_odm_slice_width, offset = 0;
 
        if (stream->link->test_pattern_enabled)
                return;
@@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data(
 
        for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
                odm_cnt++;
-
-       width = width / odm_cnt;
+       odm_slice_width = h_active / odm_cnt;
+       last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
 
        if (blank) {
                dc->hwss.set_abm_immediate_disable(pipe_ctx);
@@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data(
                test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
        }
 
-       dc->hwss.set_disp_pattern_generator(dc,
-                       pipe_ctx,
-                       test_pattern,
-                       test_pattern_color_space,
-                       stream->timing.display_color_depth,
-                       &black_color,
-                       width,
-                       height,
-                       0);
+       odm_pipe = pipe_ctx;
 
-       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+       while (odm_pipe->next_odm_pipe) {
                dc->hwss.set_disp_pattern_generator(dc,
-                               odm_pipe,
-                               dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
-                                               CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+                               pipe_ctx,
+                               test_pattern,
                                test_pattern_color_space,
                                stream->timing.display_color_depth,
                                &black_color,
-                               width,
-                               height,
-                               0);
+                               odm_slice_width,
+                               v_active,
+                               offset);
+               offset += odm_slice_width;
+               odm_pipe = odm_pipe->next_odm_pipe;
        }
 
+       dc->hwss.set_disp_pattern_generator(dc,
+                       odm_pipe,
+                       test_pattern,
+                       test_pattern_color_space,
+                       stream->timing.display_color_depth,
+                       &black_color,
+                       last_odm_slice_width,
+                       v_active,
+                       offset);
+
        if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
                /* when exiting dynamic ODM need to reinit DPG state for unused pipes */
                struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
@@ -2123,6 +2126,15 @@ void dcn20_optimize_bandwidth(
        if (hubbub->funcs->program_compbuf_size)
                hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
 
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+               dc_dmub_srv_p_state_delegate(dc,
+                       true, context);
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+               dc->clk_mgr->clks.fw_based_mclk_switching = true;
+       } else {
+               dc->clk_mgr->clks.fw_based_mclk_switching = false;
+       }
+
        dc->clk_mgr->funcs->update_clocks(
                        dc->clk_mgr,
                        context,
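The optimize_bandwidth hunk above hands P-state control to DMUB firmware whenever the new context uses firmware-based MCLK switching, then mirrors that fact in the clock-manager state. A minimal standalone model of the handoff (illustrative names, not the real dc_dmub_srv interface):

#include <stdbool.h>
#include <stdio.h>

struct clk_state {
	bool fw_based_mclk_switching;
	bool p_state_change_support;
};

/* stand-in for dc_dmub_srv_p_state_delegate() */
static void delegate_p_state_to_fw(bool enable)
{
	printf("p-state delegated to firmware: %d\n", enable);
}

static void optimize(struct clk_state *clks, bool new_ctx_uses_fw_switching)
{
	if (new_ctx_uses_fw_switching) {
		delegate_p_state_to_fw(true);
		/* firmware now guarantees the switch, so report support */
		clks->p_state_change_support = true;
		clks->fw_based_mclk_switching = true;
	} else {
		clks->fw_based_mclk_switching = false;
	}
}

int main(void)
{
	struct clk_state clks = { 0 };

	optimize(&clks, true);
	return 0;
}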
index 33fc9aa8621b45570dbab9555b827ec1043954a2..d07c04458d31a7ca16885d67a847bfa29cabef4a 100644 (file)
@@ -43,7 +43,7 @@
 #define DC_LOGGER \
        dccg->ctx->logger
 
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
 {
        struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 
index e44a37491c1e94b3c91aca3feaeb3ea41f60e537..b7efa777ec7305190276459e84791781f1ec1bec 100644 (file)
@@ -32,6 +32,5 @@ struct dccg *dccg21_create(
        const struct dccg_shift *dccg_shift,
        const struct dccg_mask *dccg_mask);
 
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
 
 #endif /* __DCN21_DCCG_H__ */
index d693ea42d033b9e9259ccd4ce9e43a53e075caa7..82dfcf773b1ada8c1f77887e5b725542ea6369a2 100644 (file)
@@ -854,8 +854,8 @@ bool dcn21_fast_validate_bw(struct dc *dc,
                /* We only support full screen mpo with ODM */
                if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
                                && pipe->plane_state && mpo_pipe
-                               && memcmp(&mpo_pipe->plane_res.scl_data.recout,
-                                               &pipe->plane_res.scl_data.recout,
+                               && memcmp(&mpo_pipe->plane_state->clip_rect,
+                                               &pipe->stream->src,
                                                sizeof(struct rect)) != 0) {
                        ASSERT(mpo_pipe->plane_state != pipe->plane_state);
                        goto validate_fail;
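This check, repeated below for dcn30 and dcn32, now decides "full screen" by comparing the MPO plane's clip rectangle against the stream source rectangle instead of comparing the two pipes' scaler recouts. A standalone sketch of the test; the x/y/width/height layout of struct rect is assumed here for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct rect { int x, y, width, height; };

static bool is_fullscreen_mpo(const struct rect *clip_rect,
			      const struct rect *stream_src)
{
	/* byte-wise equality, as in the memcmp() above */
	return memcmp(clip_rect, stream_src, sizeof(struct rect)) == 0;
}

int main(void)
{
	struct rect src  = { 0, 0, 3840, 2160 };
	struct rect clip = { 0, 0, 3840, 2160 };

	printf("full-screen MPO: %d\n", is_fullscreen_mpo(&clip, &src));
	return 0;
}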
index bf8864bc8a99ee6e38abb2d55f08c28442f61a7e..4cd4ae07d73dcff7aebe80a3e2416e6430a4cfda 100644 (file)
@@ -949,13 +949,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 }
 
 void dcn30_prepare_bandwidth(struct dc *dc,
-                            struct dc_state *context)
+       struct dc_state *context)
 {
+       bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+       /* Any transition into an FPO config should disable MCLK switching first to avoid
+        * driver and FW P-State synchronization issues.
+        */
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+               dc->optimized_required = true;
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+       }
+
        if (dc->clk_mgr->dc_mode_softmax_enabled)
                if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
                                context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
                        dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
        dcn20_prepare_bandwidth(dc, context);
+       /*
+        * enabled -> enabled: do not disable
+        * enabled -> disabled: disable
+        * disabled -> enabled: don't care
+        * disabled -> disabled: don't care
+        */
+       if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+               dc_dmub_srv_p_state_delegate(dc, false, context);
+
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+               /* After disabling P-State, restore the original value to ensure we get the correct P-State
+                * on the next optimize. */
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+       }
 }
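The comment block in dcn30_prepare_bandwidth encodes a four-way transition table; since delegation is only ever torn down here (enablement happens later in optimize_bandwidth), the table collapses to "disable whenever the new context has FPO off". A compact standalone restatement of that logic, sketch only:

#include <assert.h>
#include <stdbool.h>

static bool should_disable_fpo_delegation(bool cur, bool new_state)
{
	(void)cur;		/* only the target state matters */
	return !new_state;	/* mirrors: if (!new->fw_based_mclk_switching) delegate(false) */
}

int main(void)
{
	assert(!should_disable_fpo_delegation(true, true));	/* enabled -> enabled: keep */
	assert(should_disable_fpo_delegation(true, false));	/* enabled -> disabled: disable */
	assert(!should_disable_fpo_delegation(false, true));	/* enabled later, in optimize */
	assert(should_disable_fpo_delegation(false, false));	/* harmless no-op */
	return 0;
}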
 
index dfb8f62765f28e6f52110355cd9edd560184e246..5bf4d0aa62305e5a5e68a36f172aac0ef7705c84 100644 (file)
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
        optc1->opp_count = 1;
 }
 
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
                struct dc_crtc_timing *timing)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
                   OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
 }
 
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
index fb06dc9a48937dd3cbe4caa62af1c8bef68c6e69..d3a056c12b0dc794edf33f136e980bb9853d3b36 100644 (file)
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 
 void optc3_set_odm_bypass(struct timing_generator *optc,
                const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+               struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
 void optc3_tg_init(struct timing_generator *optc);
 void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
 #endif /* __DC_OPTC_DCN30_H__ */
index abe4c12a10b5b6d8b725536535fa25f6dec9fb39..f5bfcd2a0dbcddc391eb5986052cb96c10f80c0b 100644 (file)
@@ -1705,8 +1705,8 @@ noinline bool dcn30_internal_validate_bw(
                        /* We only support full screen mpo with ODM */
                        if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
                                        && pipe->plane_state && mpo_pipe
-                                       && memcmp(&mpo_pipe->plane_res.scl_data.recout,
-                                                       &pipe->plane_res.scl_data.recout,
+                                       && memcmp(&mpo_pipe->plane_state->clip_rect,
+                                                       &pipe->stream->src,
                                                        sizeof(struct rect)) != 0) {
                                ASSERT(mpo_pipe->plane_state != pipe->plane_state);
                                goto validate_fail;
index 7aa628c21973499fbe9ae5e2521c2ffe042c5f70..9002cb10a6aef7eb8a8578de51b496857c14b30f 100644 (file)
@@ -11,7 +11,8 @@
 # Makefile for dcn30.
 
 DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-               dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+               dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+               dcn301_optc.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
new file mode 100644 (file)
index 0000000..b3cfcb8
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn301_optc.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+#include "dc_trace.h"
+
+#define REG(reg)\
+       optc1->tg_regs->reg
+
+#define CTX \
+       optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+       optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+/**
+ * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ * @optc: timing_generator instance.
+ * @params: parameters used for Dynamic Refresh Rate.
+ */
+void optc301_set_drr(
+       struct timing_generator *optc,
+       const struct drr_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       if (params != NULL &&
+               params->vertical_total_max > 0 &&
+               params->vertical_total_min > 0) {
+
+               if (params->vertical_total_mid != 0) {
+
+                       REG_SET(OTG_V_TOTAL_MID, 0,
+                               OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+                       REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+                                       OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+                                       OTG_VTOTAL_MID_FRAME_NUM,
+                                       (uint8_t)params->vertical_total_mid_frame_num);
+
+               }
+
+               optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+               REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+                               OTG_V_TOTAL_MIN_SEL, 1,
+                               OTG_V_TOTAL_MAX_SEL, 1,
+                               OTG_FORCE_LOCK_ON_EVENT, 0,
+                               OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+                               OTG_SET_V_TOTAL_MIN_MASK, 0);
+               // Setup manual flow control for EOF via TRIG_A
+               optc->funcs->setup_manual_trigger(optc);
+
+       } else {
+               REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+                               OTG_SET_V_TOTAL_MIN_MASK, 0,
+                               OTG_V_TOTAL_MIN_SEL, 0,
+                               OTG_V_TOTAL_MAX_SEL, 0,
+                               OTG_FORCE_LOCK_ON_EVENT, 0);
+
+               optc->funcs->set_vtotal_min_max(optc, 0, 0);
+       }
+}
+
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_SET_8(OTG_TRIGA_CNTL, 0,
+                       OTG_TRIGA_SOURCE_SELECT, 21,
+                       OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+                       OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+                       OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+                       OTG_TRIGA_POLARITY_SELECT, 0,
+                       OTG_TRIGA_FREQUENCY_SELECT, 0,
+                       OTG_TRIGA_DELAY, 0,
+                       OTG_TRIGA_CLEAR, 1);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+               .validate_timing = optc1_validate_timing,
+               .program_timing = optc1_program_timing,
+               .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+               .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+               .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+               .program_global_sync = optc1_program_global_sync,
+               .enable_crtc = optc2_enable_crtc,
+               .disable_crtc = optc1_disable_crtc,
+               /* used by enable_timing_synchronization. Not needed for FPGA */
+               .is_counter_moving = optc1_is_counter_moving,
+               .get_position = optc1_get_position,
+               .get_frame_count = optc1_get_vblank_counter,
+               .get_scanoutpos = optc1_get_crtc_scanoutpos,
+               .get_otg_active_size = optc1_get_otg_active_size,
+               .set_early_control = optc1_set_early_control,
+               /* used by enable_timing_synchronization. Not needed for FPGA */
+               .wait_for_state = optc1_wait_for_state,
+               .set_blank_color = optc3_program_blank_color,
+               .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+               .triplebuffer_lock = optc3_triplebuffer_lock,
+               .triplebuffer_unlock = optc2_triplebuffer_unlock,
+               .enable_reset_trigger = optc1_enable_reset_trigger,
+               .enable_crtc_reset = optc1_enable_crtc_reset,
+               .disable_reset_trigger = optc1_disable_reset_trigger,
+               .lock = optc3_lock,
+               .unlock = optc1_unlock,
+               .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+               .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+               .enable_optc_clock = optc1_enable_optc_clock,
+               .set_drr = optc301_set_drr,
+               .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+               .set_vtotal_min_max = optc3_set_vtotal_min_max,
+               .set_static_screen_control = optc1_set_static_screen_control,
+               .program_stereo = optc1_program_stereo,
+               .is_stereo_left_eye = optc1_is_stereo_left_eye,
+               .tg_init = optc3_tg_init,
+               .is_tg_enabled = optc1_is_tg_enabled,
+               .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+               .clear_optc_underflow = optc1_clear_optc_underflow,
+               .setup_global_swap_lock = NULL,
+               .get_crc = optc1_get_crc,
+               .configure_crc = optc2_configure_crc,
+               .set_dsc_config = optc3_set_dsc_config,
+               .get_dsc_status = optc2_get_dsc_status,
+               .set_dwb_source = NULL,
+               .set_odm_bypass = optc3_set_odm_bypass,
+               .set_odm_combine = optc3_set_odm_combine,
+               .get_optc_source = optc2_get_optc_source,
+               .set_out_mux = optc3_set_out_mux,
+               .set_drr_trigger_window = optc3_set_drr_trigger_window,
+               .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+               .set_gsl = optc2_set_gsl,
+               .set_gsl_source_select = optc2_set_gsl_source_select,
+               .set_vtg_params = optc1_set_vtg_params,
+               .program_manual_trigger = optc2_program_manual_trigger,
+               .setup_manual_trigger = optc301_setup_manual_trigger,
+               .get_hw_timing = optc1_get_hw_timing,
+               .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+       optc1->base.funcs = &dcn30_tg_funcs;
+
+       optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+       optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+       optc1->min_h_blank = 32;
+       optc1->min_v_blank = 3;
+       optc1->min_v_blank_interlace = 5;
+       optc1->min_h_sync_width = 4;
+       optc1->min_v_sync_width = 1;
+}
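optc301_set_drr() programs the V_TOTAL window with (value - 1); the min/max values themselves come from the DRR range the caller requests. An illustrative standalone derivation of such a window for a 48-60 Hz range (not DC code; the timing numbers are only an example):

#include <stdio.h>

int main(void)
{
	unsigned long pix_clk = 148500000;	/* 148.5 MHz, 1080p-class timing */
	unsigned long h_total = 2200;

	/* refresh = pix_clk / (h_total * v_total), so v_total bounds follow
	 * from the requested refresh bounds (truncation makes the actual
	 * minimum rate land slightly above 48 Hz)
	 */
	unsigned long v_total_min = pix_clk / (h_total * 60);	/* 1125 */
	unsigned long v_total_max = pix_clk / (h_total * 48);	/* 1406 */

	printf("OTG_V_TOTAL_MIN=%lu OTG_V_TOTAL_MAX=%lu\n",
	       v_total_min - 1, v_total_max - 1);
	return 0;
}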
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
new file mode 100644 (file)
index 0000000..b495856
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN301_H__
+#define __DC_OPTC_DCN301_H__
+
+#include "dcn20/dcn20_optc.h"
+#include "dcn30/dcn30_optc.h"
+
+void dcn301_timing_generator_init(struct optc *optc1);
+void optc301_setup_manual_trigger(struct timing_generator *optc);
+void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
+#endif /* __DC_OPTC_DCN301_H__ */
index 3485fbb1093efbafa1673692e3bdcf07a729a469..f856a4773c27a642a3141c7d3033694781a3c504 100644 (file)
@@ -42,7 +42,7 @@
 #include "dcn30/dcn30_hubp.h"
 #include "irq/dcn30/irq_service_dcn30.h"
 #include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
 #include "dcn20/dcn20_hwseq.h"
 #include "dcn30/dcn30_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
        tgn10->tg_shift = &optc_shift;
        tgn10->tg_mask = &optc_mask;
 
-       dcn30_timing_generator_init(tgn10);
+       dcn301_timing_generator_init(tgn10);
 
        return &tgn10->base;
 }
@@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct(
        dc->caps.max_cursor_size = 256;
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
-       dc->caps.max_slave_planes = 1;
-       dc->caps.max_slave_yuv_planes = 1;
-       dc->caps.max_slave_rgb_planes = 1;
+       dc->caps.max_slave_planes = 2;
+       dc->caps.max_slave_yuv_planes = 2;
+       dc->caps.max_slave_rgb_planes = 2;
        dc->caps.is_apu = true;
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
index 45956ef6f3f9dd2b4ede34a40a7077051f4e4ebc..131b8b82afc07be16295bc63481aeeb9944651ec 100644 (file)
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+               .pipe_split_policy = MPC_SPLIT_AVOID,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 65c1d754e2d6b1fb173f86d2603fb1a17550b809..8664f0c4c9b7633cde8b1af492e7112376014175 100644 (file)
@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
                struct dcn_dccg *dccg_dcn,
                enum phyd32clk_clock_source src)
 {
-       if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+       if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+                       dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
                if (src == PHYD32CLKC)
                        src = PHYD32CLKF;
                if (src == PHYD32CLKD)
@@ -284,19 +285,11 @@ void dccg31_enable_symclk32_le(
        /* select one of the PHYD32CLKs as the source for symclk32_le */
        switch (hpo_le_inst) {
        case 0:
-               if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
-                                       SYMCLK32_LE0_GATE_DISABLE, 1,
-                                       SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE0_SRC_SEL, phyd32clk,
                                SYMCLK32_LE0_EN, 1);
                break;
        case 1:
-               if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
-                                       SYMCLK32_LE1_GATE_DISABLE, 1,
-                                       SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE1_SRC_SEL, phyd32clk,
                                SYMCLK32_LE1_EN, 1);
@@ -319,19 +312,38 @@ void dccg31_disable_symclk32_le(
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE0_SRC_SEL, 0,
                                SYMCLK32_LE0_EN, 0);
-               if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
-                                       SYMCLK32_LE0_GATE_DISABLE, 0,
-                                       SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
                break;
        case 1:
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE1_SRC_SEL, 0,
                                SYMCLK32_LE1_EN, 0);
-               if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
-                                       SYMCLK32_LE1_GATE_DISABLE, 0,
-                                       SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+               return;
+       }
+}
+
+void dccg31_set_symclk32_le_root_clock_gating(
+               struct dccg *dccg,
+               int hpo_le_inst,
+               bool enable)
+{
+       struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+       if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+               return;
+
+       switch (hpo_le_inst) {
+       case 0:
+               REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                               SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0,
+                               SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       case 1:
+               REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                               SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0,
+                               SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0);
                break;
        default:
                BREAK_TO_DEBUGGER();
@@ -660,10 +672,8 @@ void dccg31_init(struct dccg *dccg)
        dccg31_disable_symclk32_se(dccg, 2);
        dccg31_disable_symclk32_se(dccg, 3);
 
-       if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
-               dccg31_disable_symclk32_le(dccg, 0);
-               dccg31_disable_symclk32_le(dccg, 1);
-       }
+       dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false);
+       dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false);
 
        if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
                dccg31_disable_dpstreamclk(dccg, 0);
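Pulling the gate programming into dccg31_set_symclk32_le_root_clock_gating() lets dccg31_init() force the clocks gated without going through a full disable, and keeps the root_clock_optimization debug-bit check in one place. A minimal standalone model of the new helper (illustrative names, not the real register macros):

#include <stdbool.h>
#include <stdio.h>

struct dccg_model {
	bool symclk32_le_opt;	/* mirrors root_clock_optimization.bits.symclk32_le */
	bool gate_disable[2];	/* mirrors SYMCLK32_LE*_GATE_DISABLE */
};

static void set_symclk32_le_root_clock_gating(struct dccg_model *d,
					      int inst, bool enable)
{
	if (!d->symclk32_le_opt)	/* the helper owns the debug-bit check now */
		return;
	d->gate_disable[inst] = enable;	/* 1 = clock kept running, 0 = gating allowed */
}

int main(void)
{
	struct dccg_model d = { .symclk32_le_opt = true };

	/* as in dccg31_init(): force both instances gated at init */
	set_symclk32_le_root_clock_gating(&d, 0, false);
	set_symclk32_le_root_clock_gating(&d, 1, false);
	printf("LE0=%d LE1=%d\n", d.gate_disable[0], d.gate_disable[1]);
	return 0;
}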
index 0902ce5eb8a116c3e302112aba5497ae52376d44..e3caaacf7493b11bd52e3806eb1a082198cc3694 100644 (file)
@@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le(
                struct dccg *dccg,
                int hpo_le_inst);
 
+void dccg31_set_symclk32_le_root_clock_gating(
+               struct dccg *dccg,
+               int hpo_le_inst,
+               bool enable);
+
 void dccg31_set_physymclk(
                struct dccg *dccg,
                int phy_inst,
index bd62502380d8da7961634084f8627f9cc2e18437..4596f3bac1b4c7654974c75bdc5bae9a32f3ab2d 100644 (file)
@@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output(
                struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
                struct dc_link *link;
 
-               if (!dcn10_is_dig_enabled(enc))
+               if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
                        return;
 
                link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
index 0278bae50a9d6c9d7a035558f1236af443b901f7..45143459eeddcf9a6085249d1a2352f32e514d0f 100644 (file)
@@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
                        VID_STREAM_STATUS, 0,
                        10, 5000);
 
-       /* Disable SDP tranmission */
+       /* Disable SDP transmission */
        REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
                        SDP_STREAM_ENABLE, 0);
 
index 0746ed31d1d14ae9d511b6db9efe04a41c251d68..ad3f019a784f3ccb09ad00e76f1316e3a0d719ed 100644 (file)
@@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = {
        .disable_symclk32_se = dccg31_disable_symclk32_se,
        .enable_symclk32_le = dccg31_enable_symclk32_le,
        .disable_symclk32_le = dccg31_disable_symclk32_le,
+       .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
        .set_physymclk = dccg31_set_physymclk,
        .set_dtbclk_dto = dccg314_set_dtbclk_dto,
        .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
index 6a9024aa32853178d94e661ccecbde93e3aeaf37..9b8e0f6f32b499a3c4779fe88d78c1968e99a95d 100644 (file)
@@ -908,15 +908,15 @@ static const struct dc_debug_options debug_defaults_drv = {
        .root_clock_optimization = {
                        .bits = {
                                        .dpp = true,
-                                       .dsc = false,
-                                       .hdmistream = false,
-                                       .hdmichar = false,
-                                       .dpstream = false,
-                                       .symclk32_se = false,
-                                       .symclk32_le = false,
-                                       .symclk_fe = false,
-                                       .physymclk = false,
-                                       .dpiasymclk = false,
+                                       .dsc = true,
+                                       .hdmistream = true,
+                                       .hdmichar = true,
+                                       .dpstream = true,
+                                       .symclk32_se = true,
+                                       .symclk32_le = true,
+                                       .symclk_fe = true,
+                                       .physymclk = true,
+                                       .dpiasymclk = true,
                        }
        },
 
index df3a438abda82c9f433696d930fa58e76e00a012..2e3fa0fb8bd490a25d3c187175bf441038eb720f 100644 (file)
@@ -1659,7 +1659,7 @@ static int dcn315_populate_dml_pipes_from_context(
 {
        int i, pipe_cnt, crb_idx, crb_pipes;
        struct resource_context *res_ctx = &context->res_ctx;
-       struct pipe_ctx *pipe;
+       struct pipe_ctx *pipe = NULL;
        const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
        int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
        bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
index 11e28e056cf7c61e780755e53af2a9bbb82a88d3..61ceff6bc0b19331000a9b4b7057a3d276123f45 100644 (file)
@@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync(
        uint32_t dispclk_rdivider_value = 0;
 
        REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
-       REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+       /* It is not valid for the WDIVIDER to be set to 0 */
+       if (dispclk_rdivider_value != 0)
+               REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
 }
 
 static void dccg32_get_pixel_rate_div(
index d52d5feeb311bac3113c92380b8bf34acbe24786..a87afb796f470a23256cb19f8071af139e31c997 100644 (file)
 #include "clk_mgr.h"
 #include "dsc.h"
 #include "dcn20/dcn20_optc.h"
-#include "dmub_subvp_state.h"
 #include "dce/dmub_hw_lock_mgr.h"
 #include "dcn32_resource.h"
 #include "link.h"
-#include "dmub/inc/dmub_subvp_state.h"
 
 #define DC_LOGGER_INIT(logger)
 
index 1cc09799f92d386ca338fa5a1cd77d63dfc28c7a..0d1f18f8348e5934527ac655997e00f647b7e4c1 100644 (file)
@@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context(
 {
        int i, pipe_cnt;
        struct resource_context *res_ctx = &context->res_ctx;
-       struct pipe_ctx *pipe;
+       struct pipe_ctx *pipe = NULL;
        bool subvp_in_use = false;
        struct dc_crtc_timing *timing;
        bool vsr_odm_support = false;
index 5be242a1b82c4c3768505c034167140f458a6497..db9c55a09d9f4f62c75d5130fa1416fc985288ca 100644 (file)
@@ -641,6 +641,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
        uint8_t non_subvp_pipes = 0;
        bool drr_pipe_found = false;
        bool drr_psr_capable = false;
+       uint64_t refresh_rate = 0;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -649,8 +650,14 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
                        continue;
 
                if (pipe->plane_state && !pipe->top_pipe) {
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
                                subvp_count++;
+
+                               refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+                                       pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+                               refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+                               refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+                       }
                        if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
                                non_subvp_pipes++;
                                drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
@@ -662,7 +669,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
                }
        }
 
-       if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable)
+       if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+               ((uint32_t)refresh_rate < 120))
                result = true;
 
        return result;
@@ -693,6 +701,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
        bool drr_pipe_found = false;
        struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
        bool vblank_psr_capable = false;
+       uint64_t refresh_rate = 0;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -701,8 +710,14 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
                        continue;
 
                if (pipe->plane_state && !pipe->top_pipe) {
-                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+                       if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
                                subvp_count++;
+
+                               refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+                                       pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+                               refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+                               refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+                       }
                        if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
                                non_subvp_pipes++;
                                vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
@@ -715,7 +730,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
        }
 
        if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
-                       vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
+               ((uint32_t)refresh_rate < 120) &&
+               vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
                result = true;
 
        return result;
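Both admissibility checks above derive the refresh rate with a round-up integer division, split into two div_u64 steps; since floor(floor(a/m)/n) equals floor(a/(m*n)) for unsigned integers, the split division is exact. A worked standalone example using an illustrative CTA 4K60 timing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pix_clk_100hz = 5940000;	/* 594.000 MHz */
	uint64_t h_total = 4400, v_total = 2250;

	/* refresh = ceil(pixel_clock_hz / (h_total * v_total)),
	 * via the usual (a + b - 1) / b round-up trick
	 */
	uint64_t r = pix_clk_100hz * 100 + v_total * h_total - 1;
	r /= v_total;
	r /= h_total;

	assert(r == 60);	/* exact multiple: rounds to exactly 60 Hz */
	printf("refresh = %llu Hz\n", (unsigned long long)r);
	return 0;
}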
index f294f2f8c75bcf6dd102ab6ac199b96e5a9a0c10..57cf0358cc438516eb81d0dfc6bbbbf9ff8e9009 100644 (file)
@@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule(
        unsigned int HostVMDynamicLevels;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
-       double ImmediateFlipBW;
+       double ImmediateFlipBW = 1.0;
        double HostVMInefficiencyFactor;
        double VRatioClamped;
 
index 43016c462251f48a9d85d4a6a2f3134263c61cb7..eba51144fee75f6e53bbb8d9ab5c454c619e60cb 100644 (file)
@@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule(
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
-       double ImmediateFlipBW;
+       double ImmediateFlipBW = 1.0;
        double LineTime = v->HTotal[k] / v->PixelClock[k];
 
        if (v->GPUVMEnable == true && v->HostVMEnable == true) {
index d9e049e7ff0a6e9ca6c755dd72869a39461983be..07adb614366eb5c71964ea0b5b4f3701a7427f2a 100644 (file)
@@ -31,6 +31,7 @@
 #include "dml/dcn20/dcn20_fpu.h"
 #include "dml/dcn31/dcn31_fpu.h"
 #include "dml/display_mode_vba.h"
+#include "dml/dml_inline_defs.h"
 
 struct _vcs_dpi_ip_params_st dcn3_14_ip = {
        .VBlankNomDefaultUS = 668,
@@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format)
        return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
 }
 
+/*
+ * micro_sec_to_vert_lines() - convert a duration to vertical lines for a given timing
+ * @num_us: duration in microseconds
+ * @timing: timing whose h_total and pixel clock define the line time
+ * @return: number of vertical lines, rounded up to the next whole line so
+ *          that the returned count always covers at least num_us
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+       unsigned int num_lines = 0;
+       unsigned int lines_time_in_ns = 1000.0 *
+                       (((float)timing->h_total * 1000.0) /
+                        ((float)timing->pix_clk_100hz / 10.0));
+
+       num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+       return num_lines;
+}
+
 int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
                                               display_e2e_pipe_params_st *pipes,
                                               bool fast_validate)
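micro_sec_to_vert_lines() above divides the requested duration by the line time and rounds up, so vblank_nom is now clamped in lines rather than raw microseconds. A worked standalone example of the same conversion (illustrative 1080p60 timing, plain libm floats instead of the DML helpers):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double h_total = 2200.0;
	double pix_clk_hz = 148500000.0;	/* 148.5 MHz */
	unsigned int num_us = 668;		/* VBlankNomDefaultUS */

	/* one line lasts h_total / pixel_clock seconds */
	double line_time_ns = 1e9 * h_total / pix_clk_hz;	/* ~14814.8 ns */

	/* round UP so the returned count always covers num_us */
	unsigned int num_lines =
		(unsigned int)ceil(1000.0 * num_us / line_time_ns);

	printf("line time %.1f ns -> %u lines\n", line_time_ns, num_lines); /* 46 */
	return 0;
}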
@@ -289,15 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
 
        for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_crtc_timing *timing;
+               unsigned int num_lines = 0;
 
                if (!res_ctx->pipe_ctx[i].stream)
                        continue;
                pipe = &res_ctx->pipe_ctx[i];
                timing = &pipe->stream->timing;
 
-               pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+               num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing);
+
+               if (pipe->stream->adjust.v_total_min != 0)
+                       pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+               else
+                       pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
                pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
-               pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
+               pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
                pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
                pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
 
index 9010c47476e923e6f8d1f95aca7de12fe9e47165..32251af76935d1d41a204b0906530a060d444bb7 100644 (file)
@@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule(
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
-       double ImmediateFlipBW;
+       double ImmediateFlipBW = 1.0;
        double LineTime = v->HTotal[k] / v->PixelClock[k];
 
        if (v->GPUVMEnable == true && v->HostVMEnable == true) {
index a950348017128bcead306d0b6f7de9b3b4517ad3..0f882b879b0db028b95755601dcf730089236b99 100644 (file)
@@ -1040,7 +1040,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
        uint32_t i;
        uint8_t subvp_count = 0;
        uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
-       uint32_t refresh_rate = 0;
+       uint64_t refresh_rate = 0;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1050,19 +1050,21 @@ static bool subvp_subvp_admissable(struct dc *dc,
 
                if (pipe->plane_state && !pipe->top_pipe &&
                                pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-                       refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
-                                       pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
-                                       / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
-                       if (refresh_rate < min_refresh)
-                               min_refresh = refresh_rate;
-                       if (refresh_rate > max_refresh)
-                               max_refresh = refresh_rate;
+                       refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+                               pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+                       refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+                       refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+                       if ((uint32_t)refresh_rate < min_refresh)
+                               min_refresh = (uint32_t)refresh_rate;
+                       if ((uint32_t)refresh_rate > max_refresh)
+                               max_refresh = (uint32_t)refresh_rate;
                        subvp_count++;
                }
        }
 
        if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
-                       (min_refresh >= 120 && max_refresh >= 120)))
+               (min_refresh >= 120 && max_refresh <= 165)))
                result = true;
 
        return result;
@@ -1715,8 +1717,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
                if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
                                && !dc->config.enable_windowed_mpo_odm
                                && pipe->plane_state && mpo_pipe
-                               && memcmp(&mpo_pipe->plane_res.scl_data.recout,
-                                               &pipe->plane_res.scl_data.recout,
+                               && memcmp(&mpo_pipe->plane_state->clip_rect,
+                                               &pipe->stream->src,
                                                sizeof(struct rect)) != 0) {
                        ASSERT(mpo_pipe->plane_state != pipe->plane_state);
                        goto validate_fail;
index a50e7f4dce4215f1b5f537186f9185dbd7f146b6..ecea008f19d3aa0d3427e97adb8e33219a22a35f 100644 (file)
@@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule(
        double TimeForFetchingMetaPTE = 0;
        double TimeForFetchingRowInVBlank = 0;
        double LinesToRequestPrefetchPixelData = 0;
+       double LinesForPrefetchBandwidth = 0;
        unsigned int HostVMDynamicLevelsTrips;
        double  trip_to_mem;
        double  Tvm_trips;
@@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule(
                        TimeForFetchingMetaPTE = Tvm_oto;
                        TimeForFetchingRowInVBlank = Tr0_oto;
                        *PrefetchBandwidth = prefetch_bw_oto;
+                       /* Clamp to oto for bandwidth calculation */
+                       LinesForPrefetchBandwidth = dst_y_prefetch_oto;
                } else {
                        *DestinationLinesForPrefetch = dst_y_prefetch_equ;
                        TimeForFetchingMetaPTE = Tvm_equ;
                        TimeForFetchingRowInVBlank = Tr0_equ;
                        *PrefetchBandwidth = prefetch_bw_equ;
+                       /* Clamp to equ for bandwidth calculation */
+                       LinesForPrefetchBandwidth = dst_y_prefetch_equ;
                }
 
                *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
@@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule(
                *DestinationLinesToRequestRowInVBlank =
                                dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
 
-               LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+               LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
                                *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
 
 #ifdef __DML_VBA_DEBUG__
@@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule(
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
-       double ImmediateFlipBW;
+       double ImmediateFlipBW = 1.0;
 
        if (GPUVMEnable == true && HostVMEnable == true)
                HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
index 58dd62cce4bb9b473cb3197b8b9f7302dacde8a0..3966845c769453888675b80c32ede95c0b62fa5f 100644 (file)
@@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed;
 
 static bool dsc_policy_disable_dsc_stream_overhead;
 
+static bool disable_128b_132b_stream_overhead;
+
 #ifndef MAX
 #define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
 #endif
@@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead;
 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
 #endif
 
+/* Need to account for padding due to pixel-to-symbol packing
+ * for uncompressed 128b/132b streams.
+ */
+static uint32_t apply_128b_132b_stream_overhead(
+       const struct dc_crtc_timing *timing, const uint32_t kbps)
+{
+       uint32_t total_kbps = kbps;
+
+       if (disable_128b_132b_stream_overhead)
+               return kbps;
+
+       if (!timing->flags.DSC) {
+               struct fixed31_32 bpp;
+               struct fixed31_32 overhead_factor;
+
+               bpp = dc_fixpt_from_int(kbps);
+               bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
+
+               /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
+                * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
+                */
+               overhead_factor = dc_fixpt_from_int(timing->h_addressable);
+               overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
+               overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
+               overhead_factor = dc_fixpt_div(
+                       dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
+                       overhead_factor);
+
+               total_kbps = dc_fixpt_ceil(
+                       dc_fixpt_mul_int(overhead_factor, total_kbps));
+       }
+
+       return total_kbps;
+}
+
 uint32_t dc_bandwidth_in_kbps_from_timing(
-       const struct dc_crtc_timing *timing)
+       const struct dc_crtc_timing *timing,
+       const enum dc_link_encoding_format link_encoding)
 {
        uint32_t bits_per_channel = 0;
        uint32_t kbps;
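For uncompressed 128b/132b streams, each line's pixels are packed into 32-bit symbols across four lanes (hence the divide by 128), and the final partial symbol of a line is padded, so bandwidth grows by ceil(symbols)/symbols. A floating-point restatement with an illustrative timing; the driver itself stays in dc_fixpt fixed point to remain FPU-free:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double pix_clk_khz = 72000.0;		/* ~1366x768@60, illustrative */
	double h_active = 1366.0;
	double kbps = pix_clk_khz * 24.0;	/* 24 bpp stream */

	double bpp = kbps / pix_clk_khz;		/* 24.0 */
	double symbols = h_active * bpp / 128.0;	/* 256.125 per line */
	double factor = ceil(symbols) / symbols;	/* ~1.0034: pad to 257 */

	printf("overhead factor %.6f -> %.0f kbps\n", factor, ceil(factor * kbps));
	return 0;
}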
@@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
                        kbps = kbps * 2 / 3;
        }
 
+       if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+               kbps = apply_128b_132b_stream_overhead(timing, kbps);
+
        return kbps;
 }
 
@@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range(
                const uint32_t num_slices_h,
                const struct dsc_enc_caps *dsc_caps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_bw_range *range);
 
 static uint32_t compute_bpp_x16_from_target_bandwidth(
@@ -133,6 +175,7 @@ static bool setup_dsc_config(
                int target_bandwidth_kbps,
                const struct dc_crtc_timing *timing,
                const struct dc_dsc_config_options *options,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_config *dsc_cfg);
 
 static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range(
                uint32_t max_bpp_x16,
                const struct dsc_dec_dpcd_caps *dsc_sink_caps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_bw_range *range)
 {
        bool is_dsc_possible = false;
@@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range(
 
        if (is_dsc_possible)
                is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
-                               &options, &config);
+                               &options, link_encoding, &config);
 
        if (is_dsc_possible)
                is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
-                               config.num_slices_h, &dsc_common_caps, timing, range);
+                               config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
 
        return is_dsc_possible;
 }
@@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range(
                const uint32_t num_slices_h,
                const struct dsc_enc_caps *dsc_caps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_bw_range *range)
 {
        uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
@@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range(
        /* populate output structure */
        if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
                /* native stream bandwidth */
-               range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+               range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
 
                /* max dsc target bpp */
                range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
@@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16(
                const int target_bandwidth_kbps,
                const struct dc_crtc_timing *timing,
                const int num_slices_h,
+               const enum dc_link_encoding_format link_encoding,
                int *target_bpp_x16)
 {
        struct dc_dsc_bw_range range;
@@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16(
        *target_bpp_x16 = 0;
 
        if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
-                       num_slices_h, dsc_common_caps, timing, &range)) {
+                       num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
                if (target_bandwidth_kbps >= range.stream_kbps) {
                        if (policy->enable_dsc_when_not_needed)
                                /* enable max bpp even dsc is not needed */
@@ -796,6 +842,7 @@ static bool setup_dsc_config(
                int target_bandwidth_kbps,
                const struct dc_crtc_timing *timing,
                const struct dc_dsc_config_options *options,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_config *dsc_cfg)
 {
        struct dsc_enc_caps dsc_common_caps;
@@ -995,6 +1042,7 @@ static bool setup_dsc_config(
                                target_bandwidth_kbps,
                                timing,
                                num_slices_h,
+                               link_encoding,
                                &target_bpp);
                dsc_cfg->bits_per_pixel = target_bpp;
        }
@@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config(
                const struct dc_dsc_config_options *options,
                uint32_t target_bandwidth_kbps,
                const struct dc_crtc_timing *timing,
+               const enum dc_link_encoding_format link_encoding,
                struct dc_dsc_config *dsc_cfg)
 {
        bool is_dsc_possible = false;
@@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config(
        is_dsc_possible = setup_dsc_config(dsc_sink_caps,
                &dsc_enc_caps,
                target_bandwidth_kbps,
-               timing, options, dsc_cfg);
+               timing, options, link_encoding, dsc_cfg);
        return is_dsc_possible;
 }
 
@@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
        dsc_policy_disable_dsc_stream_overhead = disable;
 }
 
+void dc_set_disable_128b_132b_stream_overhead(bool disable)
+{
+       disable_128b_132b_stream_overhead = disable;
+}
+
 void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
 {
        options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
index d2190a3320f64cd8e9336f7cf4ba53384e1934ac..33db15d69f23373c1f95c03622ddd3394e07cb90 100644 (file)
@@ -27,6 +27,8 @@
 
 #include "dm_services_types.h"
 
+struct abm_save_restore;
+
 struct abm {
        struct dc_context *ctx;
        const struct abm_funcs *funcs;
@@ -55,6 +57,10 @@ struct abm_funcs {
                        unsigned int bytes,
                        unsigned int inst);
        bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
+       bool (*save_restore)(
+                       struct abm *abm,
+                       unsigned int panel_inst,
+                       struct abm_save_restore *pData);
        bool (*set_pipe_ex)(struct abm *abm,
                        unsigned int otg_inst,
                        unsigned int option,
index 7254182b7c721075f10d53f36d8679bbe7594bcc..af6b9509d09d32582a32b36b45792b46fdd7d97d 100644 (file)
@@ -172,8 +172,6 @@ struct aux_engine_funcs {
                struct aux_engine *engine,
                uint8_t *returned_bytes);
        bool (*is_engine_available)(struct aux_engine *engine);
-       enum i2caux_engine_type (*get_engine_type)(
-               const struct aux_engine *engine);
        bool (*acquire)(
                struct aux_engine *engine,
                struct ddc *ddc);
index 8dc804bbe98bd11e89b1cb9578be40935399b317..93592281de32c814db3471b15144bca6adc60051 100644 (file)
@@ -123,6 +123,11 @@ struct dccg_funcs {
                        struct dccg *dccg,
                        int hpo_le_inst);
 
+       void (*set_symclk32_le_root_clock_gating)(
+                       struct dccg *dccg,
+                       int hpo_le_inst,
+                       bool enable);
+
        void (*set_physymclk)(
                        struct dccg *dccg,
                        int phy_inst,
index c923b2af8510362b51d4c675254cecc17e46737d..37bc98faa7a0d050cfc5dce3b7db9a67f72dc88b 100644 (file)
 
 #define DCN_BASE__INST0_SEG2                       0x000034C0
 
-static enum dc_irq_source to_dal_irq_source_dcn314(
-               struct irq_service *irq_service,
-               uint32_t src_id,
-               uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service,
+                                                  uint32_t src_id,
+                                                  uint32_t ext_id)
 {
        switch (src_id) {
        case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
index db9f1baa27e5e5e5e6542948d7c4c74eb2e7e015..bce0428ad6123a88e28b69a131e0336da4020f4a 100644 (file)
@@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link,
                stream->timing.display_color_depth;
        struct bit_depth_reduction_params params;
        struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
-       int width = pipe_ctx->stream->timing.h_addressable +
+       struct pipe_ctx *odm_pipe;
+       int odm_cnt = 1;
+       int h_active = pipe_ctx->stream->timing.h_addressable +
                pipe_ctx->stream->timing.h_border_left +
                pipe_ctx->stream->timing.h_border_right;
-       int height = pipe_ctx->stream->timing.v_addressable +
+       int v_active = pipe_ctx->stream->timing.v_addressable +
                pipe_ctx->stream->timing.v_border_bottom +
                pipe_ctx->stream->timing.v_border_top;
+       int odm_slice_width, last_odm_slice_width, offset = 0;
 
        memset(&params, 0, sizeof(params));
 
+       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+               odm_cnt++;
+
+       odm_slice_width = h_active / odm_cnt;
+       last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+
        switch (test_pattern) {
        case DP_TEST_PATTERN_COLOR_SQUARES:
                controller_test_pattern =
@@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link,
        {
                /* disable bit depth reduction */
                pipe_ctx->stream->bit_depth_params = params;
-               opp->funcs->opp_program_bit_depth_reduction(opp, &params);
-               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+                       opp->funcs->opp_program_bit_depth_reduction(opp, &params);
                        pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                                controller_test_pattern, color_depth);
-               else if (link->dc->hwss.set_disp_pattern_generator) {
-                       struct pipe_ctx *odm_pipe;
+               } else if (link->dc->hwss.set_disp_pattern_generator) {
                        enum controller_dp_color_space controller_color_space;
-                       int opp_cnt = 1;
-                       int offset = 0;
-                       int dpg_width = width;
+                       struct output_pixel_processor *odm_opp;
 
                        switch (test_pattern_color_space) {
                        case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -502,36 +508,33 @@ static void set_crtc_test_pattern(struct dc_link *link,
                                break;
                        }
 
-                       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
-                               opp_cnt++;
-                       dpg_width = width / opp_cnt;
-                       offset = dpg_width;
-
-                       link->dc->hwss.set_disp_pattern_generator(link->dc,
-                                       pipe_ctx,
-                                       controller_test_pattern,
-                                       controller_color_space,
-                                       color_depth,
-                                       NULL,
-                                       dpg_width,
-                                       height,
-                                       0);
-
-                       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
-                               struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
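+                       /* Walk the ODM chain, programming each slice at its
+                        * running horizontal offset; the last slice is handled
+                        * after the loop since its width may differ.
+                        */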
+                       odm_pipe = pipe_ctx;
+                       while (odm_pipe->next_odm_pipe) {
+                               odm_opp = odm_pipe->stream_res.opp;
                                odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
                                link->dc->hwss.set_disp_pattern_generator(link->dc,
-                                               odm_pipe,
+                                               pipe_ctx,
                                                controller_test_pattern,
                                                controller_color_space,
                                                color_depth,
                                                NULL,
-                                               dpg_width,
-                                               height,
+                                               odm_slice_width,
+                                               v_active,
                                                offset);
-                               offset += offset;
+                               offset += odm_slice_width;
+                               odm_pipe = odm_pipe->next_odm_pipe;
                        }
+                       odm_opp = odm_pipe->stream_res.opp;
+                       odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+                       link->dc->hwss.set_disp_pattern_generator(link->dc,
+                                       odm_pipe,
+                                       controller_test_pattern,
+                                       controller_color_space,
+                                       color_depth,
+                                       NULL,
+                                       last_odm_slice_width,
+                                       v_active,
+                                       offset);
                }
        }
        break;
@@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link,
                /* restore bitdepth reduction */
                resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
                pipe_ctx->stream->bit_depth_params = params;
-               opp->funcs->opp_program_bit_depth_reduction(opp, &params);
-               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+                       opp->funcs->opp_program_bit_depth_reduction(opp, &params);
                        pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
-                               CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
-                               color_depth);
-               else if (link->dc->hwss.set_disp_pattern_generator) {
-                       struct pipe_ctx *odm_pipe;
-                       int opp_cnt = 1;
-                       int dpg_width;
-
-                       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
-                               opp_cnt++;
-
-                       dpg_width = width / opp_cnt;
-                       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
-                               struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+                                       CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+                                       color_depth);
+               } else if (link->dc->hwss.set_disp_pattern_generator) {
+                       struct output_pixel_processor *odm_opp;
 
+                       odm_pipe = pipe_ctx;
+                       while (odm_pipe->next_odm_pipe) {
+                               odm_opp = odm_pipe->stream_res.opp;
                                odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
                                link->dc->hwss.set_disp_pattern_generator(link->dc,
                                                odm_pipe,
@@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
                                                CONTROLLER_DP_COLOR_SPACE_UDEFINED,
                                                color_depth,
                                                NULL,
-                                               dpg_width,
-                                               height,
-                                               0);
+                                               odm_slice_width,
+                                               v_active,
+                                               offset);
+                               offset += odm_slice_width;
+                               odm_pipe = odm_pipe->next_odm_pipe;
                        }
+                       odm_opp = odm_pipe->stream_res.opp;
+                       odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
                        link->dc->hwss.set_disp_pattern_generator(link->dc,
-                                       pipe_ctx,
+                                       odm_pipe,
                                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                        CONTROLLER_DP_COLOR_SPACE_UDEFINED,
                                        color_depth,
                                        NULL,
-                                       dpg_width,
-                                       height,
-                                       0);
+                                       last_odm_slice_width,
+                                       v_active,
+                                       offset);
                }
        }
        break;
index 586fe25c170295af0aa43742d32823088c9eaa95..dc1cb5478e08f4c2519bd7de85e7e6d79c6729d0 100644 (file)
@@ -108,6 +108,11 @@ static void enable_hpo_dp_link_output(struct dc_link *link,
                enum clock_source_id clock_source,
                const struct dc_link_settings *link_settings)
 {
+       if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+               link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+                               link->dc->res_pool->dccg,
+                               link_res->hpo_dp_link_enc->inst,
+                               true);
        link_res->hpo_dp_link_enc->funcs->enable_link_phy(
                        link_res->hpo_dp_link_enc,
                        link_settings,
@@ -122,6 +127,11 @@ static void disable_hpo_dp_link_output(struct dc_link *link,
                link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
                link_res->hpo_dp_link_enc->funcs->disable_link_phy(
                                link_res->hpo_dp_link_enc, signal);
+               if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+                       link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+                                       link->dc->res_pool->dccg,
+                                       link_res->hpo_dp_link_enc->inst,
+                                       false);
 }
 
 static void set_hpo_dp_link_test_pattern(struct dc_link *link,
index 8041b8369e45f1d11d7b9929a71afcb066fc8f69..c9b6676eaf53b959d55e3030136736a0c39d61a3 100644 (file)
@@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
                        (link->dpcd_sink_ext_caps.bits.oled == 1)) {
                        dpcd_set_source_specific_data(link);
                        msleep(post_oui_delay);
-                       set_default_brightness_aux(link);
-                       //TODO: use cached
+                       set_cached_brightness_aux(link);
                }
 
                return true;
index 1a7b93e41e352adb0e67a11d01a36e7d76eaffbc..7997936613fc35a4416e55520a267e2b16db0d36 100644 (file)
@@ -1079,8 +1079,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
 static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
 {
        uint64_t kbps;
+       enum dc_link_encoding_format link_encoding;
 
-       kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
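+       /* 8b/10b and 128b/132b encodings carry different overhead, so the
+        * bandwidth (and hence the PBN value) depends on which one is active.
+        */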
+       if (dp_is_128b_132b_signal(pipe_ctx))
+               link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+       else
+               link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+
+       kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding);
        return get_pbn_from_bw_in_kbps(kbps);
 }
 
@@ -1538,7 +1544,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
                        dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
        struct fixed31_32 timing_bw =
                        dc_fixpt_from_int(
-                                       dc_bandwidth_in_kbps_from_timing(&stream->timing));
+                                       dc_bandwidth_in_kbps_from_timing(&stream->timing,
+                                                       dc_link_get_highest_encoding_format(link)));
        struct fixed31_32 avg_time_slots_per_mtp =
                        dc_fixpt_div(timing_bw, timeslot_bw_effective);
 
@@ -1971,6 +1978,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
        bool is_vga_mode = (stream->timing.h_addressable == 640)
                        && (stream->timing.v_addressable == 480);
        struct dc *dc = pipe_ctx->stream->ctx->dc;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
 
        if (stream->phy_pix_clk == 0)
                stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2010,6 +2018,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
        if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
                display_color_depth = COLOR_DEPTH_888;
 
+       /* We need to enable the stream encoder for TMDS first so that the
+        * 1/4 TMDS character clock can be applied when the rate is beyond
+        * 340 MHz.
+        */
+       if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+               link_hwss->setup_stream_encoder(pipe_ctx);
+
        dc->hwss.enable_tmds_link_output(
                        link,
                        &pipe_ctx->link_res,
@@ -2129,7 +2143,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
        if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
                link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
                link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
-               set_default_brightness_aux(link); // TODO: use cached if known
+               set_cached_brightness_aux(link);
+
                if (link->dpcd_sink_ext_caps.bits.oled == 1)
                        msleep(bl_oled_enable_delay);
                edp_backlight_enable_aux(link, true);
index e8b2fc4002a52d07dc91e7cee0c3250acd23dc51..b45fda96eaf649bf16f291df2294d787680e0287 100644 (file)
@@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing(
                                /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
                                outputTiming.flags.DSC = 0;
 #endif
-                       if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+                       if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
+                                       dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
                                return false;
                } else { // DP to HDMI TMDS converter
                        if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
@@ -285,7 +286,7 @@ static bool dp_validate_mode_timing(
                link_setting = &link->verified_link_cap;
        */
 
-       req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+       req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
        max_bw = dp_link_bandwidth_kbps(link, link_setting);
 
        if (req_bw <= max_bw) {
@@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
        for (uint8_t i = 0; i < num_streams; ++i) {
 
                link[i] = stream[i].link;
-               bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
+               bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+                               dc_link_get_highest_encoding_format(link[i]));
        }
 
        ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
index 0fa1228bc178a0b1d4c66e555c08ecc5f09b5d25..0f19c07011b560c775528acfa58583e60706386a 100644 (file)
@@ -427,7 +427,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
 
        if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
                        !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
-                       ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+                       ddc->ctx->dce_version == DCN_VERSION_3_1) {
                /* Fixed VS workaround for AUX timeout */
                const uint32_t fixed_vs_address = 0xF004F;
                const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
index 3a5e80b5771169a79c94c076f1a945634e156b57..b38ac3ea06b0f094b8da4ae2e2c400db30c38fb5 100644 (file)
@@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
        struct dc_link_settings *link_setting)
 {
        struct dc_link *link = stream->link;
-       uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+       uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link));
 
        memset(link_setting, 0, sizeof(*link_setting));
 
@@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
 
                                tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
                                tmp_timing.flags.DSC = 0;
-                               orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+                               orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing,
+                                               dc_link_get_highest_encoding_format(link));
                                edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
                                max_link_rate = tmp_link_setting.link_rate;
                        }
@@ -2165,7 +2166,9 @@ static bool dp_verify_link_cap(
                                                        link,
                                                        &irq_data))
                                (*fail_count)++;
-
+               } else if (status == LINK_TRAINING_LINK_LOSS) {
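+                       /* A link-loss result still produced a verified cap:
+                        * keep it as a success, but count the failure so the
+                        * caller retries.
+                        */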
+                       success = true;
+                       (*fail_count)++;
                } else {
                        (*fail_count)++;
                }
@@ -2188,6 +2191,7 @@ bool dp_verify_link_cap_with_retries(
        int i = 0;
        bool success = false;
        int fail_count = 0;
+       struct dc_link_settings last_verified_link_cap = fail_safe_link_settings;
 
        dp_trace_detect_lt_init(link);
 
@@ -2204,10 +2208,14 @@ bool dp_verify_link_cap_with_retries(
                if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
                        link->verified_link_cap = fail_safe_link_settings;
                        break;
-               } else if (dp_verify_link_cap(link, known_limit_link_setting,
-                               &fail_count) && fail_count == 0) {
-                       success = true;
-                       break;
+               } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
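+                       /* Cache the newly verified cap so a later failed
+                        * attempt can fall back to it rather than to the
+                        * fail-safe settings.
+                        */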
+                       last_verified_link_cap = link->verified_link_cap;
+                       if (fail_count == 0) {
+                               success = true;
+                               break;
+                       }
+               } else {
+                       link->verified_link_cap = last_verified_link_cap;
                }
                fsleep(10 * 1000);
        }
index e011df4bdaf216a872a6c6fee711faaed645e523..90339c2dfd84871bfc15a4169b20c01b7687aa97 100644 (file)
@@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries(
                } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
                        uint32_t req_bw;
                        uint32_t link_bw;
+                       enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED;
 
                        decide_fallback_link_setting(link, &max_link_settings,
                                        &cur_link_settings, status);
+
+                       if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+                               link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+                       else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING)
+                               link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+
                        /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
                         * minimum link bandwidth.
                         */
-                       req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+                       req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding);
                        link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
                        is_link_bw_low = (req_bw > link_bw);
                        is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
index 15faaf645b145dfbac9c2ee8c690cd1b46ba1106..ca0543e62917c19d3467916b3b6ff5debe1b82c3 100644 (file)
@@ -236,6 +236,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
        uint32_t pre_disable_intercept_delay_ms = 0;
        uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
        uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+       const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+       const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+       const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+       const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+       const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
        uint32_t vendor_lttpr_write_address = 0xF004F;
        enum link_training_result status = LINK_TRAINING_SUCCESS;
        uint8_t lane = 0;
@@ -244,10 +249,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
        uint8_t toggle_rate;
        uint8_t rate;
 
-       if (link->local_sink)
-               pre_disable_intercept_delay_ms =
-                               link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
        /* Only 8b/10b is supported */
        ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
                        DP_8b_10b_ENCODING);
@@ -260,10 +261,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
        if (offset != 0xFF) {
                vendor_lttpr_write_address +=
                                ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+               if (offset == 2) {
+                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
 
                /* Certain display and cable configurations require extra delay */
-               if (offset > 2)
-                       pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+               } else if (offset > 2) {
+                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+               }
        }
 
        /* Vendor specific: Reset lane settings */
@@ -339,6 +343,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
                DP_DOWNSPREAD_CTRL,
                lt_settings->link_settings.link_spread);
 
+       if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_1[0],
+                               sizeof(vendor_lttpr_write_data_4lane_1));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_2[0],
+                               sizeof(vendor_lttpr_write_data_4lane_2));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_3[0],
+                               sizeof(vendor_lttpr_write_data_4lane_3));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_4[0],
+                               sizeof(vendor_lttpr_write_data_4lane_4));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_5[0],
+                               sizeof(vendor_lttpr_write_data_4lane_5));
+       }
+
        /* 2. Perform link training */
 
        /* Perform Clock Recovery Sequence */
@@ -596,9 +628,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
        const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
        const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
        const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
+       uint32_t pre_disable_intercept_delay_ms = 0;
        uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
        uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
-       uint32_t pre_disable_intercept_delay_ms = 0;
+       const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+       const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+       const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+       const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+       const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
        uint32_t vendor_lttpr_write_address = 0xF004F;
        enum link_training_result status = LINK_TRAINING_SUCCESS;
        uint8_t lane = 0;
@@ -607,10 +644,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
        uint8_t toggle_rate;
        uint8_t rate;
 
-       if (link->local_sink)
-               pre_disable_intercept_delay_ms =
-                               link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
        /* Only 8b/10b is supported */
        ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
                        DP_8b_10b_ENCODING);
@@ -623,10 +656,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
        if (offset != 0xFF) {
                vendor_lttpr_write_address +=
                                ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+               if (offset == 2) {
+                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
 
                /* Certain display and cable configurations require extra delay */
-               if (offset > 2)
-                       pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+               } else if (offset > 2) {
+                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+               }
        }
 
        /* Vendor specific: Reset lane settings */
@@ -702,6 +738,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
                DP_DOWNSPREAD_CTRL,
                lt_settings->link_settings.link_spread);
 
+       if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_1[0],
+                               sizeof(vendor_lttpr_write_data_4lane_1));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_2[0],
+                               sizeof(vendor_lttpr_write_data_4lane_2));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_3[0],
+                               sizeof(vendor_lttpr_write_data_4lane_3));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_4[0],
+                               sizeof(vendor_lttpr_write_data_4lane_4));
+               core_link_write_dpcd(
+                               link,
+                               vendor_lttpr_write_address,
+                               &vendor_lttpr_write_data_4lane_5[0],
+                               sizeof(vendor_lttpr_write_data_4lane_5));
+       }
+
        /* 2. Perform link training */
 
        /* Perform Clock Recovery Sequence */
index 2039a345f23a174f4689008365bd573c03ee3b40..8b360c09e0e87861a77c91c44ffd8f37f0a5e1e5 100644 (file)
@@ -46,43 +46,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
 {
        union dpcd_edp_config edp_config_set;
        bool panel_mode_edp = false;
+       enum dc_status result;
 
        memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
 
-       if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+       switch (panel_mode) {
+       case DP_PANEL_MODE_EDP:
+       case DP_PANEL_MODE_SPECIAL:
+               panel_mode_edp = true;
+               break;
 
-               switch (panel_mode) {
-               case DP_PANEL_MODE_EDP:
-               case DP_PANEL_MODE_SPECIAL:
-                       panel_mode_edp = true;
-                       break;
+       default:
+               break;
+       }
 
-               default:
-                               break;
-               }
+       /* Set eDP panel mode in receiver */
+       result = core_link_read_dpcd(
+               link,
+               DP_EDP_CONFIGURATION_SET,
+               &edp_config_set.raw,
+               sizeof(edp_config_set.raw));
 
-               /*set edp panel mode in receiver*/
-               core_link_read_dpcd(
+       if (result == DC_OK &&
+               edp_config_set.bits.PANEL_MODE_EDP
+               != panel_mode_edp) {
+
+               edp_config_set.bits.PANEL_MODE_EDP =
+               panel_mode_edp;
+               result = core_link_write_dpcd(
                        link,
                        DP_EDP_CONFIGURATION_SET,
                        &edp_config_set.raw,
                        sizeof(edp_config_set.raw));
 
-               if (edp_config_set.bits.PANEL_MODE_EDP
-                       != panel_mode_edp) {
-                       enum dc_status result;
-
-                       edp_config_set.bits.PANEL_MODE_EDP =
-                       panel_mode_edp;
-                       result = core_link_write_dpcd(
-                               link,
-                               DP_EDP_CONFIGURATION_SET,
-                               &edp_config_set.raw,
-                               sizeof(edp_config_set.raw));
-
-                       ASSERT(result == DC_OK);
-               }
+               ASSERT(result == DC_OK);
        }
+
        link->panel_mode = panel_mode;
        DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
                 "eDP panel mode enabled: %d \n",
@@ -164,6 +163,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
        *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
        *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
 
+       link->backlight_settings.backlight_millinits = backlight_millinits;
 
        if (!link->dpcd_caps.panel_luminance_control) {
                if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
@@ -251,10 +251,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin
                link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
                return false;
 
-       if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
-               (uint8_t *) backlight_millinits,
-               sizeof(uint32_t)))
-               return false;
+       if (!link->dpcd_caps.panel_luminance_control) {
+               if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+                       (uint8_t *)backlight_millinits,
+                       sizeof(uint32_t)))
+                       return false;
+       } else {
+               //setting to 0 as a precaution, since target_luminance_value is 3 bytes
+               memset(backlight_millinits, 0, sizeof(uint32_t));
+
+               if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+                       (uint8_t *)backlight_millinits,
+                       sizeof(struct target_luminance_value)))
+                       return false;
+       }
 
        return true;
 }
@@ -276,6 +286,16 @@ bool set_default_brightness_aux(struct dc_link *link)
        return false;
 }
 
+bool set_cached_brightness_aux(struct dc_link *link)
+{
+       if (link->backlight_settings.backlight_millinits)
+               return edp_set_backlight_level_nits(link, true,
+                                                   link->backlight_settings.backlight_millinits, 0);
+
+       return set_default_brightness_aux(link);
+}
+
 bool edp_is_ilr_optimization_required(struct dc_link *link,
                struct dc_crtc_timing *crtc_timing)
 {
@@ -309,7 +329,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
        core_link_read_dpcd(link, DP_LANE_COUNT_SET,
                                &lane_count_set.raw, sizeof(lane_count_set));
 
-       req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+       req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link));
 
        if (!crtc_timing->flags.DSC)
                edp_decide_link_settings(link, &link_setting, req_bw);
index 28f552080558cf9c1410b47e16a35144554f4990..fa89bdb3a336197c32865ac8777885e842165c82 100644 (file)
@@ -30,6 +30,7 @@
 enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
 void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
 bool set_default_brightness_aux(struct dc_link *link);
+bool set_cached_brightness_aux(struct dc_link *link);
 void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
 int edp_get_backlight_level(const struct dc_link *link);
 bool edp_get_backlight_level_nits(struct dc_link *link,
index 4585e0419da615d2791401d9f8cd973d4b86426f..2d995c87fbb986aef9e0366773452b9a07d92f0e 100644 (file)
@@ -378,6 +378,7 @@ struct dmub_srv_hw_funcs {
 
        union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub);
 
+       union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub);
 
        void (*set_gpint)(struct dmub_srv *dmub,
                          union dmub_gpint_data_register reg);
@@ -778,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb);
 enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
                                             union dmub_fw_boot_status *status);
 
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+                                            union dmub_fw_boot_options *option);
+
 enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
                                              union dmub_rb_cmd *cmd);
 
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+                                            bool skip);
+
 bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
 
 bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
index af1f50742371f83fe02ad22a787e6cb5ecfabf05..adde1d84d773265ca47dcd9f14dd779838a41552 100644 (file)
@@ -170,6 +170,95 @@ extern "C" {
 #endif
 
 #pragma pack(push, 1)
+#define ABM_NUM_OF_ACE_SEGMENTS         5
+
+union abm_flags {
+       struct {
+               /**
+                * @abm_enabled: Indicates if ABM is enabled.
+                */
+               unsigned int abm_enabled : 1;
+
+               /**
+                * @disable_abm_requested: Indicates if driver has requested ABM to be disabled.
+                */
+               unsigned int disable_abm_requested : 1;
+
+               /**
+                * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled
+                * immediately.
+                */
+               unsigned int disable_abm_immediately : 1;
+
+               /**
+                * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM
+                * to be disabled immediately while keeping the current gain.
+                */
+               unsigned int disable_abm_immediate_keep_gain : 1;
+
+               /**
+                * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled.
+                */
+               unsigned int fractional_pwm : 1;
+
+               /**
+                * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment
+                * of user backlight level.
+                */
+               unsigned int abm_gradual_bl_change : 1;
+       } bitfields;
+
+       unsigned int u32All;
+};
+
+struct abm_save_restore {
+       /**
+        * @flags: Misc. ABM flags.
+        */
+       union abm_flags flags;
+
+       /**
+        * @pause: true:  pause ABM and get state
+        *         false: unpause ABM after setting state
+        */
+       uint32_t pause;
+
+       /**
+        * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13)
+        */
+       uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS];
+
+       /**
+        * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6)
+        */
+       uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS];
+
+       /**
+        * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6)
+        */
+       uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS];
+
+       /**
+        * @knee_threshold: Current x-position of ACE knee (u0.16).
+        */
+       uint32_t knee_threshold;
+       /**
+        * @current_gain: Current backlight reduction (u16.16).
+        */
+       uint32_t current_gain;
+       /**
+        * @curr_bl_level: Current actual backlight level converging to target backlight level.
+        */
+       uint16_t curr_bl_level;
+
+       /**
+        * @curr_user_bl_level: Current nominal backlight level converging to level requested by user.
+        */
+       uint16_t curr_user_bl_level;
+};
+
 /**
  * union dmub_addr - DMUB physical/virtual 64-bit address.
  */
@@ -2672,6 +2761,12 @@ enum dmub_cmd_abm_type {
         * unregister vertical interrupt after steady state is reached
         */
        DMUB_CMD__ABM_PAUSE     = 6,
+
+       /**
+        * Save and restore ABM state. On save, the current ABM parameters are
+        * read back; on restore, the state is updated with the passed-in data.
+        */
+       DMUB_CMD__ABM_SAVE_RESTORE      = 7,
 };
 
 /**
@@ -3056,6 +3151,7 @@ struct dmub_cmd_abm_pause_data {
        uint8_t pad[1];
 };
 
 /**
  * Definition of a DMUB_CMD__ABM_PAUSE command.
  */
@@ -3071,6 +3167,36 @@ struct dmub_rb_cmd_abm_pause {
        struct dmub_cmd_abm_pause_data abm_pause_data;
 };
 
+/**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+struct dmub_rb_cmd_abm_save_restore {
+       /**
+        * Command header.
+        */
+       struct dmub_cmd_header header;
+
+       /**
+        * OTG hw instance
+        */
+       uint8_t otg_inst;
+
+       /**
+        * Enable or disable ABM pause
+        */
+       uint8_t freeze;
+
+       /**
+        * Debug flag; also provides explicit padding to a 4 byte boundary.
+        */
+       uint8_t debug;
+
+       /**
+        * Data passed between driver and FW in a DMUB_CMD__ABM_SAVE_RESTORE command.
+        */
+       struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
 /**
  * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
  */
@@ -3508,6 +3634,11 @@ union dmub_rb_cmd {
         */
        struct dmub_rb_cmd_abm_pause abm_pause;
 
+       /**
+        * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+        */
+       struct dmub_rb_cmd_abm_save_restore abm_save_restore;
+
        /**
         * Definition of a DMUB_CMD__DP_AUX_ACCESS command.
         */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h
deleted file mode 100644 (file)
index 21b02ba..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DMUB_SUBVP_STATE_H
-#define DMUB_SUBVP_STATE_H
-
-#include "dmub_cmd.h"
-
-#define DMUB_SUBVP_INST0 0
-#define DMUB_SUBVP_INST1 1
-#define SUBVP_MAX_WATERMARK 0xFFFF
-
-struct dmub_subvp_hubp_state {
-       uint32_t CURSOR0_0_CURSOR_POSITION;
-       uint32_t CURSOR0_0_CURSOR_HOT_SPOT;
-       uint32_t CURSOR0_0_CURSOR_DST_OFFSET;
-       uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
-       uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
-       uint32_t CURSOR0_0_CURSOR_SIZE;
-       uint32_t CURSOR0_0_CURSOR_CONTROL;
-       uint32_t HUBPREQ0_CURSOR_SETTINGS;
-       uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
-       uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
-       uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
-};
-
-enum subvp_error_code {
-       DMUB_SUBVP_INVALID_STATE,
-       DMUB_SUBVP_INVALID_TRANSITION,
-};
-
-enum subvp_state {
-       DMUB_SUBVP_DISABLED,
-       DMUB_SUBVP_IDLE,
-       DMUB_SUBVP_TRY_ACQUIRE_LOCKS,
-       DMUB_SUBVP_WAIT_FOR_LOCKS,
-       DMUB_SUBVP_PRECONFIGURE,
-       DMUB_SUBVP_PREPARE,
-       DMUB_SUBVP_ENABLE,
-       DMUB_SUBVP_SWITCHING,
-       DMUB_SUBVP_END,
-       DMUB_SUBVP_RESTORE,
-};
-
-/* Defines information for SUBVP to handle vertical interrupts. */
-struct dmub_subvp_vertical_interrupt_event {
-       /**
-        * @inst: Hardware instance of vertical interrupt.
-        */
-       uint8_t otg_inst;
-
-       /**
-        * @pad: Align structure to 4 byte boundary.
-        */
-       uint8_t pad[3];
-
-       enum subvp_state curr_state;
-};
-
-struct dmub_subvp_vertical_interrupt_state {
-       /**
-        * @events: Event list.
-        */
-       struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS];
-};
-
-struct dmub_subvp_vline_interrupt_event {
-
-       uint8_t hubp_inst;
-       uint8_t pad[3];
-};
-
-struct dmub_subvp_vline_interrupt_state {
-       struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES];
-};
-
-struct dmub_subvp_interrupt_ctx {
-       struct dmub_subvp_vertical_interrupt_state vertical_int;
-       struct dmub_subvp_vline_interrupt_state vline_int;
-};
-
-struct dmub_subvp_pipe_state {
-       uint32_t pix_clk_100hz;
-       uint16_t main_vblank_start;
-       uint16_t main_vblank_end;
-       uint16_t mall_region_lines;
-       uint16_t prefetch_lines;
-       uint16_t prefetch_to_mall_start_lines;
-       uint16_t processing_delay_lines;
-       uint8_t main_pipe_index;
-       uint8_t phantom_pipe_index;
-       uint16_t htotal; // htotal for main / phantom pipe
-       uint16_t vtotal;
-       uint16_t optc_underflow_count;
-       uint16_t hubp_underflow_count;
-       uint8_t pad[2];
-};
-
-/**
- * struct dmub_subvp_vblank_drr_info - Store DRR state when handling
- * SubVP + VBLANK with DRR multi-display case.
- *
- * The info stored in this struct is only valid if drr_in_use = 1.
- */
-struct dmub_subvp_vblank_drr_info {
-       uint8_t drr_in_use;
-       uint8_t drr_window_size_ms;     // DRR window size -- indicates largest VMIN/VMAX adjustment per frame
-       uint16_t min_vtotal_supported;  // Min VTOTAL that supports switching in VBLANK
-       uint16_t max_vtotal_supported;  // Max VTOTAL that can still support SubVP static scheduling requirements
-       uint16_t prev_vmin;             // Store VMIN value before MCLK switch (used to restore after MCLK end)
-       uint16_t prev_vmax;             // Store VMAX value before MCLK switch (used to restore after MCLK end)
-       uint8_t use_ramping;            // Use ramping or not
-       uint8_t pad[1];
-};
-
-struct dmub_subvp_vblank_pipe_info {
-       uint32_t pix_clk_100hz;
-       uint16_t vblank_start;
-       uint16_t vblank_end;
-       uint16_t vstartup_start;
-       uint16_t vtotal;
-       uint16_t htotal;
-       uint8_t pipe_index;
-       uint8_t pad[1];
-       struct dmub_subvp_vblank_drr_info drr_info;     // DRR considered as part of SubVP + VBLANK case
-};
-
-enum subvp_switch_type {
-       DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE
-       DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays
-       DMUB_SUBVP_AND_VBLANK,
-       DMUB_SUBVP_AND_FPO,
-};
-
-/* SubVP state. */
-struct dmub_subvp_state {
-       struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS];
-       struct dmub_subvp_interrupt_ctx int_ctx;
-       struct dmub_subvp_vblank_pipe_info vblank_info;
-       enum subvp_state state; // current state
-       enum subvp_switch_type switch_type; // enum take up 4 bytes (?)
-       uint8_t mclk_pending;
-       uint8_t num_subvp_streams;
-       uint8_t vertical_int_margin_us;
-       uint8_t pstate_allow_width_us;
-       uint32_t subvp_mclk_switch_count;
-       uint32_t subvp_wait_lock_count;
-       uint32_t driver_wait_lock_count;
-       uint32_t subvp_vblank_frame_count;
-       uint16_t watermark_a_cache;
-       uint8_t pad[2];
-};
-
-#endif /* _DMUB_SUBVP_STATE_H_ */
index 5e952541e72d5160edb6bd3eeae7356aebf8618e..094e9f8645571bbead4da7a9874071acea51e9da 100644 (file)
@@ -352,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub)
        return status;
 }
 
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub)
+{
+       union dmub_fw_boot_options option;
+
+       option.all = REG_READ(DMCUB_SCRATCH14);
+       return option;
+}
+
 void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
 {
        union dmub_fw_boot_options boot_options = {0};
index 89c5a948b67d5c1228c4d73c7cd029902e5575c6..4d520a893c7b3ca7153f538c50b967918d736741 100644 (file)
@@ -239,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
 
 union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub);
 
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub);
+
 void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub,
                              const struct dmub_region *outbox0);
 
index bdaf43892f47bbcf83d4a9d3b20a9756697672b1..93624ffe4eb82447939c9e813944cb14697f294b 100644 (file)
@@ -255,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
                funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
                funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
                funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
+               funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option;
                funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
                funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
                //outbox0 call stacks
@@ -639,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
        if (dmub->hw_funcs.enable_dmub_boot_options)
                dmub->hw_funcs.enable_dmub_boot_options(dmub, params);
 
-       if (dmub->hw_funcs.skip_dmub_panel_power_sequence)
+       if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
                dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub,
                        params->skip_panel_power_sequence);
 
-       if (dmub->hw_funcs.reset_release)
+       if (dmub->hw_funcs.reset_release && !dmub->is_virtual)
                dmub->hw_funcs.reset_release(dmub);
 
        dmub->hw_init = true;
@@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
        return DMUB_STATUS_OK;
 }
 
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+                                            union dmub_fw_boot_options *option)
+{
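+       /* Clear the out-param first so callers get a defined value even if
+        * the service is not initialized or the hook is missing.
+        */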
+       option->all = 0;
+
+       if (!dmub->sw_init)
+               return DMUB_STATUS_INVALID;
+
+       if (dmub->hw_funcs.get_fw_boot_option)
+               *option = dmub->hw_funcs.get_fw_boot_option(dmub);
+
+       return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+                                            bool skip)
+{
+       if (!dmub->sw_init)
+               return DMUB_STATUS_INVALID;
+
+       if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
+               dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip);
+
+       return DMUB_STATUS_OK;
+}
+
 enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
                                              union dmub_rb_cmd *cmd)
 {
index cd870af5fd2502586718fde9458d1b719d9aa791..1b8ab20f17152d3a024aa2959fbc32a97dbe279f 100644 (file)
@@ -53,7 +53,7 @@ enum {
        BITS_PER_DP_BYTE = 10,
        DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
        DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */
-       DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
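+       /* 9641 = floor(9671 * 0.997): 96.71% coding efficiency scaled by the
+        * 99.7% downspread factor.
+        */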
+       DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */
 };
 
 enum lttpr_mode {
index f175e65b853a001ea1bcc66d8edfd5b726cbc952..abe829bbd54af668e113a4b2724e4ed72ae57511 100644 (file)
@@ -250,6 +250,7 @@ enum DC_DEBUG_MASK {
        DC_DISABLE_PSR = 0x10,
        DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
        DC_DISABLE_MPO = 0x40,
+       DC_ENABLE_DPIA_TRACE = 0x80,
 };
 
 enum amd_dpm_forced_level;
index d0df3381539f0a6452189c74697ede5bfd1d55b5..8433f99f66679dbcee25bb7567d993fcb8673294 100644 (file)
@@ -315,16 +315,19 @@ struct kfd2kgd_calls {
                                        uint32_t watch_address_mask,
                                        uint32_t watch_id,
                                        uint32_t watch_mode,
-                                       uint32_t debug_vmid);
+                                       uint32_t debug_vmid,
+                                       uint32_t inst);
        uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
                        uint32_t watch_id);
        void (*get_iq_wait_times)(struct amdgpu_device *adev,
-                       uint32_t *wait_times);
+                       uint32_t *wait_times,
+                       uint32_t inst);
        void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
                        uint32_t wait_times,
                        uint32_t grace_period,
                        uint32_t *reg_offset,
-                       uint32_t *reg_data);
+                       uint32_t *reg_data,
+                       uint32_t inst);
        void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
                        int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
        void (*program_trap_handler_settings)(struct amdgpu_device *adev,
index 9f542f6e19ed06730f36c1586f9bee597c63d90d..90989405eddcf6ef3eea0fde4ee37e6cd9c29c64 100644 (file)
@@ -892,4 +892,73 @@ struct gpu_metrics_v2_3 {
        uint16_t                        average_temperature_core[8]; // average CPU core temperature on APUs
        uint16_t                        average_temperature_l3[2];
 };
+
+struct gpu_metrics_v2_4 {
+       struct metrics_table_header     common_header;
+
+       /* Temperature (unit: centi-Celsius) */
+       uint16_t                        temperature_gfx;
+       uint16_t                        temperature_soc;
+       uint16_t                        temperature_core[8];
+       uint16_t                        temperature_l3[2];
+
+       /* Utilization (unit: centi) */
+       uint16_t                        average_gfx_activity;
+       uint16_t                        average_mm_activity;
+
+       /* Driver attached timestamp (in ns) */
+       uint64_t                        system_clock_counter;
+
+       /* Power/Energy (unit: mW) */
+       uint16_t                        average_socket_power;
+       uint16_t                        average_cpu_power;
+       uint16_t                        average_soc_power;
+       uint16_t                        average_gfx_power;
+       uint16_t                        average_core_power[8];
+
+       /* Average clocks (unit: MHz) */
+       uint16_t                        average_gfxclk_frequency;
+       uint16_t                        average_socclk_frequency;
+       uint16_t                        average_uclk_frequency;
+       uint16_t                        average_fclk_frequency;
+       uint16_t                        average_vclk_frequency;
+       uint16_t                        average_dclk_frequency;
+
+       /* Current clocks (unit: MHz) */
+       uint16_t                        current_gfxclk;
+       uint16_t                        current_socclk;
+       uint16_t                        current_uclk;
+       uint16_t                        current_fclk;
+       uint16_t                        current_vclk;
+       uint16_t                        current_dclk;
+       uint16_t                        current_coreclk[8];
+       uint16_t                        current_l3clk[2];
+
+       /* Throttle status (ASIC dependent) */
+       uint32_t                        throttle_status;
+
+       /* Fans */
+       uint16_t                        fan_pwm;
+
+       uint16_t                        padding[3];
+
+       /* Throttle status (ASIC independent) */
+       uint64_t                        indep_throttle_status;
+
+       /* Average Temperature (unit: centi-Celsius) */
+       uint16_t                        average_temperature_gfx;
+       uint16_t                        average_temperature_soc;
+       uint16_t                        average_temperature_core[8];
+       uint16_t                        average_temperature_l3[2];
+
+       /* Power/Voltage (unit: mV) */
+       uint16_t                        average_cpu_voltage;
+       uint16_t                        average_soc_voltage;
+       uint16_t                        average_gfx_voltage;
+
+       /* Power/Current (unit: mA) */
+       uint16_t                        average_cpu_current;
+       uint16_t                        average_soc_current;
+       uint16_t                        average_gfx_current;
+};
 #endif
index 0997e999416a48ec905379083fb4b2b0ba7b7f76..b1db2b19018742eb4a41bc23084c3a882323c05a 100644 (file)
@@ -275,7 +275,9 @@ union MESAPI__ADD_QUEUE {
                        uint32_t trap_en                : 1;
                        uint32_t is_aql_queue           : 1;
                        uint32_t skip_process_ctx_clear : 1;
-                       uint32_t reserved               : 19;
+                       uint32_t map_legacy_kq          : 1;
+                       uint32_t exclusively_scheduled  : 1;
+                       uint32_t reserved               : 17;
                };
                struct MES_API_STATUS           api_status;
                uint64_t                        tma_addr;
index 0fea6a746611b728c993a3cd9d46863d31b20b7e..a2c8dca2425e50808da6f0b3e4bc9a0277cdb8d4 100644 (file)
@@ -7,13 +7,11 @@
 #define MAX_SEGMENT                                         6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 } __maybe_unused;
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index 9ef88a0b1b57e46112c1b9cecfaf4794811ce196..3922dd274f30717c4568b4ef35219bae042899f4 100644 (file)
@@ -2049,8 +2049,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
                if (gc_ver < IP_VERSION(9, 0, 0) ||
-                   gc_ver == IP_VERSION(9, 4, 1) ||
-                   gc_ver == IP_VERSION(9, 4, 2))
+                   !amdgpu_device_has_display_hardware(adev))
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
                if (mp1_ver < IP_VERSION(10, 0, 0))
index 52045ad59beda8838e76d5eb3110167a72ba41c4..eec816f0cbf925debbcf79f3f6a1afe2e4a56497 100644 (file)
@@ -24,8 +24,7 @@
 #ifndef __AMDGPU_PM_H__
 #define __AMDGPU_PM_H__
 
-struct cg_flag_name
-{
+struct cg_flag_name {
        u64 flag;
        const char *name;
 };
index 1dc7a065a6d484b37cf285c9b44d5d82b8dc83aa..251ed011b3b0745811bdf43097bab97e72912c0d 100644 (file)
@@ -41,8 +41,7 @@
 #define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x83        // OverDrive 8 Table Version 0.2
 #define SMU_13_0_0_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
 
-enum SMU_13_0_0_ODFEATURE_CAP
-{
+enum SMU_13_0_0_ODFEATURE_CAP {
     SMU_13_0_0_ODCAP_GFXCLK_LIMITS = 0,
     SMU_13_0_0_ODCAP_UCLK_LIMITS,
     SMU_13_0_0_ODCAP_POWER_LIMIT,
@@ -62,8 +61,7 @@ enum SMU_13_0_0_ODFEATURE_CAP
     SMU_13_0_0_ODCAP_COUNT,
 };
 
-enum SMU_13_0_0_ODFEATURE_ID
-{
+enum SMU_13_0_0_ODFEATURE_ID {
     SMU_13_0_0_ODFEATURE_GFXCLK_LIMITS           = 1 << SMU_13_0_0_ODCAP_GFXCLK_LIMITS,           //GFXCLK Limit feature
     SMU_13_0_0_ODFEATURE_UCLK_LIMITS             = 1 << SMU_13_0_0_ODCAP_UCLK_LIMITS,             //UCLK Limit feature
     SMU_13_0_0_ODFEATURE_POWER_LIMIT             = 1 << SMU_13_0_0_ODCAP_POWER_LIMIT,             //Power Limit feature
@@ -85,8 +83,7 @@ enum SMU_13_0_0_ODFEATURE_ID
 
 #define SMU_13_0_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
 
-enum SMU_13_0_0_ODSETTING_ID
-{
+enum SMU_13_0_0_ODSETTING_ID {
     SMU_13_0_0_ODSETTING_GFXCLKFMAX = 0,
     SMU_13_0_0_ODSETTING_GFXCLKFMIN,
     SMU_13_0_0_ODSETTING_UCLKFMIN,
@@ -123,8 +120,7 @@ enum SMU_13_0_0_ODSETTING_ID
 };
 #define SMU_13_0_0_MAX_ODSETTING 64 //Maximum Number of ODSettings
 
-enum SMU_13_0_0_PWRMODE_SETTING
-{
+enum SMU_13_0_0_PWRMODE_SETTING {
     SMU_13_0_0_PMSETTING_POWER_LIMIT_QUIET = 0,
     SMU_13_0_0_PMSETTING_POWER_LIMIT_BALANCE,
     SMU_13_0_0_PMSETTING_POWER_LIMIT_TURBO,
@@ -144,8 +140,7 @@ enum SMU_13_0_0_PWRMODE_SETTING
 };
 #define SMU_13_0_0_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings
 
-struct smu_13_0_0_overdrive_table
-{
+struct smu_13_0_0_overdrive_table {
     uint8_t revision;                             //Revision = SMU_13_0_0_PP_OVERDRIVE_VERSION
     uint8_t reserve[3];                           //Zero filled field reserved for future use
     uint32_t feature_count;                       //Total number of supported features
@@ -156,8 +151,7 @@ struct smu_13_0_0_overdrive_table
     int16_t pm_setting[SMU_13_0_0_MAX_PMSETTING]; //Optimized power mode feature settings
 };
 
-enum SMU_13_0_0_PPCLOCK_ID
-{
+enum SMU_13_0_0_PPCLOCK_ID {
     SMU_13_0_0_PPCLOCK_GFXCLK = 0,
     SMU_13_0_0_PPCLOCK_SOCCLK,
     SMU_13_0_0_PPCLOCK_UCLK,
@@ -175,8 +169,7 @@ enum SMU_13_0_0_PPCLOCK_ID
 };
 #define SMU_13_0_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
 
-struct smu_13_0_0_powerplay_table
-{
+struct smu_13_0_0_powerplay_table {
     struct atom_common_table_header header; //For SMU13, header.format_revision = 15, header.content_revision = 0
     uint8_t table_revision;                 //For SMU13, table_revision = 2
     uint8_t padding;
index 6841a4bce186f1c3102b88b67edeb949bdf5ea83..1cb40226449779d004d9cf2fdfae8e7f7f1364c0 100644
@@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
-       return false;
-#endif
-}
-
 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
        data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
        data->pcie_dpm_key_disabled =
-               intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+               !amdgpu_device_pcie_dynamic_switching_supported() ||
+               !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
        /* need to set voltage control types before EVV patching */
        data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
        data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
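
The Rocket Lake CPU check above is replaced by a query that asks whether the host handles dynamic PCIe link switching at all, so the quirk covers every affected platform rather than one CPU model. The helper's body is not part of this hunk; what follows is only a plausible sketch of such a policy, with the Intel-vendor test an assumption rather than a quote of the merged code:

    /* Sketch, not the merged implementation: report whether the host is
     * expected to tolerate dynamic PCIe link speed/width switching. */
    bool amdgpu_device_pcie_dynamic_switching_supported(void)
    {
    #if IS_ENABLED(CONFIG_X86)
            struct cpuinfo_x86 *c = &cpu_data(0);

            /* Assumption: disable dynamic switching on Intel hosts, which
             * generalizes the old Rocket Lake special case. */
            if (c->x86_vendor == X86_VENDOR_INTEL)
                    return false;
    #endif
            return true;
    }
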
index beab6d7b28b7092175183f319db4399eee7c3743..630132c4a76b1a9e85225b2d53049b0378872c91 100644
@@ -52,8 +52,7 @@ static unsigned int DbiPrbs7[] =
 
 
 //4096 bytes, 256 byte aligned
-static unsigned int NoDbiPrbs7[] =
-{
+static unsigned int NoDbiPrbs7[] = {
     0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
     0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
     0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
@@ -121,8 +120,7 @@ static unsigned int NoDbiPrbs7[] =
 };
 
 // 4096 bytes, 256 byte aligned
-static unsigned int DbiPrbs7[] =
-{
+static unsigned int DbiPrbs7[] = {
     0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
     0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
     0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
index 6a0ac0bbaace995764351cd19add1b25367cfa7d..355c156d871aff86aff1c64d8bc1278c14ea9cbb 100644
@@ -295,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
                                        uint32_t *size,
                                        uint32_t pptable_id);
 
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+                                    uint32_t pcie_gen_cap,
+                                    uint32_t pcie_width_cap);
+
 #endif
 #endif
index eadbe0149cae558be12588a8528d6f5a351a7673..eb694f9f556d89975642654d81a6fe3692c31e93 100644
@@ -41,8 +41,7 @@
 #define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x83        // OverDrive 8 Table Version 0.2
 #define SMU_13_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
 
-enum SMU_13_0_7_ODFEATURE_CAP
-{
+enum SMU_13_0_7_ODFEATURE_CAP {
     SMU_13_0_7_ODCAP_GFXCLK_LIMITS = 0,
     SMU_13_0_7_ODCAP_UCLK_LIMITS,
     SMU_13_0_7_ODCAP_POWER_LIMIT,
@@ -62,8 +61,7 @@ enum SMU_13_0_7_ODFEATURE_CAP
     SMU_13_0_7_ODCAP_COUNT,
 };
 
-enum SMU_13_0_7_ODFEATURE_ID
-{
+enum SMU_13_0_7_ODFEATURE_ID {
     SMU_13_0_7_ODFEATURE_GFXCLK_LIMITS           = 1 << SMU_13_0_7_ODCAP_GFXCLK_LIMITS,           //GFXCLK Limit feature
     SMU_13_0_7_ODFEATURE_UCLK_LIMITS             = 1 << SMU_13_0_7_ODCAP_UCLK_LIMITS,             //UCLK Limit feature
     SMU_13_0_7_ODFEATURE_POWER_LIMIT             = 1 << SMU_13_0_7_ODCAP_POWER_LIMIT,             //Power Limit feature
@@ -85,8 +83,7 @@ enum SMU_13_0_7_ODFEATURE_ID
 
 #define SMU_13_0_7_MAX_ODFEATURE 32 //Maximum Number of OD Features
 
-enum SMU_13_0_7_ODSETTING_ID
-{
+enum SMU_13_0_7_ODSETTING_ID {
     SMU_13_0_7_ODSETTING_GFXCLKFMAX = 0,
     SMU_13_0_7_ODSETTING_GFXCLKFMIN,
     SMU_13_0_7_ODSETTING_UCLKFMIN,
@@ -123,8 +120,7 @@ enum SMU_13_0_7_ODSETTING_ID
 };
 #define SMU_13_0_7_MAX_ODSETTING 64 //Maximum Number of ODSettings
 
-enum SMU_13_0_7_PWRMODE_SETTING
-{
+enum SMU_13_0_7_PWRMODE_SETTING {
     SMU_13_0_7_PMSETTING_POWER_LIMIT_QUIET = 0,
     SMU_13_0_7_PMSETTING_POWER_LIMIT_BALANCE,
     SMU_13_0_7_PMSETTING_POWER_LIMIT_TURBO,
@@ -144,8 +140,7 @@ enum SMU_13_0_7_PWRMODE_SETTING
 };
 #define SMU_13_0_7_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings
 
-struct smu_13_0_7_overdrive_table
-{
+struct smu_13_0_7_overdrive_table {
     uint8_t revision;                             //Revision = SMU_13_0_7_PP_OVERDRIVE_VERSION
     uint8_t reserve[3];                           //Zero filled field reserved for future use
     uint32_t feature_count;                       //Total number of supported features
@@ -156,8 +151,7 @@ struct smu_13_0_7_overdrive_table
     int16_t pm_setting[SMU_13_0_7_MAX_PMSETTING]; //Optimized power mode feature settings
 };
 
-enum SMU_13_0_7_PPCLOCK_ID
-{
+enum SMU_13_0_7_PPCLOCK_ID {
     SMU_13_0_7_PPCLOCK_GFXCLK = 0,
     SMU_13_0_7_PPCLOCK_SOCCLK,
     SMU_13_0_7_PPCLOCK_UCLK,
@@ -175,8 +169,7 @@ enum SMU_13_0_7_PPCLOCK_ID
 };
 #define SMU_13_0_7_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
 
-struct smu_13_0_7_powerplay_table
-{
+struct smu_13_0_7_powerplay_table {
     struct atom_common_table_header header; //For PLUM_BONITO, header.format_revision = 15, header.content_revision = 0
     uint8_t table_revision;                 //For PLUM_BONITO, table_revision = 2
     uint8_t padding;
index 9cd005131f5661683e193f57bfc0dda66b2332e5..c49f770c97b38ff93798dda520ca15eb57f7adaa 100644
@@ -598,7 +598,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
                                         MetricsMember_t member,
                                         uint32_t *value)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
@@ -1482,7 +1482,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
                return ret;
 
        if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
-            (smu_version >=0x360d00)) {
+            (smu_version >= 0x360d00)) {
                ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                                       WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -2113,7 +2113,6 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -2130,6 +2129,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
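
This i2c hunk, repeated below for navi10, sienna_cichlid, aldebaran and the SMU 13.x drivers, widens the pm.mutex critical section so the reply is read while the lock is still held, and funnels every exit through one unlock at the fail label. The shape of the fix, reduced to a sketch with hypothetical names:

    static int xfer_locked(struct ctx *ctx, struct req *req)  /* hypothetical */
    {
            int r;

            mutex_lock(&ctx->lock);
            r = send_request(ctx, req);
            if (r)
                    goto fail;           /* previously unlocked before this test */

            r = read_reply(ctx, req);    /* now runs under the lock */
    fail:
            mutex_unlock(&ctx->lock);    /* single unlock covers every path */
            kfree(req);
            return r;
    }
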
index c94d825a871bdb7bc22d3874a3415e7dfac28e1b..e655071516b735e538592867257beb7a10a8588a 100644
@@ -136,7 +136,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,                0),
        MSG_MAP(BacoAudioD3PME,                 PPSMC_MSG_BacoAudioD3PME,               0),
        MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                        0),
-       MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange,  0),
+       MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0),
        MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange,   0),
        MSG_MAP(GetVoltageByDpm,                PPSMC_MSG_GetVoltageByDpm,              0),
        MSG_MAP(GetVoltageByDpmOverdrive,       PPSMC_MSG_GetVoltageByDpmOverdrive,     0),
@@ -556,7 +556,7 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
                                              MetricsMember_t member,
                                              uint32_t *value)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_legacy_t *metrics =
                (SmuMetrics_legacy_t *)smu_table->metrics_table;
        int ret = 0;
@@ -642,7 +642,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
                                       MetricsMember_t member,
                                       uint32_t *value)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_t *metrics =
                (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
@@ -731,7 +731,7 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
                                              MetricsMember_t member,
                                              uint32_t *value)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_NV12_legacy_t *metrics =
                (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
        int ret = 0;
@@ -817,7 +817,7 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
                                       MetricsMember_t member,
                                       uint32_t *value)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_NV12_t *metrics =
                (SmuMetrics_NV12_t *)smu_table->metrics_table;
        int ret = 0;
@@ -1686,7 +1686,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
                        return 0;
                break;
        case SMU_DCEFCLK:
-               dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+               dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
                break;
 
        default:
@@ -2182,7 +2182,7 @@ static int navi10_read_sensor(struct smu_context *smu,
        struct smu_table_context *table_context = &smu->smu_table;
        PPTable_t *pptable = table_context->driver_pptable;
 
-       if(!data || !size)
+       if (!data || !size)
                return -EINVAL;
 
        switch (sensor) {
@@ -2317,15 +2317,15 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
        uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
        uint32_t max_memory_clock = max_sustainable_clocks->uclock;
 
-       if(smu->disable_uclk_switch == disable_memory_clock_switch)
+       if (smu->disable_uclk_switch == disable_memory_clock_switch)
                return 0;
 
-       if(disable_memory_clock_switch)
+       if (disable_memory_clock_switch)
                ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
        else
                ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
 
-       if(!ret)
+       if (!ret)
                smu->disable_uclk_switch = disable_memory_clock_switch;
 
        return ret;
@@ -2559,7 +2559,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
        return 0;
 }
 
-static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
+{
        int i;
        int ret = 0;
        struct smu_table_context *table_context = &smu->smu_table;
@@ -3021,7 +3022,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -3038,6 +3038,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
@@ -3368,7 +3369,7 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
                      ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
                        ret = navi10_get_gpu_metrics(smu, table);
                else
-                       ret =navi10_get_legacy_gpu_metrics(smu, table);
+                       ret = navi10_get_legacy_gpu_metrics(smu, table);
                break;
        }
 
index f7ed3e655e39796861ec9e34f342d8eed6f147de..0cda3b276f6115a1a4a18a370966943d0db133d5 100644
@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
+               ret = sienna_cichlid_get_smu_metrics_data(smu,
+                                                         METRICS_CURR_UCLK,
+                                                         (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+               ret = sienna_cichlid_get_smu_metrics_data(smu,
+                                                         METRICS_AVERAGE_GFXCLK,
+                                                         (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
@@ -2077,89 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
        return ret;
 }
 
-static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
-                                                     uint32_t *gen_speed_override,
-                                                     uint32_t *lane_width_override)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       *gen_speed_override = 0xff;
-       *lane_width_override = 0xff;
-
-       switch (adev->pdev->device) {
-       case 0x73A0:
-       case 0x73A1:
-       case 0x73A2:
-       case 0x73A3:
-       case 0x73AB:
-       case 0x73AE:
-               /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
-               *lane_width_override = 6;
-               break;
-       case 0x73E0:
-       case 0x73E1:
-       case 0x73E3:
-               *lane_width_override = 4;
-               break;
-       case 0x7420:
-       case 0x7421:
-       case 0x7422:
-       case 0x7423:
-       case 0x7424:
-               *lane_width_override = 3;
-               break;
-       default:
-               break;
-       }
-}
-
-#define MAX(a, b)      ((a) > (b) ? (a) : (b))
-
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                                         uint32_t pcie_gen_cap,
                                         uint32_t pcie_width_cap)
 {
        struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
        struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-       uint32_t gen_speed_override, lane_width_override;
-       uint8_t *table_member1, *table_member2;
-       uint32_t min_gen_speed, max_gen_speed;
-       uint32_t min_lane_width, max_lane_width;
-       uint32_t smu_pcie_arg;
+       u32 smu_pcie_arg;
        int ret, i;
 
-       GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
-       GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+       /* PCIE gen speed and lane width override */
+       if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+               if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+                       pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
 
-       sienna_cichlid_get_override_pcie_settings(smu,
-                                                 &gen_speed_override,
-                                                 &lane_width_override);
+               if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+                       pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
 
-       /* PCIE gen speed override */
-       if (gen_speed_override != 0xff) {
-               min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
-               max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
-       } else {
-               min_gen_speed = MAX(0, table_member1[0]);
-               max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
-               min_gen_speed = min_gen_speed > max_gen_speed ?
-                               max_gen_speed : min_gen_speed;
-       }
-       pcie_table->pcie_gen[0] = min_gen_speed;
-       pcie_table->pcie_gen[1] = max_gen_speed;
-
-       /* PCIE lane width override */
-       if (lane_width_override != 0xff) {
-               min_lane_width = MIN(pcie_width_cap, lane_width_override);
-               max_lane_width = MIN(pcie_width_cap, lane_width_override);
+               /* Force all levels to use the same settings */
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       pcie_table->pcie_gen[i] = pcie_gen_cap;
+                       pcie_table->pcie_lane[i] = pcie_width_cap;
+               }
        } else {
-               min_lane_width = MAX(1, table_member2[0]);
-               max_lane_width = MIN(pcie_width_cap, table_member2[1]);
-               min_lane_width = min_lane_width > max_lane_width ?
-                                max_lane_width : min_lane_width;
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+                               pcie_table->pcie_gen[i] = pcie_gen_cap;
+                       if (pcie_table->pcie_lane[i] > pcie_width_cap)
+                               pcie_table->pcie_lane[i] = pcie_width_cap;
+               }
        }
-       pcie_table->pcie_lane[0] = min_lane_width;
-       pcie_table->pcie_lane[1] = max_lane_width;
 
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
                smu_pcie_arg = (i << 16 |
@@ -3842,7 +3793,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -3859,6 +3809,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
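
With the device-ID table of lane overrides gone, sienna_cichlid derives its PCIe limits from the same dynamic-switching query: if switching is unusable, every link level is pinned to one capped setting; otherwise each level is merely clamped. A small runnable model of the two branches, with the tables and caps invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_LINK_LEVELS 2

    static void update_pcie(uint32_t *gen, uint32_t *lane,
                            uint32_t gen_cap, uint32_t width_cap,
                            int dynamic_ok)
    {
            int i;

            if (!dynamic_ok) {
                    /* Cap to the table's top level, then pin every level to
                     * one setting so the link never has to retrain. */
                    if (gen[NUM_LINK_LEVELS - 1] < gen_cap)
                            gen_cap = gen[NUM_LINK_LEVELS - 1];
                    if (lane[NUM_LINK_LEVELS - 1] < width_cap)
                            width_cap = lane[NUM_LINK_LEVELS - 1];
                    for (i = 0; i < NUM_LINK_LEVELS; i++) {
                            gen[i] = gen_cap;
                            lane[i] = width_cap;
                    }
            } else {
                    /* Dynamic switching works: just clamp each level. */
                    for (i = 0; i < NUM_LINK_LEVELS; i++) {
                            if (gen[i] > gen_cap)
                                    gen[i] = gen_cap;
                            if (lane[i] > width_cap)
                                    lane[i] = width_cap;
                    }
            }
    }

    int main(void)
    {
            uint32_t gen[NUM_LINK_LEVELS] = { 1, 3 };   /* invented table */
            uint32_t lane[NUM_LINK_LEVELS] = { 2, 6 };

            update_pcie(gen, lane, 4, 6, 0);
            printf("forced: gen %u/%u lane %u/%u\n",      /* gen 3/3 lane 6/6 */
                   (unsigned)gen[0], (unsigned)gen[1],
                   (unsigned)lane[0], (unsigned)lane[1]);
            return 0;
    }
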
index 067b4e0b026c0b9ce76391723bc45a6492b7afcb..185d0b50ee8eabe8425d9a0e6e01a90cc68b204e 100644
@@ -1854,6 +1854,86 @@ static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v2_3);
 }
 
+static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu,
+                                           void **table)
+{
+       SmuMetrics_t metrics;
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v2_4 *gpu_metrics =
+                               (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table;
+       int ret = 0;
+
+       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+       if (ret)
+               return ret;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4);
+
+       gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+       gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+       memcpy(&gpu_metrics->temperature_core[0],
+              &metrics.Current.CoreTemperature[0],
+              sizeof(uint16_t) * 4);
+       gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+       gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
+       gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
+       memcpy(&gpu_metrics->average_temperature_core[0],
+              &metrics.Average.CoreTemperature[0],
+              sizeof(uint16_t) * 4);
+       gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
+
+       gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+       gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+       gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+       gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+       gpu_metrics->average_soc_power = metrics.Current.Power[1];
+       gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+
+       gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0];
+       gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1];
+       gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2];
+
+       gpu_metrics->average_cpu_current = metrics.Current.Current[0];
+       gpu_metrics->average_soc_current = metrics.Current.Current[1];
+       gpu_metrics->average_gfx_current = metrics.Current.Current[2];
+
+       memcpy(&gpu_metrics->average_core_power[0],
+              &metrics.Average.CorePower[0],
+              sizeof(uint16_t) * 4);
+
+       gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+       gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+       gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+       gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+       gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+       memcpy(&gpu_metrics->current_coreclk[0],
+              &metrics.Current.CoreFrequency[0],
+              sizeof(uint16_t) * 4);
+       gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+       gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+       gpu_metrics->indep_throttle_status =
+                       smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+                                                          vangogh_throttler_map);
+
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v2_4);
+}
+
 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
                                      void **table)
 {
@@ -1923,23 +2003,34 @@ static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
 {
        uint32_t if_version;
        uint32_t smu_version;
+       uint32_t smu_program;
+       uint32_t fw_version;
        int ret = 0;
 
        ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
-       if (ret) {
+       if (ret)
                return ret;
-       }
 
-       if (smu_version >= 0x043F3E00) {
-               if (if_version < 0x3)
-                       ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+       smu_program = (smu_version >> 24) & 0xff;
+       fw_version = smu_version & 0xffffff;
+       if (smu_program == 6) {
+               if (fw_version >= 0x3F0800)
+                       ret = vangogh_get_gpu_metrics_v2_4(smu, table);
                else
                        ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+
        } else {
-               if (if_version < 0x3)
-                       ret = vangogh_get_legacy_gpu_metrics(smu, table);
-               else
-                       ret = vangogh_get_gpu_metrics(smu, table);
+               if (smu_version >= 0x043F3E00) {
+                       if (if_version < 0x3)
+                               ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+                       else
+                               ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+               } else {
+                       if (if_version < 0x3)
+                               ret = vangogh_get_legacy_gpu_metrics(smu, table);
+                       else
+                               ret = vangogh_get_gpu_metrics(smu, table);
+               }
        }
 
        return ret;
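
The selection logic above splits the 32-bit SMU version into a program id (top byte) and a firmware version (low 24 bits), so program-6 firmware picks the new v2.4 metrics table regardless of the legacy smu_version threshold. The decode in isolation, with the version value invented:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t smu_version = 0x063F0800;                /* example value */
            uint32_t smu_program = (smu_version >> 24) & 0xff;
            uint32_t fw_version  = smu_version & 0xffffff;

            /* program 6, fw 0x3f0800 -> takes the gpu_metrics_v2_4 path */
            printf("program %u, fw 0x%06x, v2_4=%d\n",
                   (unsigned)smu_program, (unsigned)fw_version,
                   smu_program == 6 && fw_version >= 0x3F0800);
            return 0;
    }
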
index 8a8ba25c9ad7cc29437103b22d58555cd8afbe31..a7569354229deb2cfea9e44ee45a8754cafc89a7 100644
@@ -262,15 +262,15 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu,
                        /* mclk levels are in reverse order */
                        *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
-               if(sclk_mask)
+               if (sclk_mask)
                        /* The sclk as gfxclk and has three level about max/min/current */
                        *sclk_mask = 3 - 1;
 
-               if(mclk_mask)
+               if (mclk_mask)
                        /* mclk levels are in reverse order */
                        *mclk_mask = 0;
 
-               if(soc_mask)
+               if (soc_mask)
                        *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
        }
 
index c788aa7a99a9e745d43edeabe2a5740d2e0d10ac..5e408a1958604aaa3f590a6e4615bd9c6c346d5f 100644
@@ -205,7 +205,8 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
        return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
 }
 
-int smu_v12_0_mode2_reset(struct smu_context *smu){
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
 }
 
index e80f122d8aec5bab03ad03b53f71dade1fe5e377..ce50ef46e73fc2a6ab50b5cc8bbae738e168976a 100644
@@ -1525,7 +1525,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -1542,6 +1541,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
index 3856da6c3f3d2f8e43d7ae5c9fbf803ec4691575..9b62b45ebb7f0a38d9527ba19eec124a52bed591 100644
@@ -2424,3 +2424,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
 
        return ret;
 }
+
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+                                    uint32_t pcie_gen_cap,
+                                    uint32_t pcie_width_cap)
+{
+       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+       struct smu_13_0_pcie_table *pcie_table =
+                               &dpm_context->dpm_tables.pcie_table;
+       int num_of_levels = pcie_table->num_of_link_levels;
+       uint32_t smu_pcie_arg;
+       int ret, i;
+
+       if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+               if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+                       pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+               if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+                       pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+               /* Force all levels to use the same settings */
+               for (i = 0; i < num_of_levels; i++) {
+                       pcie_table->pcie_gen[i] = pcie_gen_cap;
+                       pcie_table->pcie_lane[i] = pcie_width_cap;
+               }
+       } else {
+               for (i = 0; i < num_of_levels; i++) {
+                       if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+                               pcie_table->pcie_gen[i] = pcie_gen_cap;
+                       if (pcie_table->pcie_lane[i] > pcie_width_cap)
+                               pcie_table->pcie_lane[i] = pcie_width_cap;
+               }
+       }
+
+       for (i = 0; i < num_of_levels; i++) {
+               smu_pcie_arg = i << 16;
+               smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+               smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_OverridePcieParameters,
+                                                     smu_pcie_arg,
+                                                     NULL);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
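
The new shared helper packs one OverridePcieParameters argument per link level: the level index above bit 16, the gen speed in bits 15:8, and the lane-count code in bits 7:0. Worked with illustrative values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t i = 1, gen = 4, lane = 6;  /* level 1, gen in 15:8, lane in 7:0 */
            uint32_t smu_pcie_arg = (i << 16) | (gen << 8) | lane;

            printf("0x%08x\n", (unsigned)smu_pcie_arg);       /* 0x00010406 */
            return 0;
    }
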
index 124287cbbff815de84cd95af74db24b787689fe2..3d188616ba24ceb788a344932aeb2ac13106a779 100644
@@ -1645,37 +1645,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
        return ret;
 }
 
-static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
-                                             uint32_t pcie_gen_cap,
-                                             uint32_t pcie_width_cap)
-{
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-       struct smu_13_0_pcie_table *pcie_table =
-                               &dpm_context->dpm_tables.pcie_table;
-       uint32_t smu_pcie_arg;
-       int ret, i;
-
-       for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-               if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-                       pcie_table->pcie_gen[i] = pcie_gen_cap;
-               if (pcie_table->pcie_lane[i] > pcie_width_cap)
-                       pcie_table->pcie_lane[i] = pcie_width_cap;
-
-               smu_pcie_arg = i << 16;
-               smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-               smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-               ret = smu_cmn_send_smc_msg_with_param(smu,
-                                                     SMU_MSG_OverridePcieParameters,
-                                                     smu_pcie_arg,
-                                                     NULL);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] = {
        {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
        { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@@ -1765,7 +1734,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
        gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
        gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
 
-       gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+       gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
        gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
        gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
        gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
@@ -2320,7 +2289,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -2337,6 +2305,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
@@ -2654,7 +2623,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .feature_is_enabled = smu_cmn_feature_is_enabled,
        .print_clk_levels = smu_v13_0_0_print_clk_levels,
        .force_clk_levels = smu_v13_0_0_force_clk_levels,
-       .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
+       .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
        .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
        .register_irq_handler = smu_v13_0_register_irq_handler,
        .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
index 6ef12252beb5b9d98839a4c9daa155e632878656..1ac552142763f8ab2a8f21b9234ded19c6fd3b8b 100644
@@ -1763,7 +1763,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        mutex_lock(&adev->pm.mutex);
        r = smu_v13_0_6_request_i2c_xfer(smu, req);
-       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -1780,6 +1779,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
        }
        r = num_msgs;
 fail:
+       mutex_unlock(&adev->pm.mutex);
        kfree(req);
        return r;
 }
index cda4e818aab7e9381273f5ee4739d88f54a41305..b1f0937ccade8f20b972b11dd177a26ec00278a3 100644
@@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
                ret = smu_v13_0_7_get_smu_metrics_data(smu,
-                                                      METRICS_AVERAGE_UCLK,
+                                                      METRICS_CURR_UCLK,
                                                       (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
@@ -1635,37 +1635,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
        return ret;
 }
 
-static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
-                                             uint32_t pcie_gen_cap,
-                                             uint32_t pcie_width_cap)
-{
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-       struct smu_13_0_pcie_table *pcie_table =
-                               &dpm_context->dpm_tables.pcie_table;
-       uint32_t smu_pcie_arg;
-       int ret, i;
-
-       for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-               if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-                       pcie_table->pcie_gen[i] = pcie_gen_cap;
-               if (pcie_table->pcie_lane[i] > pcie_width_cap)
-                       pcie_table->pcie_lane[i] = pcie_width_cap;
-
-               smu_pcie_arg = i << 16;
-               smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-               smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-               ret = smu_cmn_send_smc_msg_with_param(smu,
-                                                     SMU_MSG_OverridePcieParameters,
-                                                     smu_pcie_arg,
-                                                     NULL);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] =
 {
        {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -2234,7 +2203,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .feature_is_enabled = smu_cmn_feature_is_enabled,
        .print_clk_levels = smu_v13_0_7_print_clk_levels,
        .force_clk_levels = smu_v13_0_7_force_clk_levels,
-       .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
+       .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
        .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
        .register_irq_handler = smu_v13_0_register_irq_handler,
        .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
index 3ecb900e6ecdccf814c219431be45095e9524a0a..442d267088bcd969fff465573e0d721a09dd4fc9 100644
@@ -691,7 +691,7 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(fea)   #fea
-static const char__smu_feature_names[] = {
+static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
 };
 
@@ -927,7 +927,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache)
 {
-       struct smu_table_context *smu_table= &smu->smu_table;
+       struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
                smu_table->tables[SMU_TABLE_SMU_METRICS].size;
        int ret = 0;
@@ -969,7 +969,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        struct metrics_table_header *header = (struct metrics_table_header *)table;
        uint16_t structure_size;
 
-#define METRICS_VERSION(a, b)  ((a << 16) | b )
+#define METRICS_VERSION(a, b)  ((a << 16) | b)
 
        switch (METRICS_VERSION(frev, crev)) {
        case METRICS_VERSION(1, 0):
@@ -996,6 +996,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        case METRICS_VERSION(2, 3):
                structure_size = sizeof(struct gpu_metrics_v2_3);
                break;
+       case METRICS_VERSION(2, 4):
+               structure_size = sizeof(struct gpu_metrics_v2_4);
+               break;
        default:
                return;
        }
index c1b89274d2a44ac135f6da34ab537fb232031097..ddf20708370f9906800d198fe4d5bc634a4ca83f 100644
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 menu "ARM devices"
+       depends on DRM
 
 config DRM_HDLCD
        tristate "ARM HDLCD"
index cea3fd5772b574efec47283551543062ca0fa94e..2c661f28410eda2d297a1fe33b0f33e003af775c 100644
@@ -12,6 +12,8 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_bridge.h>
 
 #include "komeda_dev.h"
 #include "komeda_kms.h"
@@ -612,9 +614,11 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
                           struct komeda_crtc *kcrtc)
 {
        struct drm_crtc *crtc = &kcrtc->base;
+       struct drm_device *base = &kms->base;
+       struct drm_bridge *bridge;
        int err;
 
-       err = drm_crtc_init_with_planes(&kms->base, crtc,
+       err = drm_crtc_init_with_planes(base, crtc,
                                        get_crtc_primary(kms, kcrtc), NULL,
                                        &komeda_crtc_funcs, NULL);
        if (err)
@@ -624,6 +628,22 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
 
        crtc->port = kcrtc->master->of_output_port;
 
+       /* Construct an encoder for each pipeline and attach it to the remote
+        * bridge
+        */
+       kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc);
+       err = drm_simple_encoder_init(base, &kcrtc->encoder,
+                                     DRM_MODE_ENCODER_TMDS);
+       if (err)
+               return err;
+
+       bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node,
+                                       KOMEDA_OF_PORT_OUTPUT, 0);
+       if (IS_ERR(bridge))
+               return PTR_ERR(bridge);
+
+       err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0);
+
        drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
 
        return err;
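
Now that the component framework is gone, each komeda pipeline carries its own simple encoder and resolves its downstream device directly from the DT graph. The generic shape of that direct-attach pattern, as a sketch with error unwinding trimmed:

    /* Sketch: create a stub encoder, look up the remote bridge on the
     * given DT output port, then chain the bridge to the encoder. */
    static int attach_output_bridge(struct drm_device *drm,
                                    struct drm_encoder *encoder,
                                    struct device_node *np,
                                    unsigned int port)
    {
            struct drm_bridge *bridge;
            int err;

            err = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
            if (err)
                    return err;

            bridge = devm_drm_of_get_bridge(drm->dev, np, port, 0);
            if (IS_ERR(bridge))
                    return PTR_ERR(bridge);

            return drm_bridge_attach(encoder, bridge, NULL, 0);
    }
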
index cc7664c95a5477c644dca3833c3316f362ae51ac..14ee79becacb56f62a20c5a54efd16a3ec4e7a43 100644
@@ -6,7 +6,7 @@
  */
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
index 28f76e07dd95842bc8015bc317dc8aa49d29bdbe..cb2a2be24c5fffe23edba1d142ac352026f29364 100644
@@ -8,7 +8,6 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/component.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_module.h>
@@ -28,13 +27,11 @@ struct komeda_dev *dev_to_mdev(struct device *dev)
        return mdrv ? mdrv->mdev : NULL;
 }
 
-static void komeda_unbind(struct device *dev)
+static void komeda_platform_remove(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
-       if (!mdrv)
-               return;
-
        komeda_kms_detach(mdrv->kms);
 
        if (pm_runtime_enabled(dev))
@@ -48,8 +45,9 @@ static void komeda_unbind(struct device *dev)
        devm_kfree(dev, mdrv);
 }
 
-static int komeda_bind(struct device *dev)
+static int komeda_platform_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct komeda_drv *mdrv;
        int err;
 
@@ -91,52 +89,6 @@ free_mdrv:
        return err;
 }
 
-static const struct component_master_ops komeda_master_ops = {
-       .bind   = komeda_bind,
-       .unbind = komeda_unbind,
-};
-
-static void komeda_add_slave(struct device *master,
-                            struct component_match **match,
-                            struct device_node *np,
-                            u32 port, u32 endpoint)
-{
-       struct device_node *remote;
-
-       remote = of_graph_get_remote_node(np, port, endpoint);
-       if (remote) {
-               drm_of_component_match_add(master, match, component_compare_of, remote);
-               of_node_put(remote);
-       }
-}
-
-static int komeda_platform_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct component_match *match = NULL;
-       struct device_node *child;
-
-       if (!dev->of_node)
-               return -ENODEV;
-
-       for_each_available_child_of_node(dev->of_node, child) {
-               if (of_node_cmp(child->name, "pipeline") != 0)
-                       continue;
-
-               /* add connector */
-               komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0);
-               komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1);
-       }
-
-       return component_master_add_with_match(dev, &komeda_master_ops, match);
-}
-
-static int komeda_platform_remove(struct platform_device *pdev)
-{
-       component_master_del(&pdev->dev, &komeda_master_ops);
-       return 0;
-}
-
 static const struct of_device_id komeda_of_match[] = {
        { .compatible = "arm,mali-d71", .data = d71_identify, },
        { .compatible = "arm,mali-d32", .data = d71_identify, },
@@ -189,7 +141,7 @@ static const struct dev_pm_ops komeda_pm_ops = {
 
 static struct platform_driver komeda_platform_driver = {
        .probe  = komeda_platform_probe,
-       .remove = komeda_platform_remove,
+       .remove_new = komeda_platform_remove,
        .driver = {
                .name = "komeda",
                .of_match_table = komeda_of_match,
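
The .remove_new conversion (item 5 of the merge summary) switches to the void-returning remove callback: device removal cannot usefully fail, so the old int return was always 0. Before-and-after shape for any platform driver, with the foo_* names hypothetical:

    /* Old style: int return, but nothing useful to do with an error. */
    static int foo_remove_old(struct platform_device *pdev)
    {
            foo_teardown(platform_get_drvdata(pdev));  /* hypothetical */
            return 0;
    }

    /* New style: void return, wired up via .remove_new. */
    static void foo_remove(struct platform_device *pdev)
    {
            foo_teardown(platform_get_drvdata(pdev));
    }

    static struct platform_driver foo_driver = {
            .probe      = foo_probe,                   /* hypothetical */
            .remove_new = foo_remove,
            .driver     = { .name = "foo" },
    };
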
index 62dc64550793e2a30eb60419aa51123334eb1097..9299026701f348dddcd99373d7036170cd95626c 100644
@@ -4,7 +4,6 @@
  * Author: James.Qian.Wang <james.qian.wang@arm.com>
  *
  */
-#include <linux/component.h>
 #include <linux/interrupt.h>
 
 #include <drm/drm_atomic.h>
@@ -305,17 +304,13 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
        if (err)
                goto cleanup_mode_config;
 
-       err = component_bind_all(mdev->dev, kms);
-       if (err)
-               goto cleanup_mode_config;
-
        drm_mode_config_reset(drm);
 
        err = devm_request_irq(drm->dev, mdev->irq,
                               komeda_kms_irq_handler, IRQF_SHARED,
                               drm->driver->name, drm);
        if (err)
-               goto free_component_binding;
+               goto cleanup_mode_config;
 
        drm_kms_helper_poll_init(drm);
 
@@ -327,8 +322,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 
 free_interrupts:
        drm_kms_helper_poll_fini(drm);
-free_component_binding:
-       component_unbind_all(mdev->dev, drm);
 cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
@@ -339,12 +332,10 @@ cleanup_mode_config:
 void komeda_kms_detach(struct komeda_kms_dev *kms)
 {
        struct drm_device *drm = &kms->base;
-       struct komeda_dev *mdev = drm->dev_private;
 
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);
-       component_unbind_all(mdev->dev, drm);
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
index 3a872c292091230f971b54efb6b13fabe6c8395b..6ef65532635706a82e9de80e65f02fd20d5c9e29 100644 (file)
@@ -84,6 +84,9 @@ struct komeda_crtc {
 
        /** @disable_done: this flip_done is for tracing the disable */
        struct completion *disable_done;
+
+       /** @encoder: encoder at the end of the pipeline */
+       struct drm_encoder encoder;
 };
 
 /**
index 12f5a2c7f03d50921e7784efcc94e00eb839e6a3..aa06f98380154a6d444576816cda8490ccbdc4d6 100644
@@ -367,10 +367,9 @@ static int hdlcd_probe(struct platform_device *pdev)
                                               match);
 }
 
-static int hdlcd_remove(struct platform_device *pdev)
+static void hdlcd_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &hdlcd_master_ops);
-       return 0;
 }
 
 static const struct of_device_id  hdlcd_of_match[] = {
@@ -399,7 +398,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
 
 static struct platform_driver hdlcd_platform_driver = {
        .probe          = hdlcd_probe,
-       .remove         = hdlcd_remove,
+       .remove_new     = hdlcd_remove,
        .driver = {
                .name = "hdlcd",
                .pm = &hdlcd_pm_ops,
index c03cfd57b75263a2e1dda01ef770a34e11465d51..62329d5dd992ed749a65411009c032488c580f8b 100644
@@ -12,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/debugfs.h>
 
@@ -935,10 +936,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
                                               match);
 }
 
-static int malidp_platform_remove(struct platform_device *pdev)
+static void malidp_platform_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &malidp_master_ops);
-       return 0;
 }
 
 static int __maybe_unused malidp_pm_suspend(struct device *dev)
@@ -981,7 +981,7 @@ static const struct dev_pm_ops malidp_pm_ops = {
 
 static struct platform_driver malidp_platform_driver = {
        .probe          = malidp_platform_probe,
-       .remove         = malidp_platform_remove,
+       .remove_new     = malidp_platform_remove,
        .driver = {
                .name = "mali-dp",
                .pm = &malidp_pm_ops,
index 5afade25e217df39c00e0d0aa133d561ffe8f297..e5597d7c9ae1a8b215b93df704448eb5b5f7616e 100644
@@ -3,7 +3,7 @@ config DRM_ARMADA
        tristate "DRM support for Marvell Armada SoCs"
        depends on DRM && HAVE_CLK && ARM && MMU
        select DRM_KMS_HELPER
-       select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+       select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
        help
          Support the "LCD" controllers found on the Marvell Armada 510
          devices.  There are two controllers on the device, each controller
index e120144d4b470c33971994442b6bb2e6716d28b9..e8d2fe955909ab2feaf9d2f5cba8132cfcc9fa93 100644
@@ -37,8 +37,6 @@ static const struct drm_ioctl_desc armada_ioctls[] = {
 DEFINE_DRM_GEM_FOPS(armada_drm_fops);
 
 static const struct drm_driver armada_drm_driver = {
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import       = armada_gem_prime_import,
        .dumb_create            = armada_gem_dumb_create,
        .major                  = 1,
index 3943e89cc06c826b768009222aef337cbbc7e2af..d223176912b634921a23b40af960ee5f896a8c68 100644
@@ -34,7 +34,7 @@ static void armada_fbdev_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops armada_fb_ops = {
        .owner          = THIS_MODULE,
-       FB_DEFAULT_IO_OPS,
+       FB_DEFAULT_IOMEM_OPS,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_destroy     = armada_fbdev_fb_destroy,
 };
@@ -209,10 +209,6 @@ void armada_fbdev_setup(struct drm_device *dev)
                goto err_drm_client_init;
        }
 
-       ret = armada_fbdev_client_hotplug(&fbh->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fbh->client);
 
        return;
index f21eb8fb76d87285584ac93a0619cf8f83251f05..3b9bd8ecda137f6dea671247a59b3b614d0b8e7d 100644
@@ -4,6 +4,8 @@
  *  Rewritten from the dovefb driver, and Armada510 manuals.
  */
 
+#include <linux/bitfield.h>
+
 #include <drm/armada_drm.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -445,8 +447,8 @@ static int armada_overlay_get_property(struct drm_plane *plane,
                             drm_to_overlay_state(state)->colorkey_ug,
                             drm_to_overlay_state(state)->colorkey_vb, 0);
        } else if (property == priv->colorkey_mode_prop) {
-               *val = (drm_to_overlay_state(state)->colorkey_mode &
-                       CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+               *val = FIELD_GET(CFG_CKMODE_MASK,
+                                drm_to_overlay_state(state)->colorkey_mode);
        } else if (property == priv->brightness_prop) {
                *val = drm_to_overlay_state(state)->brightness + 256;
        } else if (property == priv->contrast_prop) {
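
FIELD_GET() from <linux/bitfield.h> derives the shift from the mask at compile time, replacing the open-coded mask-and-shift (and its easy-to-miss detail that ffs() is 1-based). A userspace model of the extraction arithmetic, with the mask value invented:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace model of the kernel macro: mask out the field, then
     * divide by the mask's lowest set bit (equivalent to the shift). */
    #define FIELD_GET(mask, val) \
            (((val) & (mask)) / ((mask) & ~((mask) - 1)))

    int main(void)
    {
            uint32_t reg = 0x00050000;
            /* assume a 4-bit mode field at bits 19:16 */
            printf("%u\n", (unsigned)FIELD_GET(0x000f0000u, reg)); /* 5 */
            return 0;
    }
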
index c8c7f821515594d07b13af6e9413e13c9cc23014..d207b03f8357c7380e331615e06cd7e8283873c5 100644
@@ -351,20 +351,18 @@ err_unload:
        return ret;
 }
 
-static int aspeed_gfx_remove(struct platform_device *pdev)
+static void aspeed_gfx_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
 
        sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
        drm_dev_unregister(drm);
        aspeed_gfx_unload(drm);
-
-       return 0;
 }
 
 static struct platform_driver aspeed_gfx_platform_driver = {
        .probe          = aspeed_gfx_probe,
-       .remove         = aspeed_gfx_remove,
+       .remove_new     = aspeed_gfx_remove,
        .driver = {
                .name = "aspeed_gfx",
                .of_match_table = aspeed_gfx_match,
index 6dc1a09504e13ec2bad4bbf095c28a00b710d9e4..fdd9a493aa9c089df437aa6a796ca64ea2d26204 100644
@@ -7,6 +7,17 @@
 #include <drm/drm_print.h>
 #include "ast_drv.h"
 
+bool ast_astdp_is_connected(struct ast_device *ast)
+{
+       if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
+               return false;
+       if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
+               return false;
+       if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
+               return false;
+       return true;
+}
+
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
 {
        struct ast_device *ast = to_ast_device(dev);
index 1bc35a992369d745ce74d9f3b84bb8030e27081b..f10d53b0c94f0b50510796f4e4584fc9a53eb3ec 100644
@@ -272,11 +272,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
        return true;
 }
 
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+bool ast_dp501_is_connected(struct ast_device *ast)
 {
-       struct ast_device *ast = to_ast_device(dev);
-       u32 i, boot_address, offset, data;
-       u32 *pEDIDidx;
+       u32 boot_address, offset, data;
 
        if (ast->config_mode == ast_use_p2a) {
                boot_address = get_fw_base(ast);
@@ -292,14 +290,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
                data = ast_mindwm(ast, boot_address + offset);
                if (!(data & AST_DP501_PNP_CONNECTED))
                        return false;
-
-               /* Read EDID */
-               offset = AST_DP501_EDID_DATA;
-               for (i = 0; i < 128; i += 4) {
-                       data = ast_mindwm(ast, boot_address + offset + i);
-                       pEDIDidx = (u32 *)(ediddata + i);
-                       *pEDIDidx = data;
-               }
        } else {
                if (!ast->dp501_fw_buf)
                        return false;
@@ -319,7 +309,30 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
                data = readl(ast->dp501_fw_buf + offset);
                if (!(data & AST_DP501_PNP_CONNECTED))
                        return false;
+       }
+       return true;
+}
+
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+{
+       struct ast_device *ast = to_ast_device(dev);
+       u32 i, boot_address, offset, data;
+       u32 *pEDIDidx;
+
+       if (!ast_dp501_is_connected(ast))
+               return false;
+
+       if (ast->config_mode == ast_use_p2a) {
+               boot_address = get_fw_base(ast);
 
+               /* Read EDID */
+               offset = AST_DP501_EDID_DATA;
+               for (i = 0; i < 128; i += 4) {
+                       data = ast_mindwm(ast, boot_address + offset + i);
+                       pEDIDidx = (u32 *)(ediddata + i);
+                       *pEDIDidx = data;
+               }
+       } else {
                /* Read EDID */
                offset = AST_DP501_EDID_DATA;
                for (i = 0; i < 128; i += 4) {
@@ -350,7 +363,7 @@ static bool ast_init_dvo(struct drm_device *dev)
                data |= 0x00000500;
                ast_write32(ast, 0x12008, data);
 
-               if (ast->chip == AST2300) {
+               if (IS_AST_GEN4(ast)) {
                        data = ast_read32(ast, 0x12084);
                        /* multi-pins for DVO single-edge */
                        data |= 0xfffe0000;
@@ -366,7 +379,7 @@ static bool ast_init_dvo(struct drm_device *dev)
                        data &= 0xffffffcf;
                        data |= 0x00000020;
                        ast_write32(ast, 0x12090, data);
-               } else { /* AST2400 */
+               } else { /* AST GEN5+ */
                        data = ast_read32(ast, 0x12088);
                        /* multi-pins for DVO single-edge */
                        data |= 0x30000000;
@@ -437,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev)
        struct ast_device *ast = to_ast_device(dev);
        u8 jreg;
 
-       if (ast->chip == AST2300 || ast->chip == AST2400) {
+       if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
                jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
                switch (jreg & 0x0e) {
                case 0x04:
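
The refactoring above splits connection detection out of ast_dp501_read_edid(), so a connector detect path can poll the sink without copying EDID data. A minimal sketch of the 128-byte EDID word copy that remains in both branches, assuming a hypothetical mmio_read32() accessor standing in for ast_mindwm()/readl():

    #include <string.h>

    typedef unsigned char u8;
    typedef unsigned int u32;

    /* Copy one 128-byte EDID block in aligned 32-bit words, as the
     * driver does; memcpy() is the alias-safe form of the u32* cast. */
    void copy_edid_block(u8 *ediddata, u32 (*mmio_read32)(u32), u32 base)
    {
            u32 i, data;

            for (i = 0; i < 128; i += 4) {
                    data = mmio_read32(base + i);
                    memcpy(&ediddata[i], &data, sizeof(data));
            }
    }
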
index 5498a6676f2e8f2eede492b53c34e875ccf81635..848a9f1403e89681d2bb5c48d3cf4e3680fed119 100644 (file)
 #define PCI_CHIP_AST2000 0x2000
 #define PCI_CHIP_AST2100 0x2010
 
+#define __AST_CHIP(__gen, __index)     ((__gen) << 16 | (__index))
 
 enum ast_chip {
-       AST2000,
-       AST2100,
-       AST1100,
-       AST2200,
-       AST2150,
-       AST2300,
-       AST2400,
-       AST2500,
-       AST2600,
+       /* 1st gen */
+       AST1000 = __AST_CHIP(1, 0), // unused
+       AST2000 = __AST_CHIP(1, 1),
+       /* 2nd gen */
+       AST1100 = __AST_CHIP(2, 0),
+       AST2100 = __AST_CHIP(2, 1),
+       AST2050 = __AST_CHIP(2, 2), // unused
+       /* 3rd gen */
+       AST2200 = __AST_CHIP(3, 0),
+       AST2150 = __AST_CHIP(3, 1),
+       /* 4th gen */
+       AST2300 = __AST_CHIP(4, 0),
+       AST1300 = __AST_CHIP(4, 1),
+       AST1050 = __AST_CHIP(4, 2), // unused
+       /* 5th gen */
+       AST2400 = __AST_CHIP(5, 0),
+       AST1400 = __AST_CHIP(5, 1),
+       AST1250 = __AST_CHIP(5, 2), // unused
+       /* 6th gen */
+       AST2500 = __AST_CHIP(6, 0),
+       AST2510 = __AST_CHIP(6, 1),
+       AST2520 = __AST_CHIP(6, 2), // unused
+       /* 7th gen */
+       AST2600 = __AST_CHIP(7, 0),
+       AST2620 = __AST_CHIP(7, 1), // unused
 };
 
+#define __AST_CHIP_GEN(__chip) (((unsigned long)(__chip)) >> 16)
+
 enum ast_tx_chip {
        AST_TX_NONE,
        AST_TX_SIL164,
@@ -166,7 +185,6 @@ struct ast_device {
        void __iomem *dp501_fw_buf;
 
        enum ast_chip chip;
-       bool vga2_clone;
        uint32_t dram_bus_width;
        uint32_t dram_type;
        uint32_t mclk;
@@ -196,6 +214,10 @@ struct ast_device {
                        struct drm_encoder encoder;
                        struct drm_connector connector;
                } astdp;
+               struct {
+                       struct drm_encoder encoder;
+                       struct drm_connector connector;
+               } bmc;
        } output;
 
        bool support_wide_screen;
@@ -219,6 +241,24 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
                                     struct pci_dev *pdev,
                                     unsigned long flags);
 
+static inline unsigned long __ast_gen(struct ast_device *ast)
+{
+       return __AST_CHIP_GEN(ast->chip);
+}
+#define AST_GEN(__ast) __ast_gen(__ast)
+
+static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
+{
+       return __ast_gen(ast) == gen;
+}
+#define IS_AST_GEN1(__ast)     __ast_gen_is_eq(__ast, 1)
+#define IS_AST_GEN2(__ast)     __ast_gen_is_eq(__ast, 2)
+#define IS_AST_GEN3(__ast)     __ast_gen_is_eq(__ast, 3)
+#define IS_AST_GEN4(__ast)     __ast_gen_is_eq(__ast, 4)
+#define IS_AST_GEN5(__ast)     __ast_gen_is_eq(__ast, 5)
+#define IS_AST_GEN6(__ast)     __ast_gen_is_eq(__ast, 6)
+#define IS_AST_GEN7(__ast)     __ast_gen_is_eq(__ast, 7)
+
 #define AST_IO_AR_PORT_WRITE           (0x40)
 #define AST_IO_MISC_PORT_WRITE         (0x42)
 #define AST_IO_VGA_ENABLE_PORT         (0x43)
@@ -258,26 +298,35 @@ static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
        iowrite8(val, ast->ioregs + reg);
 }
 
-static inline void ast_set_index_reg(struct ast_device *ast,
-                                    uint32_t base, uint8_t index,
-                                    uint8_t val)
+static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
 {
        ast_io_write8(ast, base, index);
        ++base;
-       ast_io_write8(ast, base, val);
+       return ast_io_read8(ast, base);
 }
 
-void ast_set_index_reg_mask(struct ast_device *ast,
-                           uint32_t base, uint8_t index,
-                           uint8_t mask, uint8_t val);
-uint8_t ast_get_index_reg(struct ast_device *ast,
-                         uint32_t base, uint8_t index);
-uint8_t ast_get_index_reg_mask(struct ast_device *ast,
-                              uint32_t base, uint8_t index, uint8_t mask);
+static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
+                                       u8 preserve_mask)
+{
+       u8 val = ast_get_index_reg(ast, base, index);
+
+       return val & preserve_mask;
+}
+
+static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
+{
+       ast_io_write8(ast, base, index);
+       ++base;
+       ast_io_write8(ast, base, val);
+}
 
-static inline void ast_open_key(struct ast_device *ast)
+static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
+                                         u8 preserve_mask, u8 val)
 {
-       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+       u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
+
+       tmp |= val;
+       ast_set_index_reg(ast, base, index, tmp);
 }
 
 #define AST_VIDMEM_SIZE_8M    0x00800000
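
The helpers above implement the classic VGA index/data port pattern: write the index to `base`, then access the value at `base + 1`. In the masked setter, `preserve_mask` selects which bits of the current value to keep before OR-ing in `val`. A hedged usage sketch (register 0xa1 and the bit values here are only an example, not taken from the driver):

    /* Keep every bit except bit 2 (mask 0xfb), then set bit 2: */
    ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xfb, 0x04);

    /* Read back only bit 2 of the same register: */
    u8 bit2 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
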
@@ -458,9 +507,6 @@ int ast_mode_config_init(struct ast_device *ast);
 int ast_mm_init(struct ast_device *ast);
 
 /* ast post */
-void ast_enable_vga(struct drm_device *dev);
-void ast_enable_mmio(struct drm_device *dev);
-bool ast_is_vga_enabled(struct drm_device *dev);
 void ast_post_gpu(struct drm_device *dev);
 u32 ast_mindwm(struct ast_device *ast, u32 r);
 void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
@@ -468,6 +514,7 @@ void ast_patch_ahb_2500(struct ast_device *ast);
 /* ast dp501 */
 void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
 bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+bool ast_dp501_is_connected(struct ast_device *ast);
 bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
 u8 ast_get_dp501_max_clk(struct drm_device *dev);
 void ast_init_3rdtx(struct drm_device *dev);
@@ -476,6 +523,7 @@ void ast_init_3rdtx(struct drm_device *dev);
 struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
 
 /* aspeed DP */
+bool ast_astdp_is_connected(struct ast_device *ast);
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
 void ast_dp_launch(struct drm_device *dev);
 void ast_dp_power_on_off(struct drm_device *dev, bool no);
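
The chip enumeration above packs the hardware generation into the upper 16 bits of each enum value, so every IS_AST_GENn() check reduces to a shift and a compare. A standalone illustration (the two macros are copied from the header; main() exists only for demonstration):

    #include <stdio.h>

    #define __AST_CHIP(__gen, __index)     ((__gen) << 16 | (__index))
    #define __AST_CHIP_GEN(__chip)         (((unsigned long)(__chip)) >> 16)

    int main(void)
    {
            unsigned long ast2510 = __AST_CHIP(6, 1);

            /* prints "gen 6, index 1" */
            printf("gen %lu, index %lu\n", __AST_CHIP_GEN(ast2510),
                   ast2510 & 0xffff);
            return 0;
    }
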
index 1f35438f614a7ba441af81e1e564da3ec1dd5d52..dae365ed39696c4d6d9e0c1d891796e63e61b463 100644 (file)
 
 #include "ast_drv.h"
 
-void ast_set_index_reg_mask(struct ast_device *ast,
-                           uint32_t base, uint8_t index,
-                           uint8_t mask, uint8_t val)
+static bool ast_is_vga_enabled(struct drm_device *dev)
 {
-       u8 tmp;
-       ast_io_write8(ast, base, index);
-       tmp = (ast_io_read8(ast, base + 1) & mask) | val;
-       ast_set_index_reg(ast, base, index, tmp);
+       struct ast_device *ast = to_ast_device(dev);
+       u8 ch;
+
+       ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
+
+       return !!(ch & 0x01);
 }
 
-uint8_t ast_get_index_reg(struct ast_device *ast,
-                         uint32_t base, uint8_t index)
+static void ast_enable_vga(struct drm_device *dev)
 {
-       uint8_t ret;
-       ast_io_write8(ast, base, index);
-       ret = ast_io_read8(ast, base + 1);
-       return ret;
+       struct ast_device *ast = to_ast_device(dev);
+
+       ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
+       ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
 }
 
-uint8_t ast_get_index_reg_mask(struct ast_device *ast,
-                              uint32_t base, uint8_t index, uint8_t mask)
+/*
+ * Run this function as part of the HW device cleanup; not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
 {
-       uint8_t ret;
-       ast_io_write8(ast, base, index);
-       ret = ast_io_read8(ast, base + 1) & mask;
-       return ret;
+       struct ast_device *ast = data;
+
+       /* enable standard VGA decode */
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
 }
 
-static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+static int ast_enable_mmio(struct ast_device *ast)
 {
-       struct device_node *np = dev->dev->of_node;
-       struct ast_device *ast = to_ast_device(dev);
+       struct drm_device *dev = &ast->base;
+
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+
+       return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
+}
+
+static void ast_open_key(struct ast_device *ast)
+{
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+}
+
+static int ast_device_config_init(struct ast_device *ast)
+{
+       struct drm_device *dev = &ast->base;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
-       uint32_t data, jregd0, jregd1;
+       struct device_node *np = dev->dev->of_node;
+       uint32_t scu_rev = 0xffffffff;
+       u32 data;
+       u8 jregd0, jregd1;
+
+       /*
+        * Find configuration mode and read SCU revision
+        */
 
-       /* Defaults */
        ast->config_mode = ast_use_defaults;
-       *scu_rev = 0xffffffff;
 
        /* Check if we have device-tree properties */
-       if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
-                                       scu_rev)) {
+       if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
                /* We do, disable P2A access */
                ast->config_mode = ast_use_dt;
-               drm_info(dev, "Using device-tree for configuration\n");
-               return;
-       }
+               scu_rev = data;
+       } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
+               /*
+                * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+                * is disabled. We force using P2A if VGA only mode bit
+                * is set D[7]
+                */
+               jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+               jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+               if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+
+                       /*
+                        * We have a P2A bridge and it is enabled.
+                        */
+
+                       /* Patch AST2500/AST2510 */
+                       if ((pdev->revision & 0xf0) == 0x40) {
+                               if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
+                                       ast_patch_ahb_2500(ast);
+                       }
 
-       /* Not all families have a P2A bridge */
-       if (pdev->device != PCI_CHIP_AST2000)
-               return;
+                       /* Double check that it's actually working */
+                       data = ast_read32(ast, 0xf004);
+                       if ((data != 0xffffffff) && (data != 0x00)) {
+                               ast->config_mode = ast_use_p2a;
 
-       /*
-        * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
-        * is disabled. We force using P2A if VGA only mode bit
-        * is set D[7]
-        */
-       jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
-       jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
-       if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
-               /* Patch AST2500 */
-               if (((pdev->revision & 0xF0) == 0x40)
-                       && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0))
-                       ast_patch_ahb_2500(ast);
-
-               /* Double check it's actually working */
-               data = ast_read32(ast, 0xf004);
-               if ((data != 0xFFFFFFFF) && (data != 0x00)) {
-                       /* P2A works, grab silicon revision */
-                       ast->config_mode = ast_use_p2a;
-
-                       drm_info(dev, "Using P2A bridge for configuration\n");
-
-                       /* Read SCU7c (silicon revision register) */
-                       ast_write32(ast, 0xf004, 0x1e6e0000);
-                       ast_write32(ast, 0xf000, 0x1);
-                       *scu_rev = ast_read32(ast, 0x1207c);
-                       return;
+                               /* Read SCU7c (silicon revision register) */
+                               ast_write32(ast, 0xf004, 0x1e6e0000);
+                               ast_write32(ast, 0xf000, 0x1);
+                               scu_rev = ast_read32(ast, 0x1207c);
+                       }
                }
        }
 
-       /* We have a P2A bridge but it's disabled */
-       drm_info(dev, "P2A bridge disabled, using default configuration\n");
-}
-
-static int ast_detect_chip(struct drm_device *dev, bool *need_post)
-{
-       struct ast_device *ast = to_ast_device(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
-       uint32_t jreg, scu_rev;
+       switch (ast->config_mode) {
+       case ast_use_defaults:
+               drm_info(dev, "Using default configuration\n");
+               break;
+       case ast_use_dt:
+               drm_info(dev, "Using device-tree for configuration\n");
+               break;
+       case ast_use_p2a:
+               drm_info(dev, "Using P2A bridge for configuration\n");
+               break;
+       }
 
        /*
-        * If VGA isn't enabled, we need to enable now or subsequent
-        * access to the scratch registers will fail. We also inform
-        * our caller that it needs to POST the chip
-        * (Assumption: VGA not enabled -> need to POST)
+        * Identify chipset
         */
-       if (!ast_is_vga_enabled(dev)) {
-               ast_enable_vga(dev);
-               drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
-               *need_post = true;
-       } else
-               *need_post = false;
-
-
-       /* Enable extended register access */
-       ast_open_key(ast);
-       ast_enable_mmio(dev);
 
-       /* Find out whether P2A works or whether to use device-tree */
-       ast_detect_config_mode(dev, &scu_rev);
-
-       /* Identify chipset */
        if (pdev->revision >= 0x50) {
                ast->chip = AST2600;
                drm_info(dev, "AST 2600 detected\n");
        } else if (pdev->revision >= 0x40) {
-               ast->chip = AST2500;
-               drm_info(dev, "AST 2500 detected\n");
+               switch (scu_rev & 0x300) {
+               case 0x0100:
+                       ast->chip = AST2510;
+                       drm_info(dev, "AST 2510 detected\n");
+                       break;
+               default:
+                       ast->chip = AST2500;
+                       drm_info(dev, "AST 2500 detected\n");
+               }
        } else if (pdev->revision >= 0x30) {
-               ast->chip = AST2400;
-               drm_info(dev, "AST 2400 detected\n");
+               switch (scu_rev & 0x300) {
+               case 0x0100:
+                       ast->chip = AST1400;
+                       drm_info(dev, "AST 1400 detected\n");
+                       break;
+               default:
+                       ast->chip = AST2400;
+                       drm_info(dev, "AST 2400 detected\n");
+               }
        } else if (pdev->revision >= 0x20) {
-               ast->chip = AST2300;
-               drm_info(dev, "AST 2300 detected\n");
+               switch (scu_rev & 0x300) {
+               case 0x0000:
+                       ast->chip = AST1300;
+                       drm_info(dev, "AST 1300 detected\n");
+                       break;
+               default:
+                       ast->chip = AST2300;
+                       drm_info(dev, "AST 2300 detected\n");
+                       break;
+               }
        } else if (pdev->revision >= 0x10) {
                switch (scu_rev & 0x0300) {
                case 0x0200:
@@ -179,15 +201,21 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        drm_info(dev, "AST 2100 detected\n");
                        break;
                }
-               ast->vga2_clone = false;
        } else {
                ast->chip = AST2000;
                drm_info(dev, "AST 2000 detected\n");
        }
 
+       return 0;
+}
+
+static void ast_detect_widescreen(struct ast_device *ast)
+{
+       u8 jreg;
+
        /* Check if we support wide screen */
-       switch (ast->chip) {
-       case AST2000:
+       switch (AST_GEN(ast)) {
+       case 1:
                ast->support_wide_screen = false;
                break;
        default:
@@ -198,20 +226,23 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        ast->support_wide_screen = true;
                else {
                        ast->support_wide_screen = false;
-                       if (ast->chip == AST2300 &&
-                           (scu_rev & 0x300) == 0x0) /* ast1300 */
+                       if (ast->chip == AST1300)
                                ast->support_wide_screen = true;
-                       if (ast->chip == AST2400 &&
-                           (scu_rev & 0x300) == 0x100) /* ast1400 */
+                       if (ast->chip == AST1400)
                                ast->support_wide_screen = true;
-                       if (ast->chip == AST2500 &&
-                           scu_rev == 0x100)           /* ast2510 */
+                       if (ast->chip == AST2510)
                                ast->support_wide_screen = true;
-                       if (ast->chip == AST2600)               /* ast2600 */
+                       if (IS_AST_GEN7(ast))
                                ast->support_wide_screen = true;
                }
                break;
        }
+}
+
+static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
+{
+       struct drm_device *dev = &ast->base;
+       u8 jreg;
 
        /* Check 3rd Tx option (digital output afaik) */
        ast->tx_chip_types |= AST_TX_NONE_BIT;
@@ -224,15 +255,15 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
         * is at power-on reset, otherwise we'll incorrectly "detect" a
         * SIL164 when there is none.
         */
-       if (!*need_post) {
+       if (!need_post) {
                jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
                if (jreg & 0x80)
                        ast->tx_chip_types = AST_TX_SIL164_BIT;
        }
 
-       if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) {
+       if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
                /*
-                * On AST2300 and 2400, look the configuration set by the SoC in
+                * On AST GEN4+, look at the configuration set by the SoC in
                 * the SOC scratch register #1 bits 11:8 (interestingly marked
                 * as "reserved" in the spec)
                 */
@@ -254,7 +285,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                case 0x0c:
                        ast->tx_chip_types = AST_TX_DP501_BIT;
                }
-       } else if (ast->chip == AST2600) {
+       } else if (IS_AST_GEN7(ast)) {
                if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
                    ASTDP_DPMCU_TX) {
                        ast->tx_chip_types = AST_TX_ASTDP_BIT;
@@ -271,8 +302,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                drm_info(dev, "Using DP501 DisplayPort transmitter\n");
        if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
                drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
-
-       return 0;
 }
 
 static int ast_get_dram_info(struct drm_device *dev)
@@ -286,7 +315,7 @@ static int ast_get_dram_info(struct drm_device *dev)
        case ast_use_dt:
                /*
                 * If some properties are missing, use reasonable
-                * defaults for AST2400
+                * defaults for GEN5
                 */
                if (of_property_read_u32(np, "aspeed,mcr-configuration",
                                         &mcr_cfg))
@@ -309,7 +338,7 @@ static int ast_get_dram_info(struct drm_device *dev)
        default:
                ast->dram_bus_width = 16;
                ast->dram_type = AST_DRAM_1Gx16;
-               if (ast->chip == AST2500)
+               if (IS_AST_GEN6(ast))
                        ast->mclk = 800;
                else
                        ast->mclk = 396;
@@ -321,7 +350,7 @@ static int ast_get_dram_info(struct drm_device *dev)
        else
                ast->dram_bus_width = 32;
 
-       if (ast->chip == AST2500) {
+       if (IS_AST_GEN6(ast)) {
                switch (mcr_cfg & 0x03) {
                case 0:
                        ast->dram_type = AST_DRAM_1Gx16;
@@ -337,7 +366,7 @@ static int ast_get_dram_info(struct drm_device *dev)
                        ast->dram_type = AST_DRAM_8Gx16;
                        break;
                }
-       } else if (ast->chip == AST2300 || ast->chip == AST2400) {
+       } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
                switch (mcr_cfg & 0x03) {
                case 0:
                        ast->dram_type = AST_DRAM_512Mx16;
@@ -395,25 +424,13 @@ static int ast_get_dram_info(struct drm_device *dev)
        return 0;
 }
 
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_device_release(void *data)
-{
-       struct ast_device *ast = data;
-
-       /* enable standard VGA decode */
-       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
-}
-
 struct ast_device *ast_device_create(const struct drm_driver *drv,
                                     struct pci_dev *pdev,
                                     unsigned long flags)
 {
        struct drm_device *dev;
        struct ast_device *ast;
-       bool need_post;
+       bool need_post = false;
        int ret = 0;
 
        ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
@@ -449,7 +466,30 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
                        return ERR_PTR(-EIO);
        }
 
-       ast_detect_chip(dev, &need_post);
+       if (!ast_is_vga_enabled(dev)) {
+               drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+               need_post = true;
+       }
+
+       /*
+        * If VGA isn't enabled, we need to enable now or subsequent
+        * access to the scratch registers will fail.
+        */
+       if (need_post)
+               ast_enable_vga(dev);
+
+       /* Enable extended register access */
+       ast_open_key(ast);
+       ret = ast_enable_mmio(ast);
+       if (ret)
+               return ERR_PTR(ret);
+
+       ret = ast_device_config_init(ast);
+       if (ret)
+               return ERR_PTR(ret);
+
+       ast_detect_widescreen(ast);
+       ast_detect_tx_chip(ast, need_post);
 
        ret = ast_get_dram_info(dev);
        if (ret)
@@ -477,9 +517,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
        if (ret)
                return ERR_PTR(ret);
 
-       ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast);
-       if (ret)
-               return ERR_PTR(ret);
-
        return ast;
 }
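
With ast_enable_mmio() registering its own undo step through devm_add_action_or_reset(), the separate ast_device_release() registration at the end of probe becomes unnecessary; the action runs automatically, in reverse registration order, when the underlying device is released. The general shape of the pattern, using hypothetical foo_* names:

    static void foo_disable(void *data)
    {
            struct foo_device *foo = data;   /* hypothetical device struct */

            foo_hw_disable(foo);             /* hypothetical undo step */
    }

    static int foo_enable(struct device *dev, struct foo_device *foo)
    {
            foo_hw_enable(foo);              /* hypothetical */

            /* Runs foo_disable() immediately if registration fails,
             * or at device teardown otherwise. */
            return devm_add_action_or_reset(dev, foo_disable, foo);
    }
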
index e16af60deef90e200cba3cf28af917bf16c2556c..bc174bd933b9715de989eb609cf18c451aefc667 100644 (file)
@@ -38,8 +38,6 @@ static u32 ast_get_vram_size(struct ast_device *ast)
        u8 jreg;
        u32 vram_size;
 
-       ast_open_key(ast);
-
        vram_size = AST_VIDMEM_DEFAULT_SIZE;
        jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
        switch (jreg & 3) {
index b3c670af6ef2bc309051457d292f6919200e3f5c..32f04ec6c386fa31f015728deb3eac0783585bc6 100644 (file)
@@ -342,7 +342,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
        u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
        u16 temp, precache = 0;
 
-       if ((ast->chip == AST2500 || ast->chip == AST2600) &&
+       if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
            (vbios_mode->enh_table->flags & AST2500PreCatchCRT))
                precache = 40;
 
@@ -384,7 +384,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
 
        // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
-       if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080))
+       if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
                ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
        else
                ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
@@ -466,7 +466,7 @@ static void ast_set_dclk_reg(struct ast_device *ast,
 {
        const struct ast_vbios_dclk_info *clk_info;
 
-       if ((ast->chip == AST2500) || (ast->chip == AST2600))
+       if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
                clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
        else
                clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
@@ -510,17 +510,13 @@ static void ast_set_color_reg(struct ast_device *ast,
 static void ast_set_crtthd_reg(struct ast_device *ast)
 {
        /* Set Threshold */
-       if (ast->chip == AST2600) {
+       if (IS_AST_GEN7(ast)) {
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
-       } else if (ast->chip == AST2300 || ast->chip == AST2400 ||
-           ast->chip == AST2500) {
+       } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
-       } else if (ast->chip == AST2100 ||
-                  ast->chip == AST1100 ||
-                  ast->chip == AST2200 ||
-                  ast->chip == AST2150) {
+       } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
        } else {
@@ -1082,9 +1078,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
                if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
                        return MODE_OK;
 
-               if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
-                   (ast->chip == AST2300) || (ast->chip == AST2400) ||
-                   (ast->chip == AST2500) || (ast->chip == AST2600)) {
+               if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
+                   (ast->chip == AST2200) || // GEN3, but not AST2150 (?)
+                   IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
+                   IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
                        if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
                                return MODE_OK;
 
@@ -1585,8 +1582,20 @@ err_drm_connector_update_edid_property:
        return 0;
 }
 
+static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
+                                                struct drm_modeset_acquire_ctx *ctx,
+                                                bool force)
+{
+       struct ast_device *ast = to_ast_device(connector->dev);
+
+       if (ast_dp501_is_connected(ast))
+               return connector_status_connected;
+       return connector_status_disconnected;
+}
+
 static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
        .get_modes = ast_dp501_connector_helper_get_modes,
+       .detect_ctx = ast_dp501_connector_helper_detect_ctx,
 };
 
 static const struct drm_connector_funcs ast_dp501_connector_funcs = {
@@ -1611,7 +1620,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
        return 0;
 }
@@ -1683,8 +1692,20 @@ err_drm_connector_update_edid_property:
        return 0;
 }
 
+static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
+                                                struct drm_modeset_acquire_ctx *ctx,
+                                                bool force)
+{
+       struct ast_device *ast = to_ast_device(connector->dev);
+
+       if (ast_astdp_is_connected(ast))
+               return connector_status_connected;
+       return connector_status_disconnected;
+}
+
 static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
        .get_modes = ast_astdp_connector_helper_get_modes,
+       .detect_ctx = ast_astdp_connector_helper_detect_ctx,
 };
 
 static const struct drm_connector_funcs ast_astdp_connector_funcs = {
@@ -1709,7 +1730,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
        return 0;
 }
@@ -1738,6 +1759,60 @@ static int ast_astdp_output_init(struct ast_device *ast)
        return 0;
 }
 
+/*
+ * BMC virtual Connector
+ */
+
+static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+{
+       return drm_add_modes_noedid(connector, 4096, 4096);
+}
+
+static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
+       .get_modes = ast_bmc_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+       .reset = drm_atomic_helper_connector_reset,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_bmc_output_init(struct ast_device *ast)
+{
+       struct drm_device *dev = &ast->base;
+       struct drm_crtc *crtc = &ast->crtc;
+       struct drm_encoder *encoder = &ast->output.bmc.encoder;
+       struct drm_connector *connector = &ast->output.bmc.connector;
+       int ret;
+
+       ret = drm_encoder_init(dev, encoder,
+                              &ast_bmc_encoder_funcs,
+                              DRM_MODE_ENCODER_VIRTUAL, "ast_bmc");
+       if (ret)
+               return ret;
+       encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+       ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
+                                DRM_MODE_CONNECTOR_VIRTUAL);
+       if (ret)
+               return ret;
+
+       drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
+
+       ret = drm_connector_attach_encoder(connector, encoder);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
 /*
  * Mode config
  */
@@ -1800,12 +1875,12 @@ int ast_mode_config_init(struct ast_device *ast)
        dev->mode_config.min_height = 0;
        dev->mode_config.preferred_depth = 24;
 
-       if (ast->chip == AST2100 ||
-           ast->chip == AST2200 ||
-           ast->chip == AST2300 ||
-           ast->chip == AST2400 ||
-           ast->chip == AST2500 ||
-           ast->chip == AST2600) {
+       if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
+           ast->chip == AST2200 || // GEN3, but not AST2150 (?)
+           IS_AST_GEN7(ast) ||
+           IS_AST_GEN6(ast) ||
+           IS_AST_GEN5(ast) ||
+           IS_AST_GEN4(ast)) {
                dev->mode_config.max_width = 1920;
                dev->mode_config.max_height = 2048;
        } else {
@@ -1845,8 +1920,13 @@ int ast_mode_config_init(struct ast_device *ast)
                if (ret)
                        return ret;
        }
+       ret = ast_bmc_output_init(ast);
+       if (ret)
+               return ret;
 
        drm_mode_config_reset(dev);
 
+       drm_kms_helper_poll_init(dev);
+
        return 0;
 }
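
The new .detect_ctx callbacks only pay off if something calls them: setting DRM_CONNECTOR_POLL_DISCONNECT in addition to DRM_CONNECTOR_POLL_CONNECT, and starting the poll helper, makes the probe worker re-run detection periodically in both directions. A sketch of the contract, assuming the device has no hotplug interrupt:

    /* Poll for both plug and unplug on connectors without HPD IRQs. */
    connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;

    /* Start the periodic probe worker once all outputs are registered;
     * it invokes .detect_ctx()/.detect() on every polled connector. */
    drm_kms_helper_poll_init(dev);
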
index a005aec18a0209a9bdb6e0dec252a750d3c99409..13e15173f2c5ba1be5bd27f1f8ff60bc05ab7ef3 100644 (file)
 static void ast_post_chip_2300(struct drm_device *dev);
 static void ast_post_chip_2500(struct drm_device *dev);
 
-void ast_enable_vga(struct drm_device *dev)
-{
-       struct ast_device *ast = to_ast_device(dev);
-
-       ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
-       ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
-}
-
-void ast_enable_mmio(struct drm_device *dev)
-{
-       struct ast_device *ast = to_ast_device(dev);
-
-       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
-}
-
-
-bool ast_is_vga_enabled(struct drm_device *dev)
-{
-       struct ast_device *ast = to_ast_device(dev);
-       u8 ch;
-
-       ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
-
-       return !!(ch & 0x01);
-}
-
 static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
 static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
 
 static void
 ast_set_def_ext_reg(struct drm_device *dev)
 {
        struct ast_device *ast = to_ast_device(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
        u8 i, index, reg;
        const u8 *ext_reg_info;
 
@@ -79,13 +51,9 @@ ast_set_def_ext_reg(struct drm_device *dev)
        for (i = 0x81; i <= 0x9f; i++)
                ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
 
-       if (ast->chip == AST2300 || ast->chip == AST2400 ||
-           ast->chip == AST2500) {
-               if (pdev->revision >= 0x20)
-                       ext_reg_info = extreginfo_ast2300;
-               else
-                       ext_reg_info = extreginfo_ast2300a0;
-       } else
+       if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
+               ext_reg_info = extreginfo_ast2300;
+       else
                ext_reg_info = extreginfo;
 
        index = 0xa0;
@@ -104,8 +72,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
 
        /* Enable RAMDAC for A1 */
        reg = 0x04;
-       if (ast->chip == AST2300 || ast->chip == AST2400 ||
-           ast->chip == AST2500)
+       if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
                reg |= 0x20;
        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
 }
@@ -281,7 +248,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
        j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
 
        if ((j & 0x80) == 0) { /* VGA only */
-               if (ast->chip == AST2000) {
+               if (IS_AST_GEN1(ast)) {
                        dram_reg_info = ast2000_dram_table_data;
                        ast_write32(ast, 0xf004, 0x1e6e0000);
                        ast_write32(ast, 0xf000, 0x1);
@@ -290,8 +257,8 @@ static void ast_init_dram_reg(struct drm_device *dev)
                        do {
                                ;
                        } while (ast_read32(ast, 0x10100) != 0xa8);
-               } else {/* AST2100/1100 */
-                       if (ast->chip == AST2100 || ast->chip == 2200)
+               } else { /* GEN2/GEN3 */
+                       if (ast->chip == AST2100 || ast->chip == AST2200)
                                dram_reg_info = ast2100_dram_table_data;
                        else
                                dram_reg_info = ast1100_dram_table_data;
@@ -313,7 +280,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
                        if (dram_reg_info->index == 0xff00) {/* delay fn */
                                for (i = 0; i < 15; i++)
                                        udelay(dram_reg_info->data);
-                       } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+                       } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
                                data = dram_reg_info->data;
                                if (ast->dram_type == AST_DRAM_1Gx16)
                                        data = 0x00000d89;
@@ -339,15 +306,13 @@ static void ast_init_dram_reg(struct drm_device *dev)
                                cbrdlli_ast2150(ast, 32); /* 32 bits */
                }
 
-               switch (ast->chip) {
-               case AST2000:
+               switch (AST_GEN(ast)) {
+               case 1:
                        temp = ast_read32(ast, 0x10140);
                        ast_write32(ast, 0x10140, temp | 0x40);
                        break;
-               case AST1100:
-               case AST2100:
-               case AST2200:
-               case AST2150:
+               case 2:
+               case 3:
                        temp = ast_read32(ast, 0x1200c);
                        ast_write32(ast, 0x1200c, temp & 0xfffffffd);
                        temp = ast_read32(ast, 0x12040);
@@ -367,25 +332,16 @@ static void ast_init_dram_reg(struct drm_device *dev)
 void ast_post_gpu(struct drm_device *dev)
 {
        struct ast_device *ast = to_ast_device(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
-       u32 reg;
-
-       pci_read_config_dword(pdev, 0x04, &reg);
-       reg |= 0x3;
-       pci_write_config_dword(pdev, 0x04, reg);
 
-       ast_enable_vga(dev);
-       ast_open_key(ast);
-       ast_enable_mmio(dev);
        ast_set_def_ext_reg(dev);
 
-       if (ast->chip == AST2600) {
+       if (IS_AST_GEN7(ast)) {
                if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
                        ast_dp_launch(dev);
        } else if (ast->config_mode == ast_use_p2a) {
-               if (ast->chip == AST2500)
+               if (IS_AST_GEN6(ast))
                        ast_post_chip_2500(dev);
-               else if (ast->chip == AST2300 || ast->chip == AST2400)
+               else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
                        ast_post_chip_2300(dev);
                else
                        ast_init_dram_reg(dev);
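
The dropped config-space write set the I/O and memory decode bits by hand (offset 0x04 is PCI_COMMAND). The PCI core performs the same enabling when the device is enabled at probe time, which the ast driver already relies on, so the open-coded version was redundant. A sketch of the equivalent effect via the PCI core:

    ret = pcim_enable_device(pdev);  /* sets PCI_COMMAND_IO | PCI_COMMAND_MEMORY */
    if (ret)
            return ret;
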
index 58184cd6ab0bf9dda07c384cd7ac7cd60bfc27de..cc5cf4c2faf795f91bb1fa6f609a5b322cb6ce96 100644 (file)
@@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
        struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
        struct regmap *regmap = crtc->dc->hlcdc->regmap;
        struct drm_display_mode *adj = &c->state->adjusted_mode;
+       struct drm_encoder *encoder = NULL, *en_iter;
+       struct drm_connector *connector = NULL;
        struct atmel_hlcdc_crtc_state *state;
+       struct drm_device *ddev = c->dev;
+       struct drm_connector_list_iter iter;
        unsigned long mode_rate;
        struct videomode vm;
        unsigned long prate;
@@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
        unsigned int cfg = 0;
        int div, ret;
 
+       /* get encoder from crtc */
+       drm_for_each_encoder(en_iter, ddev) {
+               if (en_iter->crtc == c) {
+                       encoder = en_iter;
+                       break;
+               }
+       }
+
+       if (encoder) {
+               /* Get the connector from encoder */
+               drm_connector_list_iter_begin(ddev, &iter);
+               drm_for_each_connector_iter(connector, &iter)
+                       if (connector->encoder == encoder)
+                               break;
+               drm_connector_list_iter_end(&iter);
+       }
+
        ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
        if (ret)
                return;
@@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 
        cfg |= ATMEL_HLCDC_CLKDIV(div);
 
+       if (connector &&
+           connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+               cfg |= ATMEL_HLCDC_CLKPOL;
+
        regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
 
        state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
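
The encoder/connector lookup above exists so the CRTC can see the attached display's bus flags: when the panel or bridge reports DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, the HLCDC drives pixel data on the falling clock edge via ATMEL_HLCDC_CLKPOL. The flag originates on the sink side; a simplified, hypothetical illustration (real panels usually set it through their timing descriptions rather than directly):

    /* A panel/bridge advertises its sampling edge through display_info;
     * the CRTC code above consumes it. */
    connector->display_info.bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
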
index 29603561d50122b9e9e74df4aaade16f050417ac..fa0f9a93d50d937e26a1be6d2e26b116655a505d 100644 (file)
@@ -773,15 +773,13 @@ err_put:
        return ret;
 }
 
-static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
+static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 {
        struct drm_device *ddev = platform_get_drvdata(pdev);
 
        drm_dev_unregister(ddev);
        atmel_hlcdc_dc_unload(ddev);
        drm_dev_put(ddev);
-
-       return 0;
 }
 
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
@@ -826,7 +824,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
 
 static struct platform_driver atmel_hlcdc_dc_platform_driver = {
        .probe  = atmel_hlcdc_dc_drm_probe,
-       .remove = atmel_hlcdc_dc_drm_remove,
+       .remove_new = atmel_hlcdc_dc_drm_remove,
        .driver = {
                .name   = "atmel-hlcdc-display-controller",
                .pm     = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
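
The .remove_new conversion (also applied to aspeed_gfx above) changes only the callback's return type: the driver core ignored the int returned by .remove, so drivers whose teardown cannot fail migrate to the void-returning variant. A sketch of the signature, with a hypothetical foo driver:

    static void foo_remove(struct platform_device *pdev)
    {
            /* teardown that cannot meaningfully fail */
    }

    static struct platform_driver foo_driver = {
            .probe      = foo_probe,         /* hypothetical */
            .remove_new = foo_remove,        /* was: int-returning .remove */
    };
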
index 82c68b04244436ec878331beae5de0d54ed63bfb..44a660a4bdbfc4179807a2d666810ef8fbf5b7bd 100644 (file)
@@ -74,19 +74,19 @@ config DRM_FSL_LDB
          Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
 
 config DRM_ITE_IT6505
-        tristate "ITE IT6505 DisplayPort bridge"
-        depends on OF
+       tristate "ITE IT6505 DisplayPort bridge"
+       depends on OF
        select DRM_DISPLAY_DP_HELPER
        select DRM_DISPLAY_HDCP_HELPER
        select DRM_DISPLAY_HELPER
-        select DRM_DP_AUX_BUS
-        select DRM_KMS_HELPER
-        select DRM_DP_HELPER
-        select EXTCON
-        select CRYPTO
-        select CRYPTO_HASH
-        help
-          ITE IT6505 DisplayPort bridge chip driver.
+       select DRM_DP_AUX_BUS
+       select DRM_KMS_HELPER
+       select DRM_DP_HELPER
+       select EXTCON
+       select CRYPTO
+       select CRYPTO_HASH
+       help
+         ITE IT6505 DisplayPort bridge chip driver.
 
 config DRM_LONTIUM_LT8912B
        tristate "Lontium LT8912B DSI/HDMI bridge"
index 99964f5a5457b7f402ee27f325a43b1cd267da6a..2a6b91f752cb620d44f4e7bb51655523c0a57138 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/device.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
 
index 2254457ab5d0234956b3d6bc59eed230cbc8a71a..2611afd2c1c13662beb06cdce1d7937132c5eb85 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 
 #include <media/cec.h>
@@ -786,8 +786,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
        else
                low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
 
-       regmap_update_bits(adv7511->regmap, 0xfb,
-               0x6, low_refresh_rate << 1);
+       if (adv7511->type == ADV7511)
+               regmap_update_bits(adv7511->regmap, 0xfb,
+                                  0x6, low_refresh_rate << 1);
+       else
+               regmap_update_bits(adv7511->regmap, 0x4a,
+                                  0xc, low_refresh_rate << 2);
+
        regmap_update_bits(adv7511->regmap, 0x17,
                0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
 
index 72ab2ab7708193ce732721d137fd293b2d528a8c..c9e35731e6a1afe08d81a2b6fa3ce5ee28098ac1 100644 (file)
@@ -813,7 +813,7 @@ MODULE_DEVICE_TABLE(of, anx6345_match_table);
 static struct i2c_driver anx6345_driver = {
        .driver = {
                   .name = "anx6345",
-                  .of_match_table = of_match_ptr(anx6345_match_table),
+                  .of_match_table = anx6345_match_table,
                  },
        .probe = anx6345_i2c_probe,
        .remove = anx6345_i2c_remove,
index 06a3e3243e19ef8f3246763f5be19656f4459f9c..800555aef97fb9af093006a222ef5e9db6d056b3 100644 (file)
@@ -1373,7 +1373,6 @@ static const struct i2c_device_id anx78xx_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, anx78xx_id);
 
-#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id anx78xx_match_table[] = {
        { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
        { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
@@ -1382,12 +1381,11 @@ static const struct of_device_id anx78xx_match_table[] = {
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, anx78xx_match_table);
-#endif
 
 static struct i2c_driver anx78xx_driver = {
        .driver = {
                   .name = "anx7814",
-                  .of_match_table = of_match_ptr(anx78xx_match_table),
+                  .of_match_table = anx78xx_match_table,
                  },
        .probe = anx78xx_i2c_probe,
        .remove = anx78xx_i2c_remove,
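
Dropping the #if IS_ENABLED(CONFIG_OF) guard and of_match_ptr() together is deliberate: with CONFIG_OF=n, of_match_ptr() evaluates to NULL, so the now-unguarded match table would be defined but unused and trigger a W=1 warning. Referencing the table directly avoids both. For reference, of_match_ptr() is defined along these lines in <linux/of.h>:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr) (_ptr)
    #else
    #define of_match_ptr(_ptr) NULL
    #endif
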
index 8b985efdc086bed06313c0e613dce6ccf2f0f4f9..51abe42c639e5482a9c870a52461c3f931d42de3 100644 (file)
@@ -206,7 +206,7 @@ static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
 
 static int wait_aux_op_finish(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int val;
        int ret;
 
@@ -233,7 +233,7 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
 static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address,
                             u8 len, u8 *buf)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
        u8 addrh, addrm, addrl;
        u8 cmd;
@@ -426,7 +426,7 @@ static int anx7625_odfc_config(struct anx7625_data *ctx,
                               u8 post_divider)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Config input reference clock frequency 27MHz/19.2MHz */
        ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
@@ -476,7 +476,7 @@ static int anx7625_set_k_value(struct anx7625_data *ctx)
 
 static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        unsigned long m, n;
        u16 htotal;
        int ret;
@@ -574,7 +574,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
 static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx)
 {
        int val;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Swap MIPI-DSI data lane 3 P and N */
        val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP);
@@ -591,7 +591,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx)
 
 {
        int val, ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Swap MIPI-DSI data lane 3 P and N */
        ret = anx7625_swap_dsi_lane3(ctx);
@@ -656,7 +656,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx)
 
 static int anx7625_dsi_config(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n");
@@ -688,7 +688,7 @@ static int anx7625_dsi_config(struct anx7625_data *ctx)
 
 static int anx7625_api_dpi_config(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        u16 freq = ctx->dt.pixelclock.min / 1000;
        int ret;
 
@@ -719,7 +719,7 @@ static int anx7625_api_dpi_config(struct anx7625_data *ctx)
 
 static int anx7625_dpi_config(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n");
@@ -764,7 +764,7 @@ static int anx7625_read_flash_status(struct anx7625_data *ctx)
 static int anx7625_hdcp_key_probe(struct anx7625_data *ctx)
 {
        int ret, val;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        u8 ident[FLASH_BUF_LEN];
 
        ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -814,7 +814,7 @@ static int anx7625_hdcp_key_probe(struct anx7625_data *ctx)
 static int anx7625_hdcp_key_load(struct anx7625_data *ctx)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Select HDCP 1.4 KEY */
        ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -842,7 +842,7 @@ static int anx7625_hdcp_key_load(struct anx7625_data *ctx)
 static int anx7625_hdcp_disable(struct anx7625_data *ctx)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        dev_dbg(dev, "disable HDCP 1.4\n");
 
@@ -863,7 +863,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
 {
        u8 bcap;
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        ret = anx7625_hdcp_key_probe(ctx);
        if (ret) {
@@ -872,11 +872,11 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
        }
 
        /* Read downstream capability */
-       ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
+       ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap);
        if (ret < 0)
                return ret;
 
-       if (!(bcap & 0x01)) {
+       if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) {
                pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
                return 0;
        }
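
The raw AUX addresses and bit values replaced here and in the hunks below correspond to standard DPCD definitions; the relevant constants, as defined in <drm/display/drm_dp.h>:

    #define DP_AUX_HDCP_BCAPS          0x68028
    # define DP_BCAPS_HDCP_CAPABLE     BIT(0)

    #define DP_SET_POWER               0x600
    # define DP_SET_POWER_D0           0x1
    # define DP_SET_POWER_D3           0x2
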
@@ -921,7 +921,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
 static void anx7625_dp_start(struct anx7625_data *ctx)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        u8 data;
 
        if (!ctx->display_timing_valid) {
@@ -931,8 +931,8 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
 
        dev_dbg(dev, "set downstream sink into normal\n");
        /* Downstream sink enter into normal mode */
-       data = 1;
-       ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
+       data = DP_SET_POWER_D0;
+       ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
        if (ret < 0)
                dev_err(dev, "IO error : set sink into normal mode fail\n");
 
@@ -954,7 +954,7 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
 
 static void anx7625_dp_stop(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
        u8 data;
 
@@ -971,8 +971,8 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
 
        dev_dbg(dev, "notify downstream enter into standby\n");
        /* Downstream monitor enter into standby mode */
-       data = 2;
-       ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
+       data = DP_SET_POWER_D3;
+       ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
        if (ret < 0)
                DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
 
@@ -1019,7 +1019,7 @@ static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
 static int sp_tx_get_edid_block(struct anx7625_data *ctx)
 {
        int c = 0;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        sp_tx_aux_wr(ctx, 0x7e);
        sp_tx_aux_rd(ctx, 0x01);
@@ -1041,7 +1041,7 @@ static int edid_read(struct anx7625_data *ctx,
                     u8 offset, u8 *pblock_buf)
 {
        int ret, cnt;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
                sp_tx_aux_wr(ctx, offset);
@@ -1072,7 +1072,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
 {
        u8 cnt;
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Write address only */
        ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -1127,7 +1127,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
        u8 i, j;
        int g_edid_break = 0;
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Address initial */
        ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -1234,7 +1234,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
 
 static void anx7625_power_on(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret, i;
 
        if (!ctx->pdata.low_power_mode) {
@@ -1270,7 +1270,7 @@ reg_err:
 
 static void anx7625_power_standby(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        if (!ctx->pdata.low_power_mode) {
@@ -1300,7 +1300,7 @@ static void anx7625_config(struct anx7625_data *ctx)
 
 static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        /* Reset main ocm */
@@ -1320,7 +1320,7 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
 static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Check interface workable */
        ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
@@ -1366,7 +1366,7 @@ static void anx7625_power_on_init(struct anx7625_data *ctx)
 
 static void anx7625_init_gpio(struct anx7625_data *platform)
 {
-       struct device *dev = &platform->client->dev;
+       struct device *dev = platform->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
 
@@ -1406,7 +1406,7 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
 static void anx7625_start_dp_work(struct anx7625_data *ctx)
 {
        int ret;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        if (ctx->hpd_high_cnt >= 2) {
                DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
@@ -1458,7 +1458,7 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
                                unsigned long wait_us)
 {
        int ret, val;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* Interrupt mode, no need poll HPD status, just return */
        if (ctx->pdata.intp_irq)
@@ -1492,7 +1492,7 @@ static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux,
                                     unsigned long wait_us)
 {
        struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        pm_runtime_get_sync(dev);
@@ -1525,7 +1525,7 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
 
 static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        /* HPD changed */
        DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n",
@@ -1545,7 +1545,7 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
 static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
 {
        int intr_vector, status;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
                                   INTR_ALERT_1, 0xFF);
@@ -1593,18 +1593,20 @@ static void anx7625_work_func(struct work_struct *work)
 
        mutex_lock(&ctx->lock);
 
-       if (pm_runtime_suspended(&ctx->client->dev))
-               goto unlock;
+       if (pm_runtime_suspended(ctx->dev)) {
+               mutex_unlock(&ctx->lock);
+               return;
+       }
 
        event = anx7625_hpd_change_detect(ctx);
+
+       mutex_unlock(&ctx->lock);
+
        if (event < 0)
-               goto unlock;
+               return;
 
        if (ctx->bridge_attached)
                drm_helper_hpd_irq_event(ctx->bridge.dev);
-
-unlock:
-       mutex_unlock(&ctx->lock);
 }
 
 static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
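
The rework above narrows the lock scope: ctx->lock now covers only the hardware access, and drm_helper_hpd_irq_event() runs with the lock already dropped, so the DRM HPD path can never contend with the worker while it holds the mutex. A minimal sketch of the resulting shape (read_hw_event() is a hypothetical stand-in for anx7625_hpd_change_detect()):

        mutex_lock(&ctx->lock);
        event = read_hw_event(ctx);     /* hardware access under the lock */
        mutex_unlock(&ctx->lock);       /* drop it before notifying DRM */

        if (event >= 0 && ctx->bridge_attached)
                drm_helper_hpd_irq_event(ctx->bridge.dev);
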
@@ -1735,7 +1737,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
                                    struct drm_dp_aux_msg *msg)
 {
        struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        u8 request = msg->request & ~DP_AUX_I2C_MOT;
        int ret = 0;
 
@@ -1761,7 +1763,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
 
 static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        struct s_edid_data *p_edid = &ctx->slimport_edid_p;
        int edid_num;
        u8 *edid;
@@ -1797,7 +1799,7 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
 
 static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
 
@@ -2006,7 +2008,7 @@ static const struct hdmi_codec_ops anx7625_codec_ops = {
 
 static void anx7625_unregister_audio(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        if (ctx->audio_pdev) {
                platform_device_unregister(ctx->audio_pdev);
@@ -2042,7 +2044,7 @@ static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx)
 static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
 {
        struct mipi_dsi_device *dsi;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        struct mipi_dsi_host *host;
        const struct mipi_dsi_device_info info = {
                .type = "anx7625",
@@ -2076,7 +2078,7 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
 
 static int anx7625_attach_dsi(struct anx7625_data *ctx)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int ret;
 
        DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
@@ -2102,7 +2104,7 @@ static void hdcp_check_work_func(struct work_struct *work)
 
        dwork = to_delayed_work(work);
        ctx = container_of(dwork, struct anx7625_data, hdcp_work);
-       dev = &ctx->client->dev;
+       dev = ctx->dev;
 
        if (!ctx->connector) {
                dev_err(dev, "HDCP connector is null!");
@@ -2129,7 +2131,7 @@ static void hdcp_check_work_func(struct work_struct *work)
 static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
                                          struct drm_connector_state *state)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        int cp;
 
        dev_dbg(dev, "hdcp state check\n");
@@ -2174,7 +2176,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
        int err;
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
        if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
@@ -2218,7 +2220,7 @@ anx7625_bridge_mode_valid(struct drm_bridge *bridge,
                          const struct drm_display_mode *mode)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");
 
@@ -2239,7 +2241,7 @@ static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
                                    const struct drm_display_mode *mode)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
 
@@ -2285,7 +2287,7 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
                                      struct drm_display_mode *adj)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        u32 hsync, hfp, hbp, hblanking;
        u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
        u32 vref, adj_clock;
@@ -2403,7 +2405,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
                                       struct drm_connector_state *conn_state)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        dev_dbg(dev, "drm bridge atomic check\n");
 
@@ -2417,7 +2419,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
                                         struct drm_bridge_state *state)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
        struct drm_connector *connector;
 
        dev_dbg(dev, "drm atomic enable\n");
@@ -2444,7 +2446,7 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
                                          struct drm_bridge_state *old)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        dev_dbg(dev, "drm atomic disable\n");
 
@@ -2458,7 +2460,7 @@ static enum drm_connector_status
 anx7625_bridge_detect(struct drm_bridge *bridge)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
 
@@ -2469,7 +2471,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
                                            struct drm_connector *connector)
 {
        struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
 
@@ -2494,7 +2496,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
 static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
                                              struct i2c_client *client)
 {
-       struct device *dev = &ctx->client->dev;
+       struct device *dev = ctx->dev;
 
        ctx->i2c.tx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter,
                                                          TX_P0_ADDR >> 1);
@@ -2629,7 +2631,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
 
        pdata = &platform->pdata;
 
-       platform->client = client;
+       platform->dev = &client->dev;
        i2c_set_clientdata(client, platform);
 
        pdata->supplies[0].supply = "vdd10";
index 14f33d6be289fca9f5495e4ee2b2d7ead837fcef..5af819611ebce8055c39664b236552fcc00fe0fb 100644 (file)
@@ -458,7 +458,7 @@ struct anx7625_data {
        int hdcp_cp;
        /* Lock for work queue */
        struct mutex lock;
-       struct i2c_client *client;
+       struct device *dev;
        struct anx7625_i2c_client i2c;
        struct i2c_client *last_client;
        struct timer_list hdcp_timer;
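
The header change above is the heart of the conversion: the driver state now stores a plain struct device pointer instead of the bus-specific i2c_client, which keeps every call site rewritten earlier bus-agnostic. A minimal sketch of the probe-time pattern (example_data and example_i2c_probe are illustrative names, not the driver's):

        static int example_i2c_probe(struct i2c_client *client)
        {
                struct example_data *ctx;

                ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        return -ENOMEM;

                ctx->dev = &client->dev;        /* was: ctx->client = client */
                i2c_set_clientdata(client, ctx);
                return 0;
        }
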
index f50d65f54314b2168776a6a536397f4e307a31d7..7457d38622b0c76928d1b6220bfe7ce76759bb98 100644 (file)
@@ -14,8 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
index f6822dfa380545c3118bb0ad01f0edcfe7d05d0f..6af565ac307ae3e2d3d0da221d96ae7adba89edb 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-dp.h>
 #include <linux/platform_device.h>
 #include "cdns-mhdp8546-hdcp.h"
 #include "cdns-mhdp8546-j721e.h"
 
+static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       /* Enable SW event interrupts */
+       if (mhdp->bridge_attached)
+               writel(readl(mhdp->regs + CDNS_APB_INT_MASK) &
+                      ~CDNS_APB_INT_MASK_SW_EVENT_INT,
+                      mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+       writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
+              CDNS_APB_INT_MASK_SW_EVENT_INT,
+              mhdp->regs + CDNS_APB_INT_MASK);
+}
+
 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
 {
        int ret, empty;
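
The hpd_enable/hpd_disable helpers introduced above centralize the interrupt-mask update and, unlike the open-coded writes they replace later in this patch, preserve the other bits of CDNS_APB_INT_MASK via read-modify-write (the register has inverted sense: a set bit masks the interrupt). A sketch of the same pattern as a generic helper (illustrative only, not driver code):

        static void example_sw_event_irq(void __iomem *regs, bool enable)
        {
                u32 mask = readl(regs + CDNS_APB_INT_MASK);

                if (enable)
                        mask &= ~CDNS_APB_INT_MASK_SW_EVENT_INT; /* 0 = unmasked */
                else
                        mask |= CDNS_APB_INT_MASK_SW_EVENT_INT;  /* 1 = masked */
                writel(mask, regs + CDNS_APB_INT_MASK);
        }
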
@@ -749,9 +768,7 @@ static int cdns_mhdp_fw_activate(const struct firmware *fw,
         * MHDP_HW_STOPPED happens only due to driver removal when
         * bridge should already be detached.
         */
-       if (mhdp->bridge_attached)
-               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
-                      mhdp->regs + CDNS_APB_INT_MASK);
+       cdns_mhdp_bridge_hpd_enable(&mhdp->bridge);
 
        spin_unlock(&mhdp->start_lock);
 
@@ -1740,8 +1757,7 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
 
        /* Enable SW event interrupts */
        if (hw_ready)
-               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
-                      mhdp->regs + CDNS_APB_INT_MASK);
+               cdns_mhdp_bridge_hpd_enable(bridge);
 
        return 0;
 aux_unregister:
@@ -2146,6 +2162,27 @@ cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
        return &cdns_mhdp_state->base;
 }
 
+static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
+                                        struct drm_bridge_state *bridge_state,
+                                        struct drm_crtc_state *crtc_state,
+                                        struct drm_connector_state *conn_state,
+                                        u32 output_fmt,
+                                        unsigned int *num_input_fmts)
+{
+       u32 *input_fmts;
+
+       *num_input_fmts = 0;
+
+       input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+       if (!input_fmts)
+               return NULL;
+
+       *num_input_fmts = 1;
+       input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;
+
+       return input_fmts;
+}
+
 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
                                  struct drm_bridge_state *bridge_state,
                                  struct drm_crtc_state *crtc_state,
@@ -2165,6 +2202,13 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
                return -EINVAL;
        }
 
+       /*
+        * Flags negotiation may be supported in the future.
+        * For now, set the bus flags statically in atomic_check.
+        */
+       if (mhdp->info)
+               bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;
+
        mutex_unlock(&mhdp->link_mutex);
        return 0;
 }
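
Together, the two hunks above move the bridge to the format-negotiation API: atomic_get_input_bus_fmts advertises the single supported input format, and atomic_check pins the input bus flags until real negotiation exists. Per the drm_bridge contract the callback returns a kmalloc()'d array that the caller kfree()s, and must leave *num_input_fmts at 0 when returning NULL. A hedged sketch of a variant advertising several formats (the format list itself is illustrative):

        static u32 *example_get_input_bus_fmts(struct drm_bridge *bridge,
                                               struct drm_bridge_state *bridge_state,
                                               struct drm_crtc_state *crtc_state,
                                               struct drm_connector_state *conn_state,
                                               u32 output_fmt,
                                               unsigned int *num_input_fmts)
        {
                static const u32 fmts[] = {
                        MEDIA_BUS_FMT_RGB121212_1X36,
                        MEDIA_BUS_FMT_RGB888_1X24,
                };
                u32 *input_fmts;

                *num_input_fmts = 0;
                input_fmts = kmemdup(fmts, sizeof(fmts), GFP_KERNEL);
                if (!input_fmts)
                        return NULL;

                *num_input_fmts = ARRAY_SIZE(fmts);
                return input_fmts;
        }
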
@@ -2184,23 +2228,6 @@ static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
        return cdns_mhdp_get_edid(mhdp, connector);
 }
 
-static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
-{
-       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
-
-       /* Enable SW event interrupts */
-       if (mhdp->bridge_attached)
-               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
-                      mhdp->regs + CDNS_APB_INT_MASK);
-}
-
-static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
-{
-       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
-
-       writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
-}
-
 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
        .atomic_enable = cdns_mhdp_atomic_enable,
        .atomic_disable = cdns_mhdp_atomic_disable,
@@ -2210,6 +2237,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
        .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
        .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
        .atomic_reset = cdns_mhdp_bridge_atomic_reset,
+       .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
        .detect = cdns_mhdp_bridge_detect,
        .get_edid = cdns_mhdp_bridge_get_edid,
        .hpd_enable = cdns_mhdp_bridge_hpd_enable,
@@ -2529,8 +2557,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
        mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
                           DRM_BRIDGE_OP_HPD;
        mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-       if (mhdp->info)
-               mhdp->bridge.timings = mhdp->info->timings;
 
        ret = phy_init(mhdp->phy);
        if (ret) {
@@ -2617,7 +2643,7 @@ static const struct of_device_id mhdp_ids[] = {
 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
        { .compatible = "ti,j721e-mhdp8546",
          .data = &(const struct cdns_mhdp_platform_info) {
-                 .timings = &mhdp_ti_j721e_bridge_timings,
+                 .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
                  .ops = &mhdp_ti_j721e_ops,
          },
        },
@@ -2629,7 +2655,7 @@ MODULE_DEVICE_TABLE(of, mhdp_ids);
 static struct platform_driver mhdp_driver = {
        .driver = {
                .name           = "cdns-mhdp8546",
-               .of_match_table = of_match_ptr(mhdp_ids),
+               .of_match_table = mhdp_ids,
        },
        .probe  = cdns_mhdp_probe,
        .remove = cdns_mhdp_remove,
index bedddd510d179d371b0e66aa44e374ac19acb4e1..bad2fc0c7306607a98b84730233c0e984c90576f 100644 (file)
@@ -336,7 +336,7 @@ struct cdns_mhdp_bridge_state {
 };
 
 struct cdns_mhdp_platform_info {
-       const struct drm_bridge_timings *timings;
+       const u32 *input_bus_flags;
        const struct mhdp_platform_ops *ops;
 };
 
index dfe1b59514f74b6b54a5d328d51a37e4728e2c15..12d04be4e242ccda1bb00d8ac8c05661f79abf61 100644 (file)
@@ -71,8 +71,7 @@ const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
        .disable = cdns_mhdp_j721e_disable,
 };
 
-const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = {
-       .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
-                          DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
-                          DRM_BUS_FLAG_DE_HIGH,
-};
+const u32
+mhdp_ti_j721e_bridge_input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+                                      DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
+                                      DRM_BUS_FLAG_DE_HIGH;
index 97d20d115a24279a36845c45139835281fda80b8..5ddca07a4255aa754236dd1feb74c6e112ddfa39 100644 (file)
@@ -14,6 +14,6 @@
 struct mhdp_platform_ops;
 
 extern const struct mhdp_platform_ops mhdp_ti_j721e_ops;
-extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings;
+extern const u32 mhdp_ti_j721e_bridge_input_bus_flags;
 
 #endif /* !CDNS_MHDP8546_J721E_H */
index 8bfce21d6b90048b204483d4ccae4f9205401a8a..d205e755e524ae25f4222f32539c36229df38592 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
index a854eb84e39919d34e14759c3fdc1bde8abfcfae..483c28c7fc99b4520bf33277c6632af0df0aeede 100644 (file)
@@ -607,7 +607,7 @@ static struct i2c_driver ch7033_driver = {
        .remove = ch7033_remove,
        .driver = {
                .name = "ch7033",
-               .of_match_table = of_match_ptr(ch7033_dt_ids),
+               .of_match_table = ch7033_dt_ids,
        },
        .id_table = ch7033_ids,
 };
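
Dropping of_match_ptr() here and in the other drivers below is a W=1 cleanup: when CONFIG_OF is disabled the macro evaluates to NULL, leaving the match table defined but unreferenced and triggering -Wunused-const-variable. Referencing the table directly keeps it always used. The macro, paraphrased from include/linux/of.h:

        #ifdef CONFIG_OF
        #define of_match_ptr(ptr)       ptr
        #else
        #define of_match_ptr(ptr)       NULL
        #endif
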
index f7f436cf96e0a34db23dff5c368bb1d306c7088e..08bd5695ddae0f59bdb104b588876a41110e7ff4 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
index b8e52156b07a940f9ad6314b431f51671c17e644..0e4bac7dd04ff199df759537d19b20d26e0dfb1a 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
index 386032a02599062f175f7e8f16263294dac5deb7..21471a9a28b22082fc38656c4f24d707ee7c19ae 100644 (file)
@@ -9,9 +9,9 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
index c806576b1e225d439e1468499523463a1d0970ed..7984da9c0a355246eecdb270c1acf7d349372da1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
index 504d51c42f798194423e08d43ce35f5df9b77334..6c2fcd8b87809f236100fe7b76ee9d944759fdd5 100644 (file)
@@ -404,7 +404,7 @@ struct debugfs_entries {
 struct it6505 {
        struct drm_dp_aux aux;
        struct drm_bridge bridge;
-       struct i2c_client *client;
+       struct device *dev;
        struct it6505_drm_dp_link link;
        struct it6505_platform_data pdata;
        /*
@@ -524,7 +524,7 @@ static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
 {
        unsigned int value;
        int err;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (!it6505->powered)
                return -ENODEV;
@@ -542,7 +542,7 @@ static int it6505_write(struct it6505 *it6505, unsigned int reg_addr,
                        unsigned int reg_val)
 {
        int err;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (!it6505->powered)
                return -ENODEV;
@@ -562,7 +562,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
                           unsigned int mask, unsigned int value)
 {
        int err;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (!it6505->powered)
                return -ENODEV;
@@ -580,7 +580,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
 static void it6505_debug_print(struct it6505 *it6505, unsigned int reg,
                               const char *prefix)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int val;
 
        if (!drm_debug_enabled(DRM_UT_DRIVER))
@@ -599,7 +599,7 @@ static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset)
 {
        u8 value;
        int ret;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value);
        if (ret < 0) {
@@ -613,7 +613,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset,
                             u8 datain)
 {
        int ret;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain);
        if (ret < 0) {
@@ -626,7 +626,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset,
 static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num)
 {
        int ret;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num);
 
@@ -643,7 +643,7 @@ static void it6505_dump(struct it6505 *it6505)
 {
        unsigned int i, j;
        u8 regs[16];
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        for (i = 0; i <= 0xff; i += 16) {
                for (j = 0; j < 16; j++)
@@ -682,7 +682,7 @@ static int it6505_read_word(struct it6505 *it6505, unsigned int reg)
 
 static void it6505_calc_video_info(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int hsync_pol, vsync_pol, interlaced;
        int htotal, hdes, hdew, hfph, hsyncw;
        int vtotal, vdes, vdew, vfph, vsyncw;
@@ -926,7 +926,7 @@ static int it6505_aux_wait(struct it6505 *it6505)
 {
        int status;
        unsigned long timeout;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
 
@@ -1141,7 +1141,7 @@ static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
                                 size_t len)
 {
        struct it6505 *it6505 = data;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        enum aux_cmd_reply reply;
        int offset, ret, aux_retry = 100;
 
@@ -1201,7 +1201,7 @@ static int it6505_send_video_infoframe(struct it6505 *it6505,
 {
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        int err;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
        if (err < 0) {
@@ -1231,7 +1231,7 @@ static void it6505_get_extcon_property(struct it6505 *it6505)
 {
        int err;
        union extcon_property_value property;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (it6505->extcon && !it6505->lane_swap_disabled) {
                err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP,
@@ -1382,7 +1382,7 @@ static void it6505_enable_audio_source(struct it6505 *it6505)
 
 static void it6505_enable_audio_infoframe(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F };
 
        DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x",
@@ -1411,7 +1411,7 @@ static void it6505_disable_audio(struct it6505 *it6505)
 
 static void it6505_enable_audio(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int regbe;
 
        DRM_DEV_DEBUG_DRIVER(dev, "start");
@@ -1446,7 +1446,7 @@ static bool it6505_use_step_train_check(struct it6505 *it6505)
 
 static void it6505_parse_link_capabilities(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        struct it6505_drm_dp_link *link = &it6505->link;
        int bcaps;
 
@@ -1557,7 +1557,7 @@ static void it6505_lane_count_setup(struct it6505 *it6505)
 
 static void it6505_link_training_setup(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (it6505->enable_enhanced_frame)
                it6505_set_bits(it6505, REG_DATA_MUTE_CTRL,
@@ -1708,7 +1708,7 @@ it6505_step_cr_train(struct it6505 *it6505,
                                        FORCE_CR_DONE);
                        return true;
                }
-               DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done");
+               DRM_DEV_DEBUG_DRIVER(it6505->dev, "cr not done");
 
                if (it6505_check_max_voltage_swing_reached(lane_level_config,
                                                           it6505->lane_count))
@@ -1785,7 +1785,7 @@ it6505_step_eq_train(struct it6505 *it6505,
                                        FORCE_EQ_DONE);
                        return true;
                }
-               DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done");
+               DRM_DEV_DEBUG_DRIVER(it6505->dev, "eq not done");
 
                for (i = 0; i < it6505->lane_count; i++) {
                        lane_voltage_pre_emphasis->voltage_swing[i] =
@@ -1820,7 +1820,7 @@ static bool it6505_link_start_step_train(struct it6505 *it6505)
                .pre_emphasis = { 0 },
        };
 
-       DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+       DRM_DEV_DEBUG_DRIVER(it6505->dev, "start");
        err = it6505_drm_dp_link_configure(it6505);
 
        if (err < 0)
@@ -1854,7 +1854,7 @@ static void it6505_reset_hdcp(struct it6505 *it6505)
 
 static void it6505_start_hdcp(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "start");
        it6505_reset_hdcp(it6505);
@@ -1882,7 +1882,7 @@ static bool it6505_hdcp_is_ksv_valid(u8 *ksv)
 
 static void it6505_hdcp_part1_auth(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        u8 hdcp_bcaps;
 
        it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00);
@@ -1923,7 +1923,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
        struct shash_desc *desc;
        struct crypto_shash *tfm;
        int err;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm)) {
@@ -1948,7 +1948,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
 
 static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        u8 binfo[2];
        int down_stream_count, i, err, msg_count = 0;
 
@@ -2012,7 +2012,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
 
 static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        u8 av[5][4], bv[5][4];
        int i, err;
 
@@ -2045,7 +2045,7 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
 {
        struct it6505 *it6505 = container_of(work, struct it6505,
                                             hdcp_wait_ksv_list);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        unsigned int timeout = 5000;
        u8 bstatus = 0;
        bool ksv_list_check;
@@ -2087,7 +2087,7 @@ static void it6505_hdcp_work(struct work_struct *work)
 {
        struct it6505 *it6505 = container_of(work, struct it6505,
                                             hdcp_work.work);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int ret;
        u8 link_status[DP_LINK_STATUS_SIZE] = { 0 };
 
@@ -2128,7 +2128,7 @@ static void it6505_hdcp_work(struct work_struct *work)
 
 static void it6505_show_hdcp_info(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int i;
        u8 *sha1 = it6505->sha1_input;
 
@@ -2162,7 +2162,7 @@ static void it6505_stop_link_train(struct it6505 *it6505)
 
 static void it6505_link_train_ok(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        it6505->link_state = LINK_OK;
        /* disable mute, enable avi info frame */
@@ -2181,7 +2181,7 @@ static void it6505_link_train_ok(struct it6505 *it6505)
 
 static void it6505_link_step_train_process(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int ret, i, step_retry = 3;
 
        DRM_DEV_DEBUG_DRIVER(dev, "Start step train");
@@ -2219,7 +2219,7 @@ static void it6505_link_step_train_process(struct it6505 *it6505)
 static void it6505_link_training_work(struct work_struct *work)
 {
        struct it6505 *it6505 = container_of(work, struct it6505, link_works);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int ret;
 
        DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
@@ -2267,7 +2267,7 @@ static void it6505_remove_edid(struct it6505 *it6505)
 
 static int it6505_process_hpd_irq(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int ret, dpcd_sink_count, dp_irq_vector, bstatus;
        u8 link_status[DP_LINK_STATUS_SIZE];
 
@@ -2331,7 +2331,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
 
 static void it6505_irq_hpd(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int dp_sink_count;
 
        it6505->hpd_state = it6505_get_sink_hpd_status(it6505);
@@ -2393,7 +2393,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
 
 static void it6505_irq_hpd_irq(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt");
 
@@ -2403,7 +2403,7 @@ static void it6505_irq_hpd_irq(struct it6505 *it6505)
 
 static void it6505_irq_scdt(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        bool data;
 
        data = it6505_get_video_status(it6505);
@@ -2418,7 +2418,7 @@ static void it6505_irq_scdt(struct it6505 *it6505)
 
 static void it6505_irq_hdcp_done(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt");
        it6505->hdcp_status = HDCP_AUTH_DONE;
@@ -2427,7 +2427,7 @@ static void it6505_irq_hdcp_done(struct it6505 *it6505)
 
 static void it6505_irq_hdcp_fail(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt");
        it6505->hdcp_status = HDCP_AUTH_IDLE;
@@ -2437,14 +2437,14 @@ static void it6505_irq_hdcp_fail(struct it6505 *it6505)
 
 static void it6505_irq_aux_cmd_fail(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt");
 }
 
 static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
        schedule_work(&it6505->hdcp_wait_ksv_list);
@@ -2452,7 +2452,7 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
 
 static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt");
 
@@ -2462,7 +2462,7 @@ static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
 
 static void it6505_irq_link_train_fail(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt");
        schedule_work(&it6505->link_works);
@@ -2470,7 +2470,7 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505)
 
 static void it6505_irq_video_fifo_error(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
        it6505->auto_train_retry = AUTO_TRAIN_RETRY;
@@ -2481,7 +2481,7 @@ static void it6505_irq_video_fifo_error(struct it6505 *it6505)
 
 static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
        it6505->auto_train_retry = AUTO_TRAIN_RETRY;
@@ -2498,7 +2498,7 @@ static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
 static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
 {
        struct it6505 *it6505 = data;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        static const struct {
                int bit;
                void (*handler)(struct it6505 *it6505);
@@ -2550,7 +2550,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
 
 static int it6505_poweron(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        struct it6505_platform_data *pdata = &it6505->pdata;
        int err;
 
@@ -2599,7 +2599,7 @@ static int it6505_poweron(struct it6505 *it6505)
 
 static int it6505_poweroff(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        struct it6505_platform_data *pdata = &it6505->pdata;
        int err;
 
@@ -2633,7 +2633,7 @@ static int it6505_poweroff(struct it6505 *it6505)
 
 static enum drm_connector_status it6505_detect(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        enum drm_connector_status status = connector_status_disconnected;
        int dp_sink_count;
 
@@ -2694,7 +2694,7 @@ static int it6505_extcon_notifier(struct notifier_block *self,
 static void it6505_extcon_work(struct work_struct *work)
 {
        struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int state, ret;
 
        if (it6505->enable_drv_hold)
@@ -2739,11 +2739,11 @@ unlock:
 static int it6505_use_notifier_module(struct it6505 *it6505)
 {
        int ret;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        it6505->event_nb.notifier_call = it6505_extcon_notifier;
        INIT_WORK(&it6505->extcon_wq, it6505_extcon_work);
-       ret = devm_extcon_register_notifier(&it6505->client->dev,
+       ret = devm_extcon_register_notifier(it6505->dev,
                                            it6505->extcon, EXTCON_DISP_DP,
                                            &it6505->event_nb);
        if (ret) {
@@ -2759,7 +2759,7 @@ static int it6505_use_notifier_module(struct it6505 *it6505)
 static void it6505_remove_notifier_module(struct it6505 *it6505)
 {
        if (it6505->extcon) {
-               devm_extcon_unregister_notifier(&it6505->client->dev,
+               devm_extcon_unregister_notifier(it6505->dev,
                                                it6505->extcon, EXTCON_DISP_DP,
                                                &it6505->event_nb);
 
@@ -2772,7 +2772,7 @@ static void __maybe_unused it6505_delayed_audio(struct work_struct *work)
        struct it6505 *it6505 = container_of(work, struct it6505,
                                             delayed_audio.work);
 
-       DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+       DRM_DEV_DEBUG_DRIVER(it6505->dev, "start");
 
        if (!it6505->powered)
                return;
@@ -2785,7 +2785,7 @@ static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505,
                                                       struct hdmi_codec_params
                                                       *params)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int i = 0;
 
        DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__,
@@ -2869,7 +2869,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
                                enum drm_bridge_attach_flags flags)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        int ret;
 
        if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
@@ -2933,7 +2933,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
                                        struct drm_bridge_state *old_state)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        struct drm_atomic_state *state = old_state->base.state;
        struct hdmi_avi_infoframe frame;
        struct drm_crtc_state *crtc_state;
@@ -2989,7 +2989,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
                                         struct drm_bridge_state *old_state)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "start");
 
@@ -3004,7 +3004,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
                                            struct drm_bridge_state *old_state)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "start");
 
@@ -3015,7 +3015,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
                                              struct drm_bridge_state *old_state)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        DRM_DEV_DEBUG_DRIVER(dev, "start");
 
@@ -3034,7 +3034,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
                                           struct drm_connector *connector)
 {
        struct it6505 *it6505 = bridge_to_it6505(bridge);
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        if (!it6505->cached_edid) {
                it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
@@ -3086,7 +3086,7 @@ static const struct dev_pm_ops it6505_bridge_pm_ops = {
 static int it6505_init_pdata(struct it6505 *it6505)
 {
        struct it6505_platform_data *pdata = &it6505->pdata;
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        /* 1.0V digital core power regulator  */
        pdata->pwr18 = devm_regulator_get(dev, "pwr18");
@@ -3128,7 +3128,7 @@ static int it6505_get_data_lanes_count(const struct device_node *endpoint,
 
 static void it6505_parse_dt(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
        struct device_node *np = dev->of_node, *ep = NULL;
        int len;
        u64 link_frequencies;
@@ -3333,7 +3333,7 @@ static void debugfs_create_files(struct it6505 *it6505)
 
 static void debugfs_init(struct it6505 *it6505)
 {
-       struct device *dev = &it6505->client->dev;
+       struct device *dev = it6505->dev;
 
        it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
 
@@ -3375,7 +3375,7 @@ static int it6505_i2c_probe(struct i2c_client *client)
 
        it6505->bridge.of_node = client->dev.of_node;
        it6505->connector_status = connector_status_disconnected;
-       it6505->client = client;
+       it6505->dev = &client->dev;
        i2c_set_clientdata(client, it6505);
 
        /* get extcon device from DTS */
index aa8d47e7f40da71aa6d3d62bb00b20adda9459ae..4d404f5ef87ebb1bb9ec0ba0b09ab452b081474d 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/i2c.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
index 2a57e804ea020c4df29780eb1a95d1dfb9ea1932..22c84d29c2bc580bfa8089ddb59617947beed319 100644 (file)
@@ -28,6 +28,8 @@
 #define EDID_BLOCK_SIZE        128
 #define EDID_NUM_BLOCKS        2
 
+#define FW_FILE "lt9611uxc_fw.bin"
+
 struct lt9611uxc {
        struct device *dev;
        struct drm_bridge bridge;
@@ -754,7 +756,7 @@ static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc)
                REG_SEQ0(0x805a, 0x00),
        };
 
-       ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev);
+       ret = request_firmware(&fw, FW_FILE, lt9611uxc->dev);
        if (ret < 0)
                return ret;
 
@@ -1019,3 +1021,5 @@ module_i2c_driver(lt9611uxc_driver);
 
 MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
 MODULE_LICENSE("GPL v2");
+
+MODULE_FIRMWARE(FW_FILE);
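
MODULE_FIRMWARE() records the firmware file name in the module's metadata so that initramfs tooling (anything scanning modinfo) knows to bundle the blob; defining the name once keeps request_firmware() and the metadata in sync. A minimal sketch of the pairing (all names illustrative):

        #define FW_FILE "example_fw.bin"

        static int example_load_fw(struct device *dev)
        {
                const struct firmware *fw;
                int ret;

                ret = request_firmware(&fw, FW_FILE, dev);
                if (ret < 0)
                        return ret;
                /* ... program the device from fw->data / fw->size ... */
                release_firmware(fw);
                return 0;
        }

        MODULE_FIRMWARE(FW_FILE);
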
index 67368f23d4aa346825ea1e5fc72109ef78ecef78..8c5668dca0c4bc1b84fce4ed2f525db56e0f5952 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
index 4a5f5c4f5dccb91671c20a9ec6404daf3d9e49fa..8d54091ec66e4ef66d5d8a262f0c5aecd5676898 100644 (file)
@@ -16,8 +16,8 @@
 #include <linux/module.h>
 #include <linux/mux/consumer.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 #include <linux/sys_soc.h>
index c9b6cb7678e332024122b8e05b32a3deedd18dd8..ae3ab9262ef1f184873929c7892d648a32c7cf37 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/pm.h>
 #include <linux/regulator/consumer.h>
 
index 8801cdd033b5e40e727274f0d581780121107976..8161b1a1a4b12fa50b0848deef27e7c7dace209a 100644 (file)
@@ -105,7 +105,6 @@ struct ps8640 {
        struct gpio_desc *gpio_reset;
        struct gpio_desc *gpio_powerdown;
        struct device_link *link;
-       struct edid *edid;
        bool pre_enabled;
        bool need_post_hpd_delay;
 };
@@ -155,23 +154,6 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
        return container_of(aux, struct ps8640, aux);
 }
 
-static bool ps8640_of_panel_on_aux_bus(struct device *dev)
-{
-       struct device_node *bus, *panel;
-
-       bus = of_get_child_by_name(dev->of_node, "aux-bus");
-       if (!bus)
-               return false;
-
-       panel = of_get_child_by_name(bus, "panel");
-       of_node_put(bus);
-       if (!panel)
-               return false;
-       of_node_put(panel);
-
-       return true;
-}
-
 static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wait_us)
 {
        struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -539,50 +521,6 @@ static void ps8640_bridge_detach(struct drm_bridge *bridge)
                device_link_del(ps_bridge->link);
 }
 
-static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
-                                          struct drm_connector *connector)
-{
-       struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
-       struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
-       bool poweroff = !ps_bridge->pre_enabled;
-
-       if (!ps_bridge->edid) {
-               /*
-                * When we end calling get_edid() triggered by an ioctl, i.e
-                *
-                *   drm_mode_getconnector (ioctl)
-                *     -> drm_helper_probe_single_connector_modes
-                *        -> drm_bridge_connector_get_modes
-                *           -> ps8640_bridge_get_edid
-                *
-                * We need to make sure that what we need is enabled before
-                * reading EDID, for this chip, we need to do a full poweron,
-                * otherwise it will fail.
-                */
-               if (poweroff)
-                       drm_atomic_bridge_chain_pre_enable(bridge,
-                                                          connector->state->state);
-
-               ps_bridge->edid = drm_get_edid(connector,
-                                              ps_bridge->page[PAGE0_DP_CNTL]->adapter);
-
-               /*
-                * If we call the get_edid() function without having enabled the
-                * chip before, return the chip to its original power state.
-                */
-               if (poweroff)
-                       drm_atomic_bridge_chain_post_disable(bridge,
-                                                            connector->state->state);
-       }
-
-       if (!ps_bridge->edid) {
-               dev_err(dev, "Failed to get EDID\n");
-               return NULL;
-       }
-
-       return drm_edid_duplicate(ps_bridge->edid);
-}
-
 static void ps8640_runtime_disable(void *data)
 {
        pm_runtime_dont_use_autosuspend(data);
@@ -592,7 +530,6 @@ static void ps8640_runtime_disable(void *data)
 static const struct drm_bridge_funcs ps8640_bridge_funcs = {
        .attach = ps8640_bridge_attach,
        .detach = ps8640_bridge_detach,
-       .get_edid = ps8640_bridge_get_edid,
        .atomic_post_disable = ps8640_atomic_post_disable,
        .atomic_pre_enable = ps8640_atomic_pre_enable,
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -705,14 +642,6 @@ static int ps8640_probe(struct i2c_client *client)
        ps_bridge->bridge.of_node = dev->of_node;
        ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
 
-       /*
-        * In the device tree, if panel is listed under aux-bus of the bridge
-        * node, panel driver should be able to retrieve EDID by itself using
-        * aux-bus. So let's not set DRM_BRIDGE_OP_EDID here.
-        */
-       if (!ps8640_of_panel_on_aux_bus(&client->dev))
-               ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
-
        /*
         * Get MIPI DSI resources early. These can return -EPROBE_DEFER so
         * we want to get them out of the way sooner.
@@ -777,13 +706,6 @@ static int ps8640_probe(struct i2c_client *client)
        return ret;
 }
 
-static void ps8640_remove(struct i2c_client *client)
-{
-       struct ps8640 *ps_bridge = i2c_get_clientdata(client);
-
-       kfree(ps_bridge->edid);
-}
-
 static const struct of_device_id ps8640_match[] = {
        { .compatible = "parade,ps8640" },
        { }
@@ -792,7 +714,6 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
 
 static struct i2c_driver ps8640_driver = {
        .probe = ps8640_probe,
-       .remove = ps8640_remove,
        .driver = {
                .name = "ps8640",
                .of_match_table = ps8640_match,
index 043b8109e64aa9606fab6fe1acc04d7cd7af83e2..c49091691ab1c23666a41e2df7d2262e311cc4b0 100644 (file)
@@ -16,8 +16,9 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/media-bus-format.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
 
 #include <video/mipi_display.h>
 
@@ -1009,7 +1010,7 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
        do {
                u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
 
-               if (!(reg & DSIM_SFR_HEADER_FULL))
+               if (reg & DSIM_SFR_HEADER_EMPTY)
                        return 0;
 
                if (!cond_resched())
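
The one-line fix above waits for the header FIFO to report empty instead of merely not-full before the next transfer is queued. Assuming samsung_dsim_read(dsi, reg) as used in the surrounding loop, the same wait could also be phrased with the iopoll helper (a sketch, not the driver's code):

        u32 reg;
        int ret;

        /* Poll roughly every 10us, give up after 20ms. */
        ret = read_poll_timeout(samsung_dsim_read, reg,
                                reg & DSIM_SFR_HEADER_EMPTY,
                                10, 20000, false,
                                dsi, DSIM_FIFOCTRL_REG);
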
index aac239729a1d0ece4d201f50f4fa51156d8fb943..2bdc5b439bebd56407af3b5b04892b3ac90678d4 100644 (file)
@@ -473,6 +473,41 @@ static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
        return sii902x_get_edid(sii902x, connector);
 }
 
+static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+                                                    struct drm_bridge_state *bridge_state,
+                                                    struct drm_crtc_state *crtc_state,
+                                                    struct drm_connector_state *conn_state,
+                                                    u32 output_fmt,
+                                                    unsigned int *num_input_fmts)
+{
+       u32 *input_fmts;
+
+       *num_input_fmts = 0;
+
+       input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+       if (!input_fmts)
+               return NULL;
+
+       input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+       *num_input_fmts = 1;
+
+       return input_fmts;
+}
+
+static int sii902x_bridge_atomic_check(struct drm_bridge *bridge,
+                                      struct drm_bridge_state *bridge_state,
+                                      struct drm_crtc_state *crtc_state,
+                                      struct drm_connector_state *conn_state)
+{
+       /*
+        * Flags negotiation may be supported in the future;
+        * for now, set the bus flags statically in atomic_check.
+        */
+       bridge_state->input_bus_cfg.flags = bridge->timings->input_bus_flags;
+
+       return 0;
+}
+
 static const struct drm_bridge_funcs sii902x_bridge_funcs = {
        .attach = sii902x_bridge_attach,
        .mode_set = sii902x_bridge_mode_set,
@@ -480,6 +515,11 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = {
        .enable = sii902x_bridge_enable,
        .detect = sii902x_bridge_detect,
        .get_edid = sii902x_bridge_get_edid,
+       .atomic_reset = drm_atomic_helper_bridge_reset,
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+       .atomic_get_input_bus_fmts = sii902x_bridge_atomic_get_input_bus_fmts,
+       .atomic_check = sii902x_bridge_atomic_check,
 };
 
 static int sii902x_mute(struct sii902x *sii902x, bool mute)
index 79b09ccd13533f69d6f2f8544d27977b5963f279..599164e3877dbd59d629b19b3756885f669fd20e 100644 (file)
@@ -2376,7 +2376,7 @@ MODULE_DEVICE_TABLE(i2c, sii8620_id);
 static struct i2c_driver sii8620_driver = {
        .driver = {
                .name   = "sii8620",
-               .of_match_table = of_match_ptr(sii8620_dt_match),
+               .of_match_table = sii8620_dt_match,
        },
        .probe          = sii8620_probe,
        .remove         = sii8620_remove,
index d85d9ee463b818488fbec00634675663623ee656..cbe8e778d7c7a9552d03677c117636ffb7774b1e 100644 (file)
@@ -8,8 +8,9 @@
 
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_atomic_helper.h>
index 9389ce526eb132d68e6b5096e7e4143983d1d97c..be21c11de1f2a135c8cce534e9cea85b4af3b613 100644 (file)
@@ -62,6 +62,10 @@ struct dw_hdmi_cec {
        bool rx_done;
        struct cec_notifier *notify;
        int irq;
+
+       u8 regs_polarity;
+       u8 regs_mask;
+       u8 regs_mute_stat0;
 };
 
 static void dw_hdmi_write(struct dw_hdmi_cec *cec, u8 val, int offset)
@@ -304,11 +308,44 @@ static void dw_hdmi_cec_remove(struct platform_device *pdev)
        cec_unregister_adapter(cec->adap);
 }
 
+static int __maybe_unused dw_hdmi_cec_resume(struct device *dev)
+{
+       struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
+
+       /* Restore logical address */
+       dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
+       dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);
+
+       /* Restore interrupt status/mask registers */
+       dw_hdmi_write(cec, cec->regs_polarity, HDMI_CEC_POLARITY);
+       dw_hdmi_write(cec, cec->regs_mask, HDMI_CEC_MASK);
+       dw_hdmi_write(cec, cec->regs_mute_stat0, HDMI_IH_MUTE_CEC_STAT0);
+
+       return 0;
+}
+
+static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev)
+{
+       struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
+
+       /* Store interrupt status/mask registers */
+       cec->regs_polarity = dw_hdmi_read(cec, HDMI_CEC_POLARITY);
+       cec->regs_mask = dw_hdmi_read(cec, HDMI_CEC_MASK);
+       cec->regs_mute_stat0 = dw_hdmi_read(cec, HDMI_IH_MUTE_CEC_STAT0);
+
+       return 0;
+}
+
+static const struct dev_pm_ops dw_hdmi_cec_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume)
+};
+
 static struct platform_driver dw_hdmi_cec_driver = {
        .probe  = dw_hdmi_cec_probe,
        .remove_new = dw_hdmi_cec_remove,
        .driver = {
                .name = "dw-hdmi-cec",
+               .pm = &dw_hdmi_cec_pm,
        },
 };
 module_platform_driver(dw_hdmi_cec_driver);
index 9d6dcaf317a1791c384b68f8ad11d36f9293f417..6c1d7947450545916befc09ce2fc3127d9654c99 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/regmap.h>
 #include <linux/dma-mapping.h>
 
 #define HDMI14_MAX_TMDSCLK     340000000
 
-enum hdmi_datamap {
-       RGB444_8B = 0x01,
-       RGB444_10B = 0x03,
-       RGB444_12B = 0x05,
-       RGB444_16B = 0x07,
-       YCbCr444_8B = 0x09,
-       YCbCr444_10B = 0x0B,
-       YCbCr444_12B = 0x0D,
-       YCbCr444_16B = 0x0F,
-       YCbCr422_8B = 0x16,
-       YCbCr422_10B = 0x14,
-       YCbCr422_12B = 0x12,
-};
-
 static const u16 csc_coeff_default[3][4] = {
        { 0x2000, 0x0000, 0x0000, 0x0000 },
        { 0x0000, 0x2000, 0x0000, 0x0000 },
@@ -856,10 +842,10 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
 
        if (pdata->enable_audio)
                pdata->enable_audio(hdmi,
-                                           hdmi->channels,
-                                           hdmi->sample_width,
-                                           hdmi->sample_rate,
-                                           hdmi->sample_non_pcm);
+                                   hdmi->channels,
+                                   hdmi->sample_width,
+                                   hdmi->sample_rate,
+                                   hdmi->sample_non_pcm);
 }
 
 static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
@@ -1426,9 +1412,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
        if (dw_hdmi_support_scdc(hdmi, display)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
-                       drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
                else
-                       drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
+                       drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
        }
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@ -2116,7 +2102,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                                min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
                        /* Enable scrambling in the sink */
-                       drm_scdc_set_scrambling(&hdmi->connector, 1);
+                       drm_scdc_set_scrambling(hdmi->curr_conn, 1);
 
                        /*
                         * To activate the scrambler feature, you must ensure
@@ -2132,7 +2118,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                        hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
                        hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                                    HDMI_MC_SWRSTZ);
-                       drm_scdc_set_scrambling(&hdmi->connector, 0);
+                       drm_scdc_set_scrambling(hdmi->curr_conn, 0);
                }
        }
 
@@ -2463,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
        enum drm_connector_status result;
 
        result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
-
-       mutex_lock(&hdmi->mutex);
-       if (result != hdmi->last_connector_result) {
-               dev_dbg(hdmi->dev, "read_hpd result: %d", result);
-               handle_plugged_change(hdmi,
-                                     result == connector_status_connected);
-               hdmi->last_connector_result = result;
-       }
-       mutex_unlock(&hdmi->mutex);
+       hdmi->last_connector_result = result;
 
        return result;
 }
@@ -2710,9 +2688,10 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
                /* Default 8bit fallback */
                output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
 
-               *num_output_fmts = i;
-
-               return output_fmts;
+               if (drm_mode_is_420_only(info, mode)) {
+                       *num_output_fmts = i;
+                       return output_fmts;
+               }
        }
 
        /*
@@ -2971,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
        hdmi->curr_conn = NULL;
        dw_hdmi_update_power(hdmi);
        dw_hdmi_update_phy_mask(hdmi);
+       handle_plugged_change(hdmi, false);
        mutex_unlock(&hdmi->mutex);
 }
 
@@ -2989,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
        hdmi->curr_conn = connector;
        dw_hdmi_update_power(hdmi);
        dw_hdmi_update_phy_mask(hdmi);
+       handle_plugged_change(hdmi, true);
        mutex_unlock(&hdmi->mutex);
 }
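
With the hunks above, the codec "plugged" notification now fires from the bridge's atomic enable/disable paths, where hdmi->curr_conn and the power state are updated under hdmi->mutex, and .detect() shrinks to a pure HPD read. A minimal sketch of the consumer side, assuming an hdmi-codec style callback (my_codec_plugged_cb and codec_dev are illustrative names):

    /* Sketch: a codec registers for the plugged events generated above. */
    static void my_codec_plugged_cb(struct device *codec_dev, bool plugged)
    {
            /* invoked from dw-hdmi's atomic enable/disable and HPD paths */
            dev_dbg(codec_dev, "HDMI sink %splugged\n", plugged ? "" : "un");
    }

    /* hdmi is the struct dw_hdmi handle returned by dw_hdmi_probe() */
    dw_hdmi_set_plugged_cb(hdmi, my_codec_plugged_cb, codec_dev);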
 
@@ -3346,6 +3327,12 @@ static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
        return 0;
 }
 
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi)
+{
+       return hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_bus_fmt_is_420);
+
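The new dw_hdmi_bus_fmt_is_420() helper lets platform glue query whether the controller negotiated a YUV420 output bus format, which typically halves the TMDS-side clock. A hypothetical glue-driver snippet (priv, mode and the clock name are placeholders):

    /* Hypothetical platform-glue use of the new helper. */
    unsigned long rate = mode->clock * 1000UL;      /* mode->clock is in kHz */

    if (dw_hdmi_bus_fmt_is_420(priv->hdmi))
            rate /= 2;      /* YUV420 carries half the samples per line */

    clk_set_rate(priv->pixel_clk, rate);
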
 struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
                              const struct dw_hdmi_plat_data *plat_data)
 {
@@ -3553,6 +3540,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
        hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
                         | DRM_BRIDGE_OP_HPD;
        hdmi->bridge.interlace_allowed = true;
+       hdmi->bridge.ddc = hdmi->ddc;
 #ifdef CONFIG_OF
        hdmi->bridge.of_node = pdev->dev.of_node;
 #endif
index b2efecf7d1603c148ca033b6129cbe7cfe79002c..04d4a1a10698103cb88ab41f11f8f75b048ec039 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
@@ -265,6 +265,7 @@ struct dw_mipi_dsi {
        struct dw_mipi_dsi *master; /* dual-dsi master ptr */
        struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
 
+       struct drm_display_mode mode;
        const struct dw_mipi_dsi_plat_data *plat_data;
 };
 
@@ -332,6 +333,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
        if (IS_ERR(bridge))
                return PTR_ERR(bridge);
 
+       bridge->pre_enable_prev_first = true;
        dsi->panel_bridge = bridge;
 
        drm_bridge_add(&dsi->bridge);
@@ -859,15 +861,6 @@ static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
         */
        dw_mipi_dsi_set_mode(dsi, 0);
 
-       /*
-        * TODO Only way found to call panel-bridge post_disable &
-        * panel unprepare before the dsi "final" disable...
-        * This needs to be fixed in the drm_bridge framework and the API
-        * needs to be updated to manage our own call chains...
-        */
-       if (dsi->panel_bridge->funcs->post_disable)
-               dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
-
        if (phy_ops->power_off)
                phy_ops->power_off(dsi->plat_data->priv_data);
 
@@ -942,15 +935,25 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
                phy_ops->power_on(dsi->plat_data->priv_data);
 }
 
+static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+                                                struct drm_bridge_state *old_bridge_state)
+{
+       struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+       /* Power up the DSI controller into command mode */
+       dw_mipi_dsi_mode_set(dsi, &dsi->mode);
+       if (dsi->slave)
+               dw_mipi_dsi_mode_set(dsi->slave, &dsi->mode);
+}
+
 static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
                                        const struct drm_display_mode *mode,
                                        const struct drm_display_mode *adjusted_mode)
 {
        struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
-       dw_mipi_dsi_mode_set(dsi, adjusted_mode);
-       if (dsi->slave)
-               dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
+       /* Store the display mode for later use in the pre_enable callback */
+       drm_mode_copy(&dsi->mode, adjusted_mode);
 }
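
The host now only caches the adjusted mode in .mode_set and defers all controller programming to atomic_pre_enable. Combined with pre_enable_prev_first on the panel bridge (set in the attach hunk above), the framework runs the host's pre-enable before the panel's, so the link is already powered and in command mode when the panel prepares. A sketch of what a downstream panel can then rely on (to_my_panel is a placeholder):

    /* Sketch: with pre_enable_prev_first, a DSI panel's prepare() may
     * already transfer commands because the host pre-enabled first. */
    static int my_panel_prepare(struct drm_panel *panel)
    {
            struct mipi_dsi_device *dsi = to_my_panel(panel)->dsi;

            return mipi_dsi_dcs_exit_sleep_mode(dsi);
    }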
 
 static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
@@ -1004,6 +1007,7 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
        .atomic_reset           = drm_atomic_helper_bridge_reset,
+       .atomic_pre_enable      = dw_mipi_dsi_bridge_atomic_pre_enable,
        .atomic_enable          = dw_mipi_dsi_bridge_atomic_enable,
        .atomic_post_disable    = dw_mipi_dsi_bridge_post_atomic_disable,
        .mode_set               = dw_mipi_dsi_bridge_mode_set,
index 5641395fd310e6f7c399e71bb2c752bd5664b80d..46198af9eebbf8cced7b9b4ad628126f144c25dd 100644 (file)
 #define DSI_LANEENABLE         0x0210 /* Enables each lane */
 #define DSI_RX_START           1
 
-/* LCDC/DPI Host Registers */
-#define LCDCTRL                        0x0420
+/* LCDC/DPI Host Registers, based on guesswork that this matches TC358764 */
+#define LCDCTRL                        0x0420 /* Video Path Control */
+#define LCDCTRL_MSF            BIT(0) /* Magic square in RGB666 */
+#define LCDCTRL_VTGEN          BIT(4) /* Use chip clock for timing */
+#define LCDCTRL_EVTMODE                BIT(5) /* Event mode */
+#define LCDCTRL_UNK6           BIT(6) /* Unknown */
+#define LCDCTRL_RGB888         BIT(8) /* RGB888 mode */
+#define LCDCTRL_HSPOL          BIT(17) /* Polarity of HSYNC signal */
+#define LCDCTRL_DEPOL          BIT(18) /* Polarity of DE signal */
+#define LCDCTRL_VSPOL          BIT(19) /* Polarity of VSYNC signal */
+#define LCDCTRL_VSDELAY(v)     (((v) & 0xfff) << 20) /* VSYNC delay */
 
 /* SPI Master Registers */
 #define SPICMR                 0x0450
@@ -65,6 +74,7 @@ struct tc358762 {
        struct regulator *regulator;
        struct drm_bridge *panel_bridge;
        struct gpio_desc *reset_gpio;
+       struct drm_display_mode mode;
        bool pre_enabled;
        int error;
 };
@@ -105,6 +115,8 @@ static inline struct tc358762 *bridge_to_tc358762(struct drm_bridge *bridge)
 
 static int tc358762_init(struct tc358762 *ctx)
 {
+       u32 lcdctrl;
+
        tc358762_write(ctx, DSI_LANEENABLE,
                       LANEENABLE_L0EN | LANEENABLE_CLEN);
        tc358762_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
@@ -114,7 +126,18 @@ static int tc358762_init(struct tc358762 *ctx)
        tc358762_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
 
        tc358762_write(ctx, SPICMR, 0x00);
-       tc358762_write(ctx, LCDCTRL, 0x00100150);
+
+       lcdctrl = LCDCTRL_VSDELAY(1) | LCDCTRL_RGB888 |
+                 LCDCTRL_UNK6 | LCDCTRL_VTGEN;
+
+       if (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC)
+               lcdctrl |= LCDCTRL_HSPOL;
+
+       if (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC)
+               lcdctrl |= LCDCTRL_VSPOL;
+
+       tc358762_write(ctx, LCDCTRL, lcdctrl);
+
        tc358762_write(ctx, SYSCTRL, 0x040f);
        msleep(100);
 
@@ -126,7 +149,7 @@ static int tc358762_init(struct tc358762 *ctx)
        return tc358762_clear_error(ctx);
 }
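
The symbolic LCDCTRL value written above is bit-for-bit identical to the old 0x00100150 magic number; only modes flagged NHSYNC/NVSYNC now additionally set the polarity bits. The decomposition:

    /*
     * LCDCTRL_VSDELAY(1) = 1 << 20 = 0x00100000
     * LCDCTRL_RGB888     = BIT(8)  = 0x00000100
     * LCDCTRL_UNK6       = BIT(6)  = 0x00000040
     * LCDCTRL_VTGEN      = BIT(4)  = 0x00000010
     *                      sum     = 0x00100150
     */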
 
-static void tc358762_post_disable(struct drm_bridge *bridge)
+static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *state)
 {
        struct tc358762 *ctx = bridge_to_tc358762(bridge);
        int ret;
@@ -148,7 +171,7 @@ static void tc358762_post_disable(struct drm_bridge *bridge)
                dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
 }
 
-static void tc358762_pre_enable(struct drm_bridge *bridge)
+static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
 {
        struct tc358762 *ctx = bridge_to_tc358762(bridge);
        int ret;
@@ -162,11 +185,17 @@ static void tc358762_pre_enable(struct drm_bridge *bridge)
                usleep_range(5000, 10000);
        }
 
+       ctx->pre_enabled = true;
+}
+
+static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+{
+       struct tc358762 *ctx = bridge_to_tc358762(bridge);
+       int ret;
+
        ret = tc358762_init(ctx);
        if (ret < 0)
                dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
-
-       ctx->pre_enabled = true;
 }
 
 static int tc358762_attach(struct drm_bridge *bridge,
@@ -178,10 +207,24 @@ static int tc358762_attach(struct drm_bridge *bridge,
                                 bridge, flags);
 }
 
+static void tc358762_bridge_mode_set(struct drm_bridge *bridge,
+                                    const struct drm_display_mode *mode,
+                                    const struct drm_display_mode *adj)
+{
+       struct tc358762 *ctx = bridge_to_tc358762(bridge);
+
+       drm_mode_copy(&ctx->mode, mode);
+}
+
 static const struct drm_bridge_funcs tc358762_bridge_funcs = {
-       .post_disable = tc358762_post_disable,
-       .pre_enable = tc358762_pre_enable,
+       .atomic_post_disable = tc358762_post_disable,
+       .atomic_pre_enable = tc358762_pre_enable,
+       .atomic_enable = tc358762_enable,
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+       .atomic_reset = drm_atomic_helper_bridge_reset,
        .attach = tc358762_attach,
+       .mode_set = tc358762_bridge_mode_set,
 };
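
The struct above shows the standard conversion: once a bridge implements any atomic_* hook it must also wire up the atomic state duplicate/destroy/reset helpers, and splitting tc358762_init() out of pre-enable into atomic_enable confines pre-enable to regulator/reset handling so register writes only happen once the upstream DSI host is running. The same recipe as a minimal template (my_* names are placeholders):

    static const struct drm_bridge_funcs my_bridge_funcs = {
            .atomic_pre_enable      = my_atomic_pre_enable,   /* power/reset */
            .atomic_enable          = my_atomic_enable,       /* register setup */
            .atomic_post_disable    = my_atomic_post_disable,
            /* required once any atomic_* hook above is used: */
            .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
            .atomic_reset           = drm_atomic_helper_bridge_reset,
            .attach                 = my_attach,
    };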
 
 static int tc358762_parse_dt(struct tc358762 *ctx)
@@ -231,7 +274,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
        dsi->lanes = 1;
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
-                         MIPI_DSI_MODE_LPM;
+                         MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE;
 
        ret = tc358762_parse_dt(ctx);
        if (ret < 0)
index f85654f1b10450610cb1a3d8dac3b8f4acedae5a..deccb3995022b5565a22f24f8150c5f70605fffd 100644 (file)
 
 /* Video path registers */
 #define VP_CTRL                        0x0450 /* Video Path Control */
-#define VP_CTRL_MSF(v)         FLD_VAL(v, 0, 0) /* Magic square in RGB666 */
-#define VP_CTRL_VTGEN(v)       FLD_VAL(v, 4, 4) /* Use chip clock for timing */
-#define VP_CTRL_EVTMODE(v)     FLD_VAL(v, 5, 5) /* Event mode */
-#define VP_CTRL_RGB888(v)      FLD_VAL(v, 8, 8) /* RGB888 mode */
+#define VP_CTRL_MSF            BIT(0) /* Magic square in RGB666 */
+#define VP_CTRL_VTGEN          BIT(4) /* Use chip clock for timing */
+#define VP_CTRL_EVTMODE                BIT(5) /* Event mode */
+#define VP_CTRL_RGB888         BIT(8) /* RGB888 mode */
 #define VP_CTRL_VSDELAY(v)     FLD_VAL(v, 31, 20) /* VSYNC delay */
 #define VP_CTRL_HSPOL          BIT(17) /* Polarity of HSYNC signal */
 #define VP_CTRL_DEPOL          BIT(18) /* Polarity of DE signal */
@@ -176,7 +176,7 @@ static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
        if (ret >= 0)
                le32_to_cpus(val);
 
-       dev_dbg(ctx->dev, "read: %d, addr: %d\n", addr, *val);
+       dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val);
 }
 
 static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
@@ -233,8 +233,8 @@ static int tc358764_init(struct tc358764 *ctx)
        tc358764_write(ctx, DSI_STARTDSI, DSI_RX_START);
 
        /* configure video path */
-       tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888(1) |
-                      VP_CTRL_EVTMODE(1) | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
+       tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888 |
+                      VP_CTRL_EVTMODE | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
 
        /* reset PHY */
        tc358764_write(ctx, LV_PHY0, LV_PHY0_RST(1) |
index 65dc842e31f014554faefc1cbf680ef440d5a442..b45bffab7c817418e1ca707eab4f1005aa86503d 100644 (file)
@@ -500,8 +500,8 @@ static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
        if (ret)
                return ret;
 
-       /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
-       usleep_range(3000, 6000);
+       /* Wait for PLL to lock: up to 7.5 ms depending on refclk; sleep 2x for margin */
+       usleep_range(15000, 20000);
 
        return 0;
 }
@@ -817,7 +817,7 @@ static int tc_set_common_video_mode(struct tc_data *tc,
         * sync signals
         */
        ret = regmap_write(tc->regmap, VPCTRL0,
-                          FIELD_PREP(VSDELAY, 0) |
+                          FIELD_PREP(VSDELAY, right_margin + 10) |
                           OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
        if (ret)
                return ret;
@@ -2215,13 +2215,6 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
        return -EINVAL;
 }
 
-static void tc_clk_disable(void *data)
-{
-       struct clk *refclk = data;
-
-       clk_disable_unprepare(refclk);
-}
-
 static int tc_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
@@ -2238,20 +2231,10 @@ static int tc_probe(struct i2c_client *client)
        if (ret)
                return ret;
 
-       tc->refclk = devm_clk_get(dev, "ref");
-       if (IS_ERR(tc->refclk)) {
-               ret = PTR_ERR(tc->refclk);
-               dev_err(dev, "Failed to get refclk: %d\n", ret);
-               return ret;
-       }
-
-       ret = clk_prepare_enable(tc->refclk);
-       if (ret)
-               return ret;
-
-       ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk);
-       if (ret)
-               return ret;
+       tc->refclk = devm_clk_get_enabled(dev, "ref");
+       if (IS_ERR(tc->refclk))
+               return dev_err_probe(dev, PTR_ERR(tc->refclk),
+                                    "Failed to get and enable the ref clk\n");
 
        /* tRSTW = 100 cycles, at 13 MHz that is ~7.69 us */
        usleep_range(10, 15);
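
devm_clk_get_enabled() bundles devm_clk_get(), clk_prepare_enable() and the devres-registered disable into one call, which is why tc_clk_disable() and the hand-rolled error paths above could go. A simplified sketch of what the helper does internally (the real implementation lives in drivers/clk/clk-devres.c):

    static void my_clk_disable_unprepare(void *clk)
    {
            clk_disable_unprepare(clk);
    }

    static struct clk *my_devm_clk_get_enabled(struct device *dev, const char *id)
    {
            struct clk *clk = devm_clk_get(dev, id);
            int ret;

            if (IS_ERR(clk))
                    return clk;

            ret = clk_prepare_enable(clk);
            if (!ret)
                    ret = devm_add_action_or_reset(dev, my_clk_disable_unprepare, clk);

            return ret ? ERR_PTR(ret) : clk;
    }
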
index 7e9f4ec8e780ac9b6a5e5f97743398c3826ed79a..061e8bd5915de808a38a07f7e94a6e784c020dc0 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/i2c.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
index c499a14d0b980a12333d6ea809d198ec568c16f3..f448b903e19075676247fc72d30a1a9b1f501085 100644 (file)
  * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
  */
 struct ti_sn65dsi86 {
-       struct auxiliary_device         bridge_aux;
-       struct auxiliary_device         gpio_aux;
-       struct auxiliary_device         aux_aux;
-       struct auxiliary_device         pwm_aux;
+       struct auxiliary_device         *bridge_aux;
+       struct auxiliary_device         *gpio_aux;
+       struct auxiliary_device         *aux_aux;
+       struct auxiliary_device         *pwm_aux;
 
        struct device                   *dev;
        struct regmap                   *regmap;
@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
        auxiliary_device_delete(data);
 }
 
-/*
- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
- * sense for the model used here where all of the aux devices are allocated
- * in the single shared structure. We'll use this noop as a workaround.
- */
-static void ti_sn65dsi86_noop(struct device *dev) {}
+static void ti_sn65dsi86_aux_device_release(struct device *dev)
+{
+       struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+
+       kfree(aux);
+}
 
 static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
-                                      struct auxiliary_device *aux,
+                                      struct auxiliary_device **aux_out,
                                       const char *name)
 {
        struct device *dev = pdata->dev;
+       struct auxiliary_device *aux;
        int ret;
 
+       aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+       if (!aux)
+               return -ENOMEM;
+
        aux->name = name;
        aux->dev.parent = dev;
-       aux->dev.release = ti_sn65dsi86_noop;
+       aux->dev.release = ti_sn65dsi86_aux_device_release;
        device_set_of_node_from_dev(&aux->dev, dev);
        ret = auxiliary_device_init(aux);
-       if (ret)
+       if (ret) {
+               kfree(aux);
                return ret;
+       }
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
        if (ret)
                return ret;
@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
        if (ret)
                return ret;
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
+       if (!ret)
+               *aux_out = aux;
 
        return ret;
 }
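
The refactor follows the driver-core rule that memory backing a struct device must stay valid until its release() callback runs: embedding the auxiliary devices in the devm-managed ti_sn65dsi86 struct could free them while references were still live, and the old no-op release only papered over that. Roughly, the lifecycle per child device is now:

    /* Rough lifecycle of one aux child after this change: */
    aux = kzalloc(sizeof(*aux), GFP_KERNEL);            /* device core owns it */
    aux->dev.release = ti_sn65dsi86_aux_device_release; /* kfree() on last put */
    auxiliary_device_init(aux);  /* on failure: kfree() directly, no release yet */
    /* after init succeeds all error paths go through uninit/put, and the
     * devm actions delete and uninit the device in reverse order on unbind */
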
index c06390da9ffd03f392f2be30b8070564a4f478cd..28848a8eb42e847676e66eb2c3d51f004d5ee900 100644 (file)
@@ -206,12 +206,55 @@ static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
        return MODE_OK;
 }
 
+static u32 *tfp410_get_input_bus_fmts(struct drm_bridge *bridge,
+                                     struct drm_bridge_state *bridge_state,
+                                     struct drm_crtc_state *crtc_state,
+                                     struct drm_connector_state *conn_state,
+                                     u32 output_fmt,
+                                     unsigned int *num_input_fmts)
+{
+       struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+       u32 *input_fmts;
+
+       *num_input_fmts = 0;
+
+       input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+       if (!input_fmts)
+               return NULL;
+
+       *num_input_fmts = 1;
+       input_fmts[0] = dvi->bus_format;
+
+       return input_fmts;
+}
+
+static int tfp410_atomic_check(struct drm_bridge *bridge,
+                              struct drm_bridge_state *bridge_state,
+                              struct drm_crtc_state *crtc_state,
+                              struct drm_connector_state *conn_state)
+{
+       struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+       /*
+        * Flag negotiation might be supported in the future;
+        * for now, set the bus flags statically in atomic_check.
+        */
+       bridge_state->input_bus_cfg.flags = dvi->timings.input_bus_flags;
+
+       return 0;
+}
+
 static const struct drm_bridge_funcs tfp410_bridge_funcs = {
        .attach         = tfp410_attach,
        .detach         = tfp410_detach,
        .enable         = tfp410_enable,
        .disable        = tfp410_disable,
        .mode_valid     = tfp410_mode_valid,
+       .atomic_reset = drm_atomic_helper_bridge_reset,
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+       .atomic_get_input_bus_fmts = tfp410_get_input_bus_fmts,
+       .atomic_check = tfp410_atomic_check,
 };
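
tfp410 reports a single fixed input format to the format-negotiation helpers; the array is allocated because the caller takes ownership and kfree()s it. A bridge with more than one acceptable input would return them ordered by preference, sketched here with my_* placeholder names:

    static u32 *my_get_input_bus_fmts(struct drm_bridge *bridge,
                                      struct drm_bridge_state *bridge_state,
                                      struct drm_crtc_state *crtc_state,
                                      struct drm_connector_state *conn_state,
                                      u32 output_fmt,
                                      unsigned int *num_input_fmts)
    {
            u32 *fmts = kcalloc(2, sizeof(*fmts), GFP_KERNEL);

            *num_input_fmts = 0;
            if (!fmts)
                    return NULL;

            fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;    /* preferred */
            fmts[1] = MEDIA_BUS_FMT_RGB666_1X18;
            *num_input_fmts = 2;

            return fmts;
    }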
 
 static const struct drm_bridge_timings tfp410_default_timings = {
@@ -405,7 +448,7 @@ MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
 static struct i2c_driver tfp410_i2c_driver = {
        .driver = {
                .name   = "tfp410",
-               .of_match_table = of_match_ptr(tfp410_match),
+               .of_match_table = tfp410_match,
        },
        .id_table       = tfp410_i2c_ids,
        .probe          = tfp410_i2c_probe,
index e78999c72bd77e87ef232f14c0529895fc76a9c4..a3f0e6d961055943aca5a6389b924a6d1eaf2c91 100644 (file)
@@ -415,7 +415,7 @@ void drm_hdcp_update_content_protection(struct drm_connector *connector,
                return;
 
        state->content_protection = val;
-       drm_sysfs_connector_status_event(connector,
-                                dev->mode_config.content_protection_property);
+       drm_sysfs_connector_property_event(connector,
+                                          dev->mode_config.content_protection_property);
 }
 EXPORT_SYMBOL(drm_hdcp_update_content_protection);
index 41b8066f61fff4be48263c616845430d1bb95e7b..292e38eb621806a007c61ff631a50d8fb7f072b6 100644 (file)
@@ -3332,7 +3332,7 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
  * that also takes a snapshot of the modeset state to be restored on resume.
  *
  * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
- * and it is the atomic version of drm_crtc_force_disable_all().
+ * and it is the atomic version of drm_helper_force_disable_all().
  */
 void drm_atomic_helper_shutdown(struct drm_device *dev)
 {
index d867e7f9f2cd5800132561a63e0deb51e5c41b9c..98d3b10c08ae194071777b4abb2ed8c2e43286a5 100644 (file)
@@ -374,16 +374,25 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
 
        if (blob_id != 0) {
                new_blob = drm_property_lookup_blob(dev, blob_id);
-               if (new_blob == NULL)
+               if (new_blob == NULL) {
+                       drm_dbg_atomic(dev,
+                                      "cannot find blob ID %llu\n", blob_id);
                        return -EINVAL;
+               }
 
                if (expected_size > 0 &&
                    new_blob->length != expected_size) {
+                       drm_dbg_atomic(dev,
+                                      "[BLOB:%d] length %zu different from expected %zu\n",
+                                      new_blob->base.id, new_blob->length, expected_size);
                        drm_property_blob_put(new_blob);
                        return -EINVAL;
                }
                if (expected_elem_size > 0 &&
                    new_blob->length % expected_elem_size != 0) {
+                       drm_dbg_atomic(dev,
+                                      "[BLOB:%d] length %zu not divisible by element size %zu\n",
+                                      new_blob->base.id, new_blob->length, expected_elem_size);
                        drm_property_blob_put(new_blob);
                        return -EINVAL;
                }
@@ -454,7 +463,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
        } else {
                drm_dbg_atomic(crtc->dev,
-                              "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+                              "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
                               crtc->base.id, crtc->name,
                               property->base.id, property->name);
                return -EINVAL;
@@ -489,8 +498,13 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
                *val = state->scaling_filter;
        else if (crtc->funcs->atomic_get_property)
                return crtc->funcs->atomic_get_property(crtc, state, property, val);
-       else
+       else {
+               drm_dbg_atomic(dev,
+                              "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+                              crtc->base.id, crtc->name,
+                              property->base.id, property->name);
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -525,8 +539,12 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
        } else if (property == config->prop_crtc_id) {
                struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
 
-               if (val && !crtc)
+               if (val && !crtc) {
+                       drm_dbg_atomic(dev,
+                                      "[PROP:%d:%s] cannot find CRTC with ID %llu\n",
+                                      property->base.id, property->name, val);
                        return -EACCES;
+               }
                return drm_atomic_set_crtc_for_plane(state, crtc);
        } else if (property == config->prop_crtc_x) {
                state->crtc_x = U642I64(val);
@@ -577,7 +595,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
                                property, val);
        } else {
                drm_dbg_atomic(plane->dev,
-                              "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+                              "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
                               plane->base.id, plane->name,
                               property->base.id, property->name);
                return -EINVAL;
@@ -636,6 +654,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
        } else if (plane->funcs->atomic_get_property) {
                return plane->funcs->atomic_get_property(plane, state, property, val);
        } else {
+               drm_dbg_atomic(dev,
+                              "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+                              plane->base.id, plane->name,
+                              property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -677,14 +699,21 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
        if (property == config->prop_crtc_id) {
                struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
 
-               if (val && !crtc)
+               if (val && !crtc) {
+                       drm_dbg_atomic(dev,
+                                      "[PROP:%d:%s] cannot find CRTC with ID %llu\n",
+                                      property->base.id, property->name, val);
                        return -EACCES;
+               }
                return drm_atomic_set_crtc_for_connector(state, crtc);
        } else if (property == config->dpms_property) {
                /* setting DPMS property requires special handling, which
                 * is done in legacy setprop path for us.  Disallow (for
                 * now?) atomic writes to DPMS property:
                 */
+               drm_dbg_atomic(dev,
+                              "legacy [PROP:%d:%s] can only be set via legacy uAPI\n",
+                              property->base.id, property->name);
                return -EINVAL;
        } else if (property == config->tv_select_subconnector_property) {
                state->tv.select_subconnector = val;
@@ -774,7 +803,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
                                state, property, val);
        } else {
                drm_dbg_atomic(connector->dev,
-                              "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+                              "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
                               connector->base.id, connector->name,
                               property->base.id, property->name);
                return -EINVAL;
@@ -856,6 +885,10 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
                return connector->funcs->atomic_get_property(connector,
                                state, property, val);
        } else {
+               drm_dbg_atomic(dev,
+                              "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
+                              connector->base.id, connector->name,
+                              property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -894,6 +927,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
                break;
        }
        default:
+               drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
                ret = -EINVAL;
                break;
        }
@@ -1030,6 +1064,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
                break;
        }
        default:
+               drm_dbg_atomic(prop->dev, "[OBJECT:%d] has no properties\n", obj->id);
                ret = -EINVAL;
                break;
        }
@@ -1230,8 +1265,10 @@ static int prepare_signaling(struct drm_device *dev,
         * Having this flag set means userspace waits for an event that will
         * never arrive, because no CRTC is available to signal it
         */
-       if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+       if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
+               drm_dbg_atomic(dev, "need at least one CRTC for DRM_MODE_PAGE_FLIP_EVENT\n");
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -1364,11 +1401,13 @@ retry:
 
                obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
                if (!obj) {
+                       drm_dbg_atomic(dev, "cannot find object ID %d\n", obj_id);
                        ret = -ENOENT;
                        goto out;
                }
 
                if (!obj->properties) {
+                       drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj_id);
                        drm_mode_object_put(obj);
                        ret = -ENOENT;
                        goto out;
@@ -1395,6 +1434,9 @@ retry:
 
                        prop = drm_mode_obj_find_prop_id(obj, prop_id);
                        if (!prop) {
+                               drm_dbg_atomic(dev,
+                                              "[OBJECT:%d] cannot find property ID %d\n",
+                                              obj_id, prop_id);
                                drm_mode_object_put(obj);
                                ret = -ENOENT;
                                goto out;
index c3d69af02e79daaaf00cf4a7104dd1d12b0f7953..39e68e45bb124bbf21b3a1caceea488a2fdd12de 100644 (file)
 #include <linux/mutex.h>
 
 #include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>
 
@@ -1345,6 +1347,50 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
 EXPORT_SYMBOL(of_drm_find_bridge);
 #endif
 
+#ifdef CONFIG_DEBUG_FS
+static int drm_bridge_chains_info(struct seq_file *m, void *data)
+{
+       struct drm_debugfs_entry *entry = m->private;
+       struct drm_device *dev = entry->dev;
+       struct drm_printer p = drm_seq_file_printer(m);
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_encoder *encoder;
+       unsigned int bridge_idx = 0;
+
+       list_for_each_entry(encoder, &config->encoder_list, head) {
+               struct drm_bridge *bridge;
+
+               drm_printf(&p, "encoder[%u]\n", encoder->base.id);
+
+               drm_for_each_bridge_in_chain(encoder, bridge) {
+                       drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
+                                  bridge_idx, bridge->type, bridge->ops);
+
+#ifdef CONFIG_OF
+                       if (bridge->of_node)
+                               drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
+#endif
+
+                       drm_printf(&p, "\n");
+
+                       bridge_idx++;
+               }
+       }
+
+       return 0;
+}
+
+static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
+       { "bridge_chains", drm_bridge_chains_info, 0 },
+};
+
+void drm_bridge_debugfs_init(struct drm_minor *minor)
+{
+       drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list,
+                             ARRAY_SIZE(drm_bridge_debugfs_list));
+}
+#endif
+
 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
 MODULE_DESCRIPTION("DRM bridge infrastructure");
 MODULE_LICENSE("GPL and additional rights");
index 19ae4a177ac386b255a7b182a0cffcb921066e53..1da93d5a1f614732eca132aec5ee20efa2dec7ab 100644 (file)
@@ -125,7 +125,7 @@ static void drm_bridge_connector_hpd_cb(void *cb_data,
 
        drm_bridge_connector_hpd_notify(connector, status);
 
-       drm_kms_helper_hotplug_event(dev);
+       drm_kms_helper_connector_hotplug_event(connector);
 }
 
 static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
@@ -318,6 +318,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
        struct i2c_adapter *ddc = NULL;
        struct drm_bridge *bridge, *panel_bridge = NULL;
        int connector_type;
+       int ret;
 
        bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
        if (!bridge_connector)
@@ -368,8 +369,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
                return ERR_PTR(-EINVAL);
        }
 
-       drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
-                                   connector_type, ddc);
+       ret = drm_connector_init_with_ddc(drm, connector,
+                                         &drm_bridge_connector_funcs,
+                                         connector_type, ddc);
+       if (ret) {
+               kfree(bridge_connector);
+               return ERR_PTR(ret);
+       }
+
        drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
 
        if (bridge_connector->bridge_hpd)
index f6292ba0e6fc378371dcbde10857144ce501a619..037e36f2049c1793d287ca6cbeff7d7a8870a610 100644 (file)
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
  * drm_client_register() it is no longer permissible to call drm_client_release()
 * directly (outside the unregister callback); instead, cleanup will happen
  * automatically on driver unload.
+ *
+ * Registering a client generates a hotplug event that allows the client
+ * to set up its display from pre-existing outputs. The client must have
+ * initialized its state to be able to handle the hotplug event successfully.
  */
 void drm_client_register(struct drm_client_dev *client)
 {
        struct drm_device *dev = client->dev;
+       int ret;
 
        mutex_lock(&dev->clientlist_mutex);
        list_add(&client->list, &dev->clientlist);
+
+       if (client->funcs && client->funcs->hotplug) {
+               /*
+                * Perform an initial hotplug event to pick up the
+                * display configuration for the client. This step
+                * has to be performed *after* registering the client
+                * in the list of clients, or a concurrent hotplug
+                * event might be lost, leaving the display off.
+                *
+                * Hold the clientlist_mutex as for a regular hotplug
+                * event.
+                */
+               ret = client->funcs->hotplug(client);
+               if (ret)
+                       drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+       }
        mutex_unlock(&dev->clientlist_mutex);
 }
 EXPORT_SYMBOL(drm_client_register);
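
A client that wants this registration-time hotplug only needs to supply a hotplug callback; a minimal sketch (my_client_hotplug and the client variable are placeholders, and the client struct would normally be embedded in a long-lived driver structure):

    static const struct drm_client_funcs my_client_funcs = {
            .owner   = THIS_MODULE,
            .hotplug = my_client_hotplug,   /* must cope with zero connectors */
    };

    ret = drm_client_init(dev, &client, "my-client", &my_client_funcs);
    if (!ret)
            drm_client_register(&client);   /* fires the initial ->hotplug() */
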
index 3ed4cfcb350c4db19e0db135842cb65f4ddf2a08..bf8371dc2a612111a9b2aaa40e06d3f063ba6120 100644 (file)
@@ -2730,10 +2730,10 @@ static int drm_connector_privacy_screen_notifier(
        drm_connector_update_privacy_screen_properties(connector, true);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
-       drm_sysfs_connector_status_event(connector,
-                               connector->privacy_screen_sw_state_property);
-       drm_sysfs_connector_status_event(connector,
-                               connector->privacy_screen_hw_state_property);
+       drm_sysfs_connector_property_event(connector,
+                                          connector->privacy_screen_sw_state_property);
+       drm_sysfs_connector_property_event(connector,
+                                          connector->privacy_screen_hw_state_property);
 
        return NOTIFY_DONE;
 }
index 4855230ba2c6ca062bea78ad53cd3c7c591dd1a0..2de43ff3ce0a4305714c854cde1364db6e5e34e0 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_client.h>
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
@@ -39,6 +40,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gpuva_mgr.h>
 #include <drm/drm_managed.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -175,6 +177,45 @@ static const struct file_operations drm_debugfs_fops = {
        .release = single_release,
 };
 
+/**
+ * drm_debugfs_gpuva_info - dump the given DRM GPU VA space
+ * @m: pointer to the &seq_file to write
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ *
+ * Dumps the GPU VA mappings of a given DRM GPU VA manager.
+ *
+ * For each DRM GPU VA space, drivers should call this function from their
+ * &drm_info_list's show callback.
+ *
+ * Returns: 0 on success, -ENODEV if the &mgr is not initialized
+ */
+int drm_debugfs_gpuva_info(struct seq_file *m,
+                          struct drm_gpuva_manager *mgr)
+{
+       struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
+
+       if (!mgr->name)
+               return -ENODEV;
+
+       seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
+                  mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
+       seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
+                  kva->va.addr, kva->va.addr + kva->va.range);
+       seq_puts(m, "\n");
+       seq_puts(m, " VAs | start              | range              | end                | object             | object offset\n");
+       seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
+       drm_gpuva_for_each_va(va, mgr) {
+               if (unlikely(va == kva))
+                       continue;
+
+               seq_printf(m, "     | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n",
+                          va->va.addr, va->va.range, va->va.addr + va->va.range,
+                          (u64)(uintptr_t)va->gem.obj, va->gem.offset);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_gpuva_info);
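
A driver's show callback then only has to resolve its device and forward the seq_file; a sketch following the drm_info_list model named in the kerneldoc above (my_device and to_my_device are placeholders):

    static int my_gpuvas_show(struct seq_file *m, void *data)
    {
            struct drm_info_node *node = m->private;
            struct my_device *mydev = to_my_device(node->minor->dev);

            return drm_debugfs_gpuva_info(m, &mydev->gpuva_mgr);
    }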
 
 /**
  * drm_debugfs_create_files - Initialize a given set of debugfs files for DRM
@@ -234,6 +275,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 
        if (drm_drv_uses_atomic_modeset(dev)) {
                drm_atomic_debugfs_init(minor);
+               drm_bridge_debugfs_init(minor);
        }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
index 12687dd9e1ac5f758f8bdb126eb5648caabb1bdf..3eda026ffac6a9e654dd7def6e34eca8b7e34676 100644 (file)
@@ -84,7 +84,7 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
  */
 
 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
-                                            unsigned int type)
+                                            enum drm_minor_type type)
 {
        switch (type) {
        case DRM_MINOR_PRIMARY:
@@ -116,7 +116,7 @@ static void drm_minor_alloc_release(struct drm_device *dev, void *data)
        }
 }
 
-static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
 {
        struct drm_minor *minor;
        unsigned long flags;
@@ -160,7 +160,7 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
        return 0;
 }
 
-static int drm_minor_register(struct drm_device *dev, unsigned int type)
+static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
 {
        struct drm_minor *minor;
        unsigned long flags;
@@ -203,7 +203,7 @@ err_debugfs:
        return ret;
 }
 
-static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
 {
        struct drm_minor *minor;
        unsigned long flags;
index e0dbd9140726b336669db2db854b2556a463ebd1..f95152fac4270fa65bde655d3de32df70521cff7 100644 (file)
@@ -230,6 +230,7 @@ static const struct edid_quirk {
 
        /* OSVR HDK and HDK2 VR Headsets */
        EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
+       EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
 };
 
 /*
@@ -3962,7 +3963,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
        struct drm_display_mode *newmode;
        struct drm_device *dev = connector->dev;
        const struct cvt_timing *cvt;
-       const int rates[] = { 60, 85, 75, 60, 50 };
+       static const int rates[] = { 60, 85, 75, 60, 50 };
        const u8 empty[3] = { 0, 0, 0 };
 
        for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
new file mode 100644 (file)
index 0000000..ff69cf0
--- /dev/null
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-resv.h>
+
+/**
+ * DOC: Overview
+ *
+ * This component mainly abstracts the retry loop necessary for locking
+ * multiple GEM objects while preparing hardware operations (e.g. command
+ * submissions, page table updates, etc.).
+ *
+ * If contention is detected while locking a GEM object, the cleanup procedure
+ * unlocks all previously locked GEM objects and locks the contended one first
+ * before locking any further objects.
+ *
+ * After an object is locked, fence slots can optionally be reserved on the
+ * dma_resv object inside the GEM object.
+ *
+ * A typical usage pattern should look like this::
+ *
+ *     struct drm_gem_object *obj;
+ *     struct drm_exec exec;
+ *     unsigned long index;
+ *     int ret;
+ *
+ *     drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ *     drm_exec_until_all_locked(&exec) {
+ *             ret = drm_exec_prepare_obj(&exec, boA, 1);
+ *             drm_exec_retry_on_contention(&exec);
+ *             if (ret)
+ *                     goto error;
+ *
+ *             ret = drm_exec_prepare_obj(&exec, boB, 1);
+ *             drm_exec_retry_on_contention(&exec);
+ *             if (ret)
+ *                     goto error;
+ *     }
+ *
+ *     drm_exec_for_each_locked_object(&exec, index, obj) {
+ *             dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ);
+ *             ...
+ *     }
+ *     drm_exec_fini(&exec);
+ *
+ * See struct drm_exec for more details.
+ */
+
+/* Dummy value used to initially enter the retry loop */
+#define DRM_EXEC_DUMMY ((void *)~0)
+
+/* Unlock all objects and drop references */
+static void drm_exec_unlock_all(struct drm_exec *exec)
+{
+       struct drm_gem_object *obj;
+       unsigned long index;
+
+       drm_exec_for_each_locked_object(exec, index, obj) {
+               dma_resv_unlock(obj->resv);
+               drm_gem_object_put(obj);
+       }
+
+       drm_gem_object_put(exec->prelocked);
+       exec->prelocked = NULL;
+}
+
+/**
+ * drm_exec_init - initialize a drm_exec object
+ * @exec: the drm_exec object to initialize
+ * @flags: controls locking behavior, see DRM_EXEC_* defines
+ *
+ * Initialize the object and make sure that we can track locked objects.
+ */
+void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+{
+       exec->flags = flags;
+       exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+       /* If allocation here fails, just delay that till the first use */
+       exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+       exec->num_objects = 0;
+       exec->contended = DRM_EXEC_DUMMY;
+       exec->prelocked = NULL;
+}
+EXPORT_SYMBOL(drm_exec_init);
+
+/**
+ * drm_exec_fini - finalize a drm_exec object
+ * @exec: the drm_exec object to finalize
+ *
+ * Unlock all locked objects, drop the references to objects and free all memory
+ * used for tracking the state.
+ */
+void drm_exec_fini(struct drm_exec *exec)
+{
+       drm_exec_unlock_all(exec);
+       kvfree(exec->objects);
+       if (exec->contended != DRM_EXEC_DUMMY) {
+               drm_gem_object_put(exec->contended);
+               ww_acquire_fini(&exec->ticket);
+       }
+}
+EXPORT_SYMBOL(drm_exec_fini);
+
+/**
+ * drm_exec_cleanup - cleanup when contention is detected
+ * @exec: the drm_exec object to cleanup
+ *
+ * Cleanup the current state and return true if we should stay inside the retry
+ * loop, false if there wasn't any contention detected and we can keep the
+ * objects locked.
+ */
+bool drm_exec_cleanup(struct drm_exec *exec)
+{
+       if (likely(!exec->contended)) {
+               ww_acquire_done(&exec->ticket);
+               return false;
+       }
+
+       if (likely(exec->contended == DRM_EXEC_DUMMY)) {
+               exec->contended = NULL;
+               ww_acquire_init(&exec->ticket, &reservation_ww_class);
+               return true;
+       }
+
+       drm_exec_unlock_all(exec);
+       exec->num_objects = 0;
+       return true;
+}
+EXPORT_SYMBOL(drm_exec_cleanup);
+
+/* Track the locked object in the array */
+static int drm_exec_obj_locked(struct drm_exec *exec,
+                              struct drm_gem_object *obj)
+{
+       if (unlikely(exec->num_objects == exec->max_objects)) {
+               size_t size = exec->max_objects * sizeof(void *);
+               void *tmp;
+
+               tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE,
+                               GFP_KERNEL);
+               if (!tmp)
+                       return -ENOMEM;
+
+               exec->objects = tmp;
+               exec->max_objects += PAGE_SIZE / sizeof(void *);
+       }
+       drm_gem_object_get(obj);
+       exec->objects[exec->num_objects++] = obj;
+
+       return 0;
+}
+
+/* Make sure the contended object is locked first */
+static int drm_exec_lock_contended(struct drm_exec *exec)
+{
+       struct drm_gem_object *obj = exec->contended;
+       int ret;
+
+       if (likely(!obj))
+               return 0;
+
+       /* Always cleanup the contention so that error handling can kick in */
+       exec->contended = NULL;
+       if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) {
+               ret = dma_resv_lock_slow_interruptible(obj->resv,
+                                                      &exec->ticket);
+               if (unlikely(ret))
+                       goto error_dropref;
+       } else {
+               dma_resv_lock_slow(obj->resv, &exec->ticket);
+       }
+
+       ret = drm_exec_obj_locked(exec, obj);
+       if (unlikely(ret))
+               goto error_unlock;
+
+       exec->prelocked = obj;
+       return 0;
+
+error_unlock:
+       dma_resv_unlock(obj->resv);
+
+error_dropref:
+       drm_gem_object_put(obj);
+       return ret;
+}
+
+/**
+ * drm_exec_lock_obj - lock a GEM object for use
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to lock
+ *
+ * Lock a GEM object for use and grab a reference to it.
+ *
+ * Returns: -EDEADLK if contention is detected, -EALREADY when the object is
+ * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES
+ * flag), -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
+{
+       int ret;
+
+       ret = drm_exec_lock_contended(exec);
+       if (unlikely(ret))
+               return ret;
+
+       if (exec->prelocked == obj) {
+               drm_gem_object_put(exec->prelocked);
+               exec->prelocked = NULL;
+               return 0;
+       }
+
+       if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+               ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket);
+       else
+               ret = dma_resv_lock(obj->resv, &exec->ticket);
+
+       if (unlikely(ret == -EDEADLK)) {
+               drm_gem_object_get(obj);
+               exec->contended = obj;
+               return -EDEADLK;
+       }
+
+       if (unlikely(ret == -EALREADY) &&
+           exec->flags & DRM_EXEC_IGNORE_DUPLICATES)
+               return 0;
+
+       if (unlikely(ret))
+               return ret;
+
+       ret = drm_exec_obj_locked(exec, obj);
+       if (ret)
+               goto error_unlock;
+
+       return 0;
+
+error_unlock:
+       dma_resv_unlock(obj->resv);
+       return ret;
+}
+EXPORT_SYMBOL(drm_exec_lock_obj);
+
+/**
+ * drm_exec_unlock_obj - unlock a GEM object in this exec context
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to unlock
+ *
+ * Unlock the GEM object and remove it from the collection of locked objects.
+ * Should only be used to unlock the most recently locked objects; it is not
+ * time-efficient to unlock objects locked long ago.
+ */
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
+{
+       unsigned int i;
+
+       for (i = exec->num_objects; i--;) {
+               if (exec->objects[i] == obj) {
+                       dma_resv_unlock(obj->resv);
+                       for (++i; i < exec->num_objects; ++i)
+                               exec->objects[i - 1] = exec->objects[i];
+                       --exec->num_objects;
+                       drm_gem_object_put(obj);
+                       return;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_exec_unlock_obj);
+
+/**
+ * drm_exec_prepare_obj - prepare a GEM object for use
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to prepare
+ * @num_fences: how many fences to reserve
+ *
+ * Prepare a GEM object for use by locking it and reserving fence slots.
+ *
+ * Returns: -EDEADLK if contention is detected, -EALREADY when the object is
+ * already locked, -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+                        unsigned int num_fences)
+{
+       int ret;
+
+       ret = drm_exec_lock_obj(exec, obj);
+       if (ret)
+               return ret;
+
+       ret = dma_resv_reserve_fences(obj->resv, num_fences);
+       if (ret) {
+               drm_exec_unlock_obj(exec, obj);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_exec_prepare_obj);
+
+/**
+ * drm_exec_prepare_array - helper to prepare an array of objects
+ * @exec: the drm_exec object with the state
+ * @objects: array of GEM object to prepare
+ * @num_objects: number of GEM objects in the array
+ * @num_fences: number of fences to reserve on each GEM object
+ *
+ * Prepares all GEM objects in an array, aborts on first error.
+ * Reserves @num_fences on each GEM object after locking it.
+ *
+ * Returns: -EDEADLK on contention, -EALREADY when an object is already locked,
+ * -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_prepare_array(struct drm_exec *exec,
+                          struct drm_gem_object **objects,
+                          unsigned int num_objects,
+                          unsigned int num_fences)
+{
+       int ret;
+
+       for (unsigned int i = 0; i < num_objects; ++i) {
+               ret = drm_exec_prepare_obj(exec, objects[i], num_fences);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_exec_prepare_array);
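
Combined with the retry loop from the DOC comment, drm_exec_prepare_array() shrinks the common "lock a user-supplied BO list" pattern to a few lines; a sketch assuming caller-provided objs[]/num_objs and one fence slot per object:

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
            ret = drm_exec_prepare_array(&exec, objs, num_objs, 1);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                    goto out;
    }

    /* ... all objects locked: add fences, submit work ... */
out:
    drm_exec_fini(&exec);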
+
+MODULE_DESCRIPTION("DRM execution context");
+MODULE_LICENSE("Dual MIT/GPL");
index d86773fa8ab00f49380a8d008745d34acd16d89f..6c9427bb4053ba4b9d130a8d6adc9151b3b61e80 100644 (file)
@@ -54,21 +54,17 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        struct drm_fb_helper *fb_helper = info->par;
-       struct drm_device *dev = fb_helper->dev;
-
-       if (drm_WARN_ON_ONCE(dev, !fb_helper->dev->driver->gem_prime_mmap))
-               return -ENODEV;
 
-       return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+       return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
 }
 
 static const struct fb_ops drm_fbdev_dma_fb_ops = {
        .owner = THIS_MODULE,
        .fb_open = drm_fbdev_dma_fb_open,
        .fb_release = drm_fbdev_dma_fb_release,
-       __FB_DEFAULT_SYS_OPS_RDWR,
+       __FB_DEFAULT_DMAMEM_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
-       __FB_DEFAULT_SYS_OPS_DRAW,
+       __FB_DEFAULT_DMAMEM_OPS_DRAW,
        .fb_mmap = drm_fbdev_dma_fb_mmap,
        .fb_destroy = drm_fbdev_dma_fb_destroy,
 };
@@ -127,7 +123,6 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
        drm_fb_helper_fill_info(info, fb_helper, sizes);
 
        info->fbops = &drm_fbdev_dma_fb_ops;
-       info->flags = FBINFO_DEFAULT;
 
        /* screen */
        info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -217,7 +212,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
  * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device.
- *                 @dev->mode_config.preferred_depth is used if this is zero.
+ *                 32 is used if this is zero.
  *
  * This function sets up fbdev emulation for GEM DMA drivers that support
  * dumb buffers with a virtual address and that can be mmap'ed.
@@ -252,10 +247,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
                goto err_drm_client_init;
        }
 
-       ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
index 98ae703848a02fa34b947de96511853fab4fbee3..d647d89764cb9894411af5927d61da968e723dad 100644 (file)
@@ -34,9 +34,9 @@ static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
        return 0;
 }
 
-FB_GEN_DEFAULT_DEFERRED_SYS_OPS(drm_fbdev_generic,
-                               drm_fb_helper_damage_range,
-                               drm_fb_helper_damage_area);
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(drm_fbdev_generic,
+                                  drm_fb_helper_damage_range,
+                                  drm_fb_helper_damage_area);
 
 static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
 {
@@ -109,7 +109,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
        drm_fb_helper_fill_info(info, fb_helper, sizes);
 
        info->fbops = &drm_fbdev_generic_fb_ops;
-       info->flags = FBINFO_DEFAULT;
 
        /* screen */
        info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
@@ -339,10 +338,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
                goto err_drm_client_init;
        }
 
-       ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
index 78dcae201cc64066e79441508ccca8ba4e879c4f..6129b89bb36618db7686cec9cc0a7a4c8d9d5410 100644 (file)
@@ -164,6 +164,9 @@ void drm_gem_private_object_init(struct drm_device *dev,
        if (!obj->resv)
                obj->resv = &obj->_resv;
 
+       if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
+               drm_gem_gpuva_init(obj);
+
        drm_vma_node_reset(&obj->vma_node);
        INIT_LIST_HEAD(&obj->lru_node);
 }
@@ -1160,8 +1163,8 @@ int drm_gem_pin(struct drm_gem_object *obj)
 {
        if (obj->funcs->pin)
                return obj->funcs->pin(obj);
-       else
-               return 0;
+
+       return 0;
 }
 
 void drm_gem_unpin(struct drm_gem_object *obj)
index b8a615a138cd675f9a832a4393d760f852be32ca..3bdb6ba37ff42fb6d8fc0715358863e83061de5a 100644 (file)
@@ -168,8 +168,8 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
        if (drm_drv_uses_atomic_modeset(dev) &&
            !drm_any_plane_has_format(dev, mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
-               drm_dbg(dev, "Unsupported pixel format %p4cc / modifier 0x%llx\n",
-                       &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+               drm_dbg_kms(dev, "Unsupported pixel format %p4cc / modifier 0x%llx\n",
+                           &mode_cmd->pixel_format, mode_cmd->modifier[0]);
                return -EINVAL;
        }
 
index 4ea6507a77e5d2edeb65a3b61ae24c2c5eae454a..a783d2245599eeadbb80d91785c4bcdfc1f75e9b 100644 (file)
@@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
        if (ret)
                goto err_release;
 
-       mutex_init(&shmem->pages_lock);
-       mutex_init(&shmem->vmap_lock);
        INIT_LIST_HEAD(&shmem->madv_list);
 
        if (!private) {
@@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
 
-       drm_WARN_ON(obj->dev, shmem->vmap_use_count);
-
        if (obj->import_attach) {
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
+               dma_resv_lock(shmem->base.resv, NULL);
+
+               drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+
                if (shmem->sgt) {
                        dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
                                          DMA_BIDIRECTIONAL, 0);
@@ -154,22 +154,24 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
                }
                if (shmem->pages)
                        drm_gem_shmem_put_pages(shmem);
-       }
 
-       drm_WARN_ON(obj->dev, shmem->pages_use_count);
+               drm_WARN_ON(obj->dev, shmem->pages_use_count);
+
+               dma_resv_unlock(shmem->base.resv);
+       }
 
        drm_gem_object_release(obj);
-       mutex_destroy(&shmem->pages_lock);
-       mutex_destroy(&shmem->vmap_lock);
        kfree(shmem);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
-static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct page **pages;
 
+       dma_resv_assert_held(shmem->base.resv);
+
        if (shmem->pages_use_count++ > 0)
                return 0;
 
@@ -197,35 +199,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 }
 
 /*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
  */
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
-       int ret;
 
-       drm_WARN_ON(obj->dev, obj->import_attach);
-
-       ret = mutex_lock_interruptible(&shmem->pages_lock);
-       if (ret)
-               return ret;
-       ret = drm_gem_shmem_get_pages_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
-
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
-{
-       struct drm_gem_object *obj = &shmem->base;
+       dma_resv_assert_held(shmem->base.resv);
 
        if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                return;
@@ -243,20 +226,25 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
                          shmem->pages_mark_accessed_on_put);
        shmem->pages = NULL;
 }
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
 
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
 {
-       mutex_lock(&shmem->pages_lock);
-       drm_gem_shmem_put_pages_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
+       int ret;
+
+       dma_resv_assert_held(shmem->base.resv);
+
+       ret = drm_gem_shmem_get_pages(shmem);
+
+       return ret;
+}
+
+static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
+{
+       dma_resv_assert_held(shmem->base.resv);
+
+       drm_gem_shmem_put_pages(shmem);
 }
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
 
 /**
  * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
@@ -271,10 +259,17 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
+       int ret;
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       return drm_gem_shmem_get_pages(shmem);
+       ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+       if (ret)
+               return ret;
+       ret = drm_gem_shmem_pin_locked(shmem);
+       dma_resv_unlock(shmem->base.resv);
+
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_shmem_pin);
 
@@ -291,12 +286,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       drm_gem_shmem_put_pages(shmem);
+       dma_resv_lock(shmem->base.resv, NULL);
+       drm_gem_shmem_unpin_locked(shmem);
+       dma_resv_unlock(shmem->base.resv);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
 
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
-                                    struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+                      struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
        int ret = 0;
@@ -312,6 +324,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
        } else {
                pgprot_t prot = PAGE_KERNEL;
 
+               dma_resv_assert_held(shmem->base.resv);
+
                if (shmem->vmap_use_count++ > 0) {
                        iosys_map_set_vaddr(map, shmem->vaddr);
                        return 0;
@@ -346,45 +360,30 @@ err_zero_use:
 
        return ret;
 }
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
 
 /*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- *       store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
  *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
  */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-                      struct iosys_map *map)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&shmem->vmap_lock);
-       if (ret)
-               return ret;
-       ret = drm_gem_shmem_vmap_locked(shmem, map);
-       mutex_unlock(&shmem->vmap_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
-                                       struct iosys_map *map)
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+                         struct iosys_map *map)
 {
        struct drm_gem_object *obj = &shmem->base;
 
        if (obj->import_attach) {
                dma_buf_vunmap(obj->import_attach->dmabuf, map);
        } else {
+               dma_resv_assert_held(shmem->base.resv);
+
                if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
                        return;
 
@@ -397,26 +396,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 
        shmem->vaddr = NULL;
 }
-
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-                         struct iosys_map *map)
-{
-       mutex_lock(&shmem->vmap_lock);
-       drm_gem_shmem_vunmap_locked(shmem, map);
-       mutex_unlock(&shmem->vmap_lock);
-}
 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
 
 static int
@@ -447,24 +426,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
  */
 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
 {
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_assert_held(shmem->base.resv);
 
        if (shmem->madv >= 0)
                shmem->madv = madv;
 
        madv = shmem->madv;
 
-       mutex_unlock(&shmem->pages_lock);
-
        return (madv >= 0);
 }
 EXPORT_SYMBOL(drm_gem_shmem_madvise);
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 {
        struct drm_gem_object *obj = &shmem->base;
        struct drm_device *dev = obj->dev;
 
+       dma_resv_assert_held(shmem->base.resv);
+
        drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
        dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
@@ -472,7 +451,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
        kfree(shmem->sgt);
        shmem->sgt = NULL;
 
-       drm_gem_shmem_put_pages_locked(shmem);
+       drm_gem_shmem_put_pages(shmem);
 
        shmem->madv = -1;
 
@@ -488,17 +467,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 
        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
 }
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
-       if (!mutex_trylock(&shmem->pages_lock))
-               return false;
-       drm_gem_shmem_purge_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
-
-       return true;
-}
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
 /**
@@ -551,7 +519,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_lock(shmem->base.resv, NULL);
 
        if (page_offset >= num_pages ||
            drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
@@ -563,7 +531,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
                ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
        }
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        return ret;
 }
@@ -575,7 +543,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       mutex_lock(&shmem->pages_lock);
+       dma_resv_lock(shmem->base.resv, NULL);
 
        /*
         * We should have already pinned the pages when the buffer was first
@@ -585,7 +553,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
        if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
                shmem->pages_use_count++;
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        drm_gem_vm_open(vma);
 }
@@ -595,7 +563,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+       dma_resv_lock(shmem->base.resv, NULL);
        drm_gem_shmem_put_pages(shmem);
+       dma_resv_unlock(shmem->base.resv);
+
        drm_gem_vm_close(vma);
 }
 
@@ -633,7 +604,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
                return ret;
        }
 
+       dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_get_pages(shmem);
+       dma_resv_unlock(shmem->base.resv);
+
        if (ret)
                return ret;
 
@@ -699,7 +673,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 
        drm_WARN_ON(obj->dev, obj->import_attach);
 
-       ret = drm_gem_shmem_get_pages_locked(shmem);
+       ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                return ERR_PTR(ret);
 
@@ -721,7 +695,7 @@ err_free_sgt:
        sg_free_table(sgt);
        kfree(sgt);
 err_put_pages:
-       drm_gem_shmem_put_pages_locked(shmem);
+       drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
 }
 
@@ -746,11 +720,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
        int ret;
        struct sg_table *sgt;
 
-       ret = mutex_lock_interruptible(&shmem->pages_lock);
+       ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ERR_PTR(ret);
        sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
        return sgt;
 }
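
With this conversion the shmem helpers serialize on the GEM object's dma_resv
lock instead of the removed pages_lock and vmap_lock mutexes, so callers of the
now lock-asserting helpers must hold the reservation lock themselves. As an
illustrative sketch, mirroring the drm_gem_shmem_mmap() hunk above:

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);
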
diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c
new file mode 100644 (file)
index 0000000..f86bfad
--- /dev/null
@@ -0,0 +1,1725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Danilo Krummrich <dakr@redhat.com>
+ *
+ */
+
+#include <drm/drm_gpuva_mgr.h>
+
+#include <linux/interval_tree_generic.h>
+#include <linux/mm.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM GPU VA Manager, represented by struct drm_gpuva_manager keeps track
+ * of a GPU's virtual address (VA) space and manages the corresponding virtual
+ * mappings represented by &drm_gpuva objects. It also keeps track of the
+ * mapping's backing &drm_gem_object buffers.
+ *
+ * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
+ * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ *
+ * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
+ * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
+ *
+ * The GPU VA manager internally uses a rb-tree to manage the
+ * &drm_gpuva mappings within a GPU's virtual address space.
+ *
+ * The &drm_gpuva_manager contains a special &drm_gpuva representing the
+ * portion of VA space reserved by the kernel. This node is initialized together
+ * with the GPU VA manager instance and removed when the GPU VA manager is
+ * destroyed.
+ *
+ * In a typical application, drivers would embed struct drm_gpuva_manager and
+ * struct drm_gpuva within their own driver specific structures; hence, the
+ * manager performs no memory allocations of its own, nor any memory
+ * allocations for &drm_gpuva entries.
+ *
+ * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
+ * are contained within struct drm_gpuva already. Hence, for inserting
+ * &drm_gpuva entries from within dma-fence signalling critical sections it is
+ * enough to pre-allocate the &drm_gpuva structures.
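+ *
+ * As a purely illustrative sketch (assuming a driver-provided
+ * &drm_gpuva_fn_ops instance called driver_gpuva_ops), such an embedding and
+ * its initialization could look like this::
+ *
+ *     struct driver_vm {
+ *             struct drm_gpuva_manager mgr;
+ *             // driver private state
+ *     };
+ *
+ *     // the containing structure must be cleared to 0 (e.g. kzalloc()'d)
+ *     // before calling drm_gpuva_manager_init()
+ *     drm_gpuva_manager_init(&vm->mgr, "driver-vm",
+ *                            0, 1ull << 47,
+ *                            0, 0x1000,
+ *                            &driver_gpuva_ops);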
+ */
+
+/**
+ * DOC: Split and Merge
+ *
+ * Besides its capability to manage and represent a GPU VA space, the
+ * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
+ * calculate a sequence of operations to satisfy a given map or unmap request.
+ *
+ * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
+ * and merging of existent GPU VA mappings with the ones that are requested to
+ * be mapped or unmapped. This feature is required by the Vulkan API to
+ * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
+ * as VM BIND.
+ *
+ * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
+ * containing map, unmap and remap operations for a given newly requested
+ * mapping. The sequence of callbacks represents the set of operations to
+ * execute in order to integrate the new mapping cleanly into the current state
+ * of the GPU VA space.
+ *
+ * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * of the GPU VA space, the &drm_gpuva_fn_ops callbacks contain an arbitrary
+ * number of unmap operations, a maximum of two remap operations and a single
+ * map operation. The caller might receive no callback at all if no operation is
+ * required, e.g. if the requested mapping already exists in the exact same way.
+ *
+ * The single map operation represents the original map operation requested by
+ * the caller.
+ *
+ * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
+ * &drm_gpuva to unmap is physically contiguous with the original mapping
+ * request. Optionally, if 'keep' is set, drivers may keep the actual page table
+ * entries for this &drm_gpuva, adding only the missing page table entries, and
+ * updating the &drm_gpuva_manager's view of things accordingly.
+ *
+ * Drivers may do the same optimization, namely delta page table updates, also
+ * for remap operations. This is possible since &drm_gpuva_op_remap consists of
+ * one unmap operation and one or two map operations, such that drivers can
+ * derive the page table update delta accordingly.
+ *
+ * Note that there can't be more than two existent mappings to split up, one at
+ * the beginning and one at the end of the new mapping; hence, there is a
+ * maximum of two remap operations.
+ *
+ * Analogous to drm_gpuva_sm_map() drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
+ * to call back into the driver in order to unmap a range of GPU VA space. The
+ * logic behind this function is much simpler though: for all existent mappings
+ * enclosed by the given range, unmap operations are created. For mappings which
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
+ *
+ * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
+ * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
+ * to directly obtain an instance of struct drm_gpuva_ops containing a list of
+ * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
+ * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
+ * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
+ * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
+ * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
+ * allocations are possible (e.g. to allocate GPU page tables) and once in the
+ * dma-fence signalling critical path.
+ *
+ * To update the &drm_gpuva_manager's view of the GPU VA space
+ * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
+ * safely be used from &drm_gpuva_fn_ops callbacks originating from
+ * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
+ * convenient to use the provided helper functions drm_gpuva_map(),
+ * drm_gpuva_remap() and drm_gpuva_unmap() instead.
+ *
+ * The following diagram depicts the basic relationships of existent GPU VA
+ * mappings, a newly requested mapping and the resulting mappings as implemented
+ * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
+ *
+ * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
+ *    could be kept.
+ *
+ *    ::
+ *
+ *          0     a     1
+ *     old: |-----------| (bo_offset=n)
+ *
+ *          0     a     1
+ *     req: |-----------| (bo_offset=n)
+ *
+ *          0     a     1
+ *     new: |-----------| (bo_offset=n)
+ *
+ *
+ * 2) Requested mapping is identical, except for the BO offset, hence replace
+ *    the mapping.
+ *
+ *    ::
+ *
+ *          0     a     1
+ *     old: |-----------| (bo_offset=n)
+ *
+ *          0     a     1
+ *     req: |-----------| (bo_offset=m)
+ *
+ *          0     a     1
+ *     new: |-----------| (bo_offset=m)
+ *
+ *
+ * 3) Requested mapping is identical, except for the backing BO, hence replace
+ *    the mapping.
+ *
+ *    ::
+ *
+ *          0     a     1
+ *     old: |-----------| (bo_offset=n)
+ *
+ *          0     b     1
+ *     req: |-----------| (bo_offset=n)
+ *
+ *          0     b     1
+ *     new: |-----------| (bo_offset=n)
+ *
+ *
+ * 4) Existent mapping is a left aligned subset of the requested one, hence
+ *    replace the existent one.
+ *
+ *    ::
+ *
+ *          0  a  1
+ *     old: |-----|       (bo_offset=n)
+ *
+ *          0     a     2
+ *     req: |-----------| (bo_offset=n)
+ *
+ *          0     a     2
+ *     new: |-----------| (bo_offset=n)
+ *
+ *    .. note::
+ *       We expect to see the same result for a request with a different BO
+ *       and/or non-contiguous BO offset.
+ *
+ *
+ * 5) Requested mapping's range is a left aligned subset of the existent one,
+ *    but backed by a different BO. Hence, map the requested mapping and split
+ *    the existent one adjusting its BO offset.
+ *
+ *    ::
+ *
+ *          0     a     2
+ *     old: |-----------| (bo_offset=n)
+ *
+ *          0  b  1
+ *     req: |-----|       (bo_offset=n)
+ *
+ *          0  b  1  a' 2
+ *     new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
+ *
+ *    .. note::
+ *       We expect to see the same result for a request with a different BO
+ *       and/or non-contiguous BO offset.
+ *
+ *
+ * 6) Existent mapping is a superset of the requested mapping. Split it up, but
+ *    indicate that the backing PTEs could be kept.
+ *
+ *    ::
+ *
+ *          0     a     2
+ *     old: |-----------| (bo_offset=n)
+ *
+ *          0  a  1
+ *     req: |-----|       (bo_offset=n)
+ *
+ *          0  a  1  a' 2
+ *     new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
+ *
+ *
+ * 7) Requested mapping's range is a right aligned subset of the existent one,
+ *    but backed by a different BO. Hence, map the requested mapping and split
+ *    the existent one, without adjusting the BO offset.
+ *
+ *    ::
+ *
+ *          0     a     2
+ *     old: |-----------| (bo_offset=n)
+ *
+ *                1  b  2
+ *     req:       |-----| (bo_offset=m)
+ *
+ *          0  a  1  b  2
+ *     new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
+ *
+ *
+ * 8) Existent mapping is a superset of the requested mapping. Split it up, but
+ *    indicate that the backing PTEs could be kept.
+ *
+ *    ::
+ *
+ *          0     a     2
+ *     old: |-----------| (bo_offset=n)
+ *
+ *                1  a  2
+ *     req:       |-----| (bo_offset=n+1)
+ *
+ *          0  a' 1  a  2
+ *     new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
+ *
+ *
+ * 9) Existent mapping is overlapped at the end by the requested mapping backed
+ *    by a different BO. Hence, map the requested mapping and split up the
+ *    existent one, without adjusting the BO offset.
+ *
+ *    ::
+ *
+ *          0     a     2
+ *     old: |-----------|       (bo_offset=n)
+ *
+ *                1     b     3
+ *     req:       |-----------| (bo_offset=m)
+ *
+ *          0  a  1     b     3
+ *     new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
+ *
+ *
+ * 10) Existent mapping is overlapped by the requested mapping, both having the
+ *     same backing BO with a contiguous offset. Indicate the backing PTEs of
+ *     the old mapping could be kept.
+ *
+ *     ::
+ *
+ *           0     a     2
+ *      old: |-----------|       (bo_offset=n)
+ *
+ *                 1     a     3
+ *      req:       |-----------| (bo_offset=n+1)
+ *
+ *           0  a' 1     a     3
+ *      new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
+ *
+ *
+ * 11) Requested mapping's range is a centered subset of the existent one
+ *     having a different backing BO. Hence, map the requested mapping and split
+ *     up the existent one in two mappings, adjusting the BO offset of the right
+ *     one accordingly.
+ *
+ *     ::
+ *
+ *           0        a        3
+ *      old: |-----------------| (bo_offset=n)
+ *
+ *                 1  b  2
+ *      req:       |-----|       (bo_offset=m)
+ *
+ *           0  a  1  b  2  a' 3
+ *      new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
+ *
+ *
+ * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ *     up, but indicate that the backing PTEs could be kept.
+ *
+ *     ::
+ *
+ *           0        a        3
+ *      old: |-----------------| (bo_offset=n)
+ *
+ *                 1  a  2
+ *      req:       |-----|       (bo_offset=n+1)
+ *
+ *           0  a' 1  a  2 a'' 3
+ *      new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
+ *
+ *
+ * 13) Existent mapping is a right aligned subset of the requested one, hence
+ *     replace the existent one.
+ *
+ *     ::
+ *
+ *                 1  a  2
+ *      old:       |-----| (bo_offset=n+1)
+ *
+ *           0     a     2
+ *      req: |-----------| (bo_offset=n)
+ *
+ *           0     a     2
+ *      new: |-----------| (bo_offset=n)
+ *
+ *     .. note::
+ *        We expect to see the same result for a request with a different bo
+ *        and/or non-contiguous bo_offset.
+ *
+ *
+ * 14) Existent mapping is a centered subset of the requested one, hence
+ *     replace the existent one.
+ *
+ *     ::
+ *
+ *                 1  a  2
+ *      old:       |-----| (bo_offset=n+1)
+ *
+ *           0        a       3
+ *      req: |----------------| (bo_offset=n)
+ *
+ *           0        a       3
+ *      new: |----------------| (bo_offset=n)
+ *
+ *     .. note::
+ *        We expect to see the same result for a request with a different bo
+ *        and/or non-contiguous bo_offset.
+ *
+ *
+ * 15) Existent mapping is overlapped at the beginning by the requested mapping
+ *     backed by a different BO. Hence, map the requested mapping and split up
+ *     the existent one, adjusting its BO offset accordingly.
+ *
+ *     ::
+ *
+ *                 1     a     3
+ *      old:       |-----------| (bo_offset=n)
+ *
+ *           0     b     2
+ *      req: |-----------|       (bo_offset=m)
+ *
+ *           0     b     2  a' 3
+ *      new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2)
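+ *
+ *
+ * To connect the diagrams to the generated operations: for case 11 above, the
+ * request for mapping b would - as an illustrative sketch - result in a single
+ * remap operation carrying both a prev part (a) and a next part (a'), followed
+ * by the map operation for b itself::
+ *
+ *     ops = drm_gpuva_sm_map_ops_create(mgr, addr_1, range_b, bo_b, m);
+ *     // ops then contains:
+ *     //   DRM_GPUVA_OP_REMAP: prev = a, next = a', unmap = old a
+ *     //   DRM_GPUVA_OP_MAP:   the requested mapping b
+ *
+ * where addr_1, range_b, bo_b and m are placeholders for the values shown in
+ * the diagram of case 11.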
+ */
+
+/**
+ * DOC: Locking
+ *
+ * Generally, the GPU VA manager does not take care of locking itself; it is
+ * the driver's responsibility to take care of locking. Drivers might want to
+ * protect the following operations: inserting, removing and iterating
+ * &drm_gpuva objects as well as generating all kinds of operations, such as
+ * split / merge or prefetch.
+ *
+ * The GPU VA manager also does not take care of the locking of the backing
+ * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
+ * enforcing mutual exclusion using either the GEM's dma_resv lock or,
+ * alternatively, a driver specific external lock. For the latter, see also
+ * drm_gem_gpuva_set_lock().
+ *
+ * However, the GPU VA manager contains lockdep checks to ensure callers of its
+ * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
+ * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
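+ *
+ * As a purely illustrative sketch (structure and lock names made up), a driver
+ * using such an external lock would register it once, e.g. at GEM object
+ * creation time::
+ *
+ *     drm_gem_gpuva_set_lock(obj, &drv->gpuva_lock);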
+ */
+
+/**
+ * DOC: Examples
+ *
+ * This section gives two examples on how to let the DRM GPUVA Manager generate
+ * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
+ * make use of them.
+ *
+ * The below code is strictly limited to illustrating the generic usage pattern.
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
+ * any other helpers or error handling in terms of freeing memory and dropping
+ * previously taken locks.
+ *
+ * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
+ *
+ *     // Allocates a new &drm_gpuva.
+ *     struct drm_gpuva * driver_gpuva_alloc(void);
+ *
+ *     // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
+ *     // structure in individual driver structures and lock the dma-resv with
+ *     // drm_exec or similar helpers.
+ *     int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ *                               u64 addr, u64 range,
+ *                               struct drm_gem_object *obj, u64 offset)
+ *     {
+ *             struct drm_gpuva_ops *ops;
+ *             struct drm_gpuva_op *op;
+ *
+ *             driver_lock_va_space();
+ *             ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
+ *                                               obj, offset);
+ *             if (IS_ERR(ops))
+ *                     return PTR_ERR(ops);
+ *
+ *             drm_gpuva_for_each_op(op, ops) {
+ *                     struct drm_gpuva *va;
+ *
+ *                     switch (op->op) {
+ *                     case DRM_GPUVA_OP_MAP:
+ *                             va = driver_gpuva_alloc();
+ *                             if (!va)
+ *                                     ; // unwind previous VA space updates,
+ *                                       // free memory and unlock
+ *
+ *                             driver_vm_map();
+ *                             drm_gpuva_map(mgr, va, &op->map);
+ *                             drm_gpuva_link(va);
+ *
+ *                             break;
+ *                     case DRM_GPUVA_OP_REMAP: {
+ *                             struct drm_gpuva *prev = NULL, *next = NULL;
+ *
+ *                             va = op->remap.unmap->va;
+ *
+ *                             if (op->remap.prev) {
+ *                                     prev = driver_gpuva_alloc();
+ *                                     if (!prev)
+ *                                             ; // unwind previous VA space
+ *                                               // updates, free memory and
+ *                                               // unlock
+ *                             }
+ *
+ *                             if (op->remap.next) {
+ *                                     next = driver_gpuva_alloc();
+ *                                     if (!next)
+ *                                             ; // unwind previous VA space
+ *                                               // updates, free memory and
+ *                                               // unlock
+ *                             }
+ *
+ *                             driver_vm_remap();
+ *                             drm_gpuva_remap(prev, next, &op->remap);
+ *
+ *                             drm_gpuva_unlink(va);
+ *                             if (prev)
+ *                                     drm_gpuva_link(prev);
+ *                             if (next)
+ *                                     drm_gpuva_link(next);
+ *
+ *                             break;
+ *                     }
+ *                     case DRM_GPUVA_OP_UNMAP:
+ *                             va = op->unmap.va;
+ *
+ *                             driver_vm_unmap();
+ *                             drm_gpuva_unlink(va);
+ *                             drm_gpuva_unmap(&op->unmap);
+ *
+ *                             break;
+ *                     default:
+ *                             break;
+ *                     }
+ *             }
+ *             driver_unlock_va_space();
+ *
+ *             return 0;
+ *     }
+ *
+ * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
+ *
+ *     struct driver_context {
+ *             struct drm_gpuva_manager *mgr;
+ *             struct drm_gpuva *new_va;
+ *             struct drm_gpuva *prev_va;
+ *             struct drm_gpuva *next_va;
+ *     };
+ *
+ *     // ops to pass to drm_gpuva_manager_init()
+ *     static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
+ *             .sm_step_map = driver_gpuva_map,
+ *             .sm_step_remap = driver_gpuva_remap,
+ *             .sm_step_unmap = driver_gpuva_unmap,
+ *     };
+ *
+ *     // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
+ *     // structure in individual driver structures and lock the dma-resv with
+ *     // drm_exec or similar helpers.
+ *     int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ *                               u64 addr, u64 range,
+ *                               struct drm_gem_object *obj, u64 offset)
+ *     {
+ *             struct driver_context ctx;
+ *             struct drm_gpuva_ops *ops;
+ *             struct drm_gpuva_op *op;
+ *             int ret = 0;
+ *
+ *             ctx.mgr = mgr;
+ *
+ *             ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
+ *             ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
+ *             ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
+ *             if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
+ *                     ret = -ENOMEM;
+ *                     goto out;
+ *             }
+ *
+ *             driver_lock_va_space();
+ *             ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
+ *             driver_unlock_va_space();
+ *
+ *     out:
+ *             kfree(ctx.new_va);
+ *             kfree(ctx.prev_va);
+ *             kfree(ctx.next_va);
+ *             return ret;
+ *     }
+ *
+ *     int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
+ *     {
+ *             struct driver_context *ctx = __ctx;
+ *
+ *             drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
+ *
+ *             drm_gpuva_link(ctx->new_va);
+ *
+ *             // prevent the new GPUVA from being freed in
+ *             // driver_mapping_create()
+ *             ctx->new_va = NULL;
+ *
+ *             return 0;
+ *     }
+ *
+ *     int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
+ *     {
+ *             struct driver_context *ctx = __ctx;
+ *
+ *             drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
+ *
+ *             drm_gpuva_unlink(op->remap.unmap->va);
+ *             kfree(op->remap.unmap->va);
+ *
+ *             if (op->remap.prev) {
+ *                     drm_gpuva_link(ctx->prev_va);
+ *                     ctx->prev_va = NULL;
+ *             }
+ *
+ *             if (op->remap.next) {
+ *                     drm_gpuva_link(ctx->next_va);
+ *                     ctx->next_va = NULL;
+ *             }
+ *
+ *             return 0;
+ *     }
+ *
+ *     int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
+ *     {
+ *             drm_gpuva_unlink(op->unmap.va);
+ *             drm_gpuva_unmap(&op->unmap);
+ *             kfree(op->unmap.va);
+ *
+ *             return 0;
+ *     }
+ */
+
+#define to_drm_gpuva(__node)   container_of((__node), struct drm_gpuva, rb.node)
+
+#define GPUVA_START(node) ((node)->va.addr)
+#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
+
+/* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
+ * about this.
+ */
+INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
+                    GPUVA_START, GPUVA_LAST, static __maybe_unused,
+                    drm_gpuva_it)
+
+static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+                             struct drm_gpuva *va);
+static void __drm_gpuva_remove(struct drm_gpuva *va);
+
+static bool
+drm_gpuva_check_overflow(u64 addr, u64 range)
+{
+       u64 end;
+
+       return WARN(check_add_overflow(addr, range, &end),
+                   "GPUVA address limited to %zu bytes.\n", sizeof(end));
+}
+
+static bool
+drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+       u64 end = addr + range;
+       u64 mm_start = mgr->mm_start;
+       u64 mm_end = mm_start + mgr->mm_range;
+
+       return addr >= mm_start && end <= mm_end;
+}
+
+static bool
+drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+       u64 end = addr + range;
+       u64 kstart = mgr->kernel_alloc_node.va.addr;
+       u64 krange = mgr->kernel_alloc_node.va.range;
+       u64 kend = kstart + krange;
+
+       return krange && addr < kend && kstart < end;
+}
+
+static bool
+drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
+                     u64 addr, u64 range)
+{
+       return !drm_gpuva_check_overflow(addr, range) &&
+              drm_gpuva_in_mm_range(mgr, addr, range) &&
+              !drm_gpuva_in_kernel_node(mgr, addr, range);
+}
+
+/**
+ * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
+ * @mgr: pointer to the &drm_gpuva_manager to initialize
+ * @name: the name of the GPU VA space
+ * @start_offset: the start offset of the GPU VA space
+ * @range: the size of the GPU VA space
+ * @reserve_offset: the start of the kernel reserved GPU VA area
+ * @reserve_range: the size of the kernel reserved GPU VA area
+ * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
+ *
+ * The &drm_gpuva_manager must be initialized with this function before use.
+ *
+ * Note that @mgr must be cleared to 0 before calling this function. The given
+ * &name is expected to be managed by the surrounding driver structures.
+ */
+void
+drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
+                      const char *name,
+                      u64 start_offset, u64 range,
+                      u64 reserve_offset, u64 reserve_range,
+                      const struct drm_gpuva_fn_ops *ops)
+{
+       mgr->rb.tree = RB_ROOT_CACHED;
+       INIT_LIST_HEAD(&mgr->rb.list);
+
+       drm_gpuva_check_overflow(start_offset, range);
+       mgr->mm_start = start_offset;
+       mgr->mm_range = range;
+
+       mgr->name = name ? name : "unknown";
+       mgr->ops = ops;
+
+       memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+
+       if (reserve_range) {
+               mgr->kernel_alloc_node.va.addr = reserve_offset;
+               mgr->kernel_alloc_node.va.range = reserve_range;
+
+               if (likely(!drm_gpuva_check_overflow(reserve_offset,
+                                                    reserve_range)))
+                       __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
+       }
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
+
+/**
+ * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
+ * @mgr: pointer to the &drm_gpuva_manager to clean up
+ *
+ * Note that it is a bug to call this function on a manager that still
+ * holds GPU VA mappings.
+ */
+void
+drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
+{
+       mgr->name = NULL;
+
+       if (mgr->kernel_alloc_node.va.range)
+               __drm_gpuva_remove(&mgr->kernel_alloc_node);
+
+       WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
+            "GPUVA tree is not empty, potentially leaking memory.");
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);
+
+static int
+__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+                  struct drm_gpuva *va)
+{
+       struct rb_node *node;
+       struct list_head *head;
+
+       if (drm_gpuva_it_iter_first(&mgr->rb.tree,
+                                   GPUVA_START(va),
+                                   GPUVA_LAST(va)))
+               return -EEXIST;
+
+       va->mgr = mgr;
+
+       drm_gpuva_it_insert(va, &mgr->rb.tree);
+
+       node = rb_prev(&va->rb.node);
+       if (node)
+               head = &(to_drm_gpuva(node))->rb.entry;
+       else
+               head = &mgr->rb.list;
+
+       list_add(&va->rb.entry, head);
+
+       return 0;
+}
+
+/**
+ * drm_gpuva_insert() - insert a &drm_gpuva
+ * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
+ * @va: the &drm_gpuva to insert
+ *
+ * Insert a &drm_gpuva with a given address and range into a
+ * &drm_gpuva_manager.
+ *
+ * It is safe to use this function while iterating the GPU VA space using the
+ * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
+ * drm_gpuva_for_each_va_range_safe().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+                struct drm_gpuva *va)
+{
+       u64 addr = va->va.addr;
+       u64 range = va->va.range;
+
+       if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
+               return -EINVAL;
+
+       return __drm_gpuva_insert(mgr, va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_insert);
+
+static void
+__drm_gpuva_remove(struct drm_gpuva *va)
+{
+       drm_gpuva_it_remove(va, &va->mgr->rb.tree);
+       list_del_init(&va->rb.entry);
+}
+
+/**
+ * drm_gpuva_remove() - remove a &drm_gpuva
+ * @va: the &drm_gpuva to remove
+ *
+ * This removes the given &va from the underlying tree.
+ *
+ * It is safe to use this function while iterating the GPU VA space using the
+ * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
+ * drm_gpuva_for_each_va_range_safe().
+ */
+void
+drm_gpuva_remove(struct drm_gpuva *va)
+{
+       struct drm_gpuva_manager *mgr = va->mgr;
+
+       if (unlikely(va == &mgr->kernel_alloc_node)) {
+               WARN(1, "Can't destroy kernel reserved node.\n");
+               return;
+       }
+
+       __drm_gpuva_remove(va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_remove);
+
+/**
+ * drm_gpuva_link() - link a &drm_gpuva
+ * @va: the &drm_gpuva to link
+ *
+ * This adds the given &va to the GPU VA list of the &drm_gem_object it is
+ * associated with.
+ *
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using the GEM's dma_resv lock.
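+ *
+ * When the dma_resv lock is used for this, that means (illustrative sketch)::
+ *
+ *     dma_resv_lock(obj->resv, NULL);
+ *     drm_gpuva_link(va);
+ *     dma_resv_unlock(obj->resv);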
+ */
+void
+drm_gpuva_link(struct drm_gpuva *va)
+{
+       struct drm_gem_object *obj = va->gem.obj;
+
+       if (unlikely(!obj))
+               return;
+
+       drm_gem_gpuva_assert_lock_held(obj);
+
+       list_add_tail(&va->gem.entry, &obj->gpuva.list);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_link);
+
+/**
+ * drm_gpuva_unlink() - unlink a &drm_gpuva
+ * @va: the &drm_gpuva to unlink
+ *
+ * This removes the given &va from the GPU VA list of the &drm_gem_object it is
+ * associated with.
+ *
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using the GEM's dma_resv lock.
+ */
+void
+drm_gpuva_unlink(struct drm_gpuva *va)
+{
+       struct drm_gem_object *obj = va->gem.obj;
+
+       if (unlikely(!obj))
+               return;
+
+       drm_gem_gpuva_assert_lock_held(obj);
+
+       list_del_init(&va->gem.entry);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
+
+/**
+ * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
+ * @mgr: the &drm_gpuva_manager to search in
+ * @addr: the &drm_gpuva's address
+ * @range: the &drm_gpuva's range
+ *
+ * Returns: the first &drm_gpuva within the given range
+ */
+struct drm_gpuva *
+drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+                    u64 addr, u64 range)
+{
+       u64 last = addr + range - 1;
+
+       return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
+
+/**
+ * drm_gpuva_find() - find a &drm_gpuva
+ * @mgr: the &drm_gpuva_manager to search in
+ * @addr: the &drm_gpuva's address
+ * @range: the &drm_gpuva's range
+ *
+ * Returns: the &drm_gpuva at a given @addr and with a given @range
+ */
+struct drm_gpuva *
+drm_gpuva_find(struct drm_gpuva_manager *mgr,
+              u64 addr, u64 range)
+{
+       struct drm_gpuva *va;
+
+       va = drm_gpuva_find_first(mgr, addr, range);
+       if (!va)
+               goto out;
+
+       if (va->va.addr != addr ||
+           va->va.range != range)
+               goto out;
+
+       return va;
+
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find);
+
+/**
+ * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
+ * @mgr: the &drm_gpuva_manager to search in
+ * @start: the given GPU VA's start address
+ *
+ * Find the adjacent &drm_gpuva before the GPU VA with given @start address.
+ *
+ * Note that if there is any free space between the GPU VA mappings, no mapping
+ * is returned.
+ *
+ * Returns: a pointer to the found &drm_gpuva or NULL if none was found
+ */
+struct drm_gpuva *
+drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
+{
+       if (!drm_gpuva_range_valid(mgr, start - 1, 1))
+               return NULL;
+
+       return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
+
+/**
+ * drm_gpuva_find_next() - find the &drm_gpuva after the given address
+ * @mgr: the &drm_gpuva_manager to search in
+ * @end: the given GPU VA's end address
+ *
+ * Find the adjacent &drm_gpuva after the GPU VA with given @end address.
+ *
+ * Note that if there is any free space between the GPU VA mappings, no mapping
+ * is returned.
+ *
+ * Returns: a pointer to the found &drm_gpuva or NULL if none was found
+ */
+struct drm_gpuva *
+drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
+{
+       if (!drm_gpuva_range_valid(mgr, end, 1))
+               return NULL;
+
+       return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
+
+/**
+ * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
+ * is empty
+ * @mgr: the &drm_gpuva_manager to check the range for
+ * @addr: the start address of the range
+ * @range: the range of the interval
+ *
+ * Returns: true if the interval is empty, false otherwise
+ */
+bool
+drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+       return !drm_gpuva_find_first(mgr, addr, range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
+
+/**
+ * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
+ * &drm_gpuva_op_map
+ * @mgr: the &drm_gpuva_manager
+ * @va: the &drm_gpuva to insert
+ * @op: the &drm_gpuva_op_map to initialize @va with
+ *
+ * Initializes the @va from the @op and inserts it into the given @mgr.
+ */
+void
+drm_gpuva_map(struct drm_gpuva_manager *mgr,
+             struct drm_gpuva *va,
+             struct drm_gpuva_op_map *op)
+{
+       drm_gpuva_init_from_op(va, op);
+       drm_gpuva_insert(mgr, va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_map);
+
+/**
+ * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
+ * &drm_gpuva_op_remap
+ * @prev: the &drm_gpuva to remap when keeping the start of a mapping
+ * @next: the &drm_gpuva to remap when keeping the end of a mapping
+ * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
+ *
+ * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
+ * @next.
+ */
+void
+drm_gpuva_remap(struct drm_gpuva *prev,
+               struct drm_gpuva *next,
+               struct drm_gpuva_op_remap *op)
+{
+       struct drm_gpuva *curr = op->unmap->va;
+       struct drm_gpuva_manager *mgr = curr->mgr;
+
+       drm_gpuva_remove(curr);
+
+       if (op->prev) {
+               drm_gpuva_init_from_op(prev, op->prev);
+               drm_gpuva_insert(mgr, prev);
+       }
+
+       if (op->next) {
+               drm_gpuva_init_from_op(next, op->next);
+               drm_gpuva_insert(mgr, next);
+       }
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_remap);
+
+/**
+ * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
+ * &drm_gpuva_op_unmap
+ * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
+ *
+ * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
+ */
+void
+drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
+{
+       drm_gpuva_remove(op->va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
+
+static int
+op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+         u64 addr, u64 range,
+         struct drm_gem_object *obj, u64 offset)
+{
+       struct drm_gpuva_op op = {};
+
+       op.op = DRM_GPUVA_OP_MAP;
+       op.map.va.addr = addr;
+       op.map.va.range = range;
+       op.map.gem.obj = obj;
+       op.map.gem.offset = offset;
+
+       return fn->sm_step_map(&op, priv);
+}
+
+static int
+op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+           struct drm_gpuva_op_map *prev,
+           struct drm_gpuva_op_map *next,
+           struct drm_gpuva_op_unmap *unmap)
+{
+       struct drm_gpuva_op op = {};
+       struct drm_gpuva_op_remap *r;
+
+       op.op = DRM_GPUVA_OP_REMAP;
+       r = &op.remap;
+       r->prev = prev;
+       r->next = next;
+       r->unmap = unmap;
+
+       return fn->sm_step_remap(&op, priv);
+}
+
+static int
+op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+           struct drm_gpuva *va, bool merge)
+{
+       struct drm_gpuva_op op = {};
+
+       op.op = DRM_GPUVA_OP_UNMAP;
+       op.unmap.va = va;
+       op.unmap.keep = merge;
+
+       return fn->sm_step_unmap(&op, priv);
+}
+
+static int
+__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
+                  const struct drm_gpuva_fn_ops *ops, void *priv,
+                  u64 req_addr, u64 req_range,
+                  struct drm_gem_object *req_obj, u64 req_offset)
+{
+       struct drm_gpuva *va, *next, *prev = NULL;
+       u64 req_end = req_addr + req_range;
+       int ret;
+
+       if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+               return -EINVAL;
+
+       drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+               struct drm_gem_object *obj = va->gem.obj;
+               u64 offset = va->gem.offset;
+               u64 addr = va->va.addr;
+               u64 range = va->va.range;
+               u64 end = addr + range;
+               bool merge = !!va->gem.obj;
+
+               if (addr == req_addr) {
+                       merge &= obj == req_obj &&
+                                offset == req_offset;
+
+                       if (end == req_end) {
+                               ret = op_unmap_cb(ops, priv, va, merge);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+
+                       if (end < req_end) {
+                               ret = op_unmap_cb(ops, priv, va, merge);
+                               if (ret)
+                                       return ret;
+                               goto next;
+                       }
+
+                       if (end > req_end) {
+                               struct drm_gpuva_op_map n = {
+                                       .va.addr = req_end,
+                                       .va.range = range - req_range,
+                                       .gem.obj = obj,
+                                       .gem.offset = offset + req_range,
+                               };
+                               struct drm_gpuva_op_unmap u = {
+                                       .va = va,
+                                       .keep = merge,
+                               };
+
+                               ret = op_remap_cb(ops, priv, NULL, &n, &u);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+               } else if (addr < req_addr) {
+                       u64 ls_range = req_addr - addr;
+                       struct drm_gpuva_op_map p = {
+                               .va.addr = addr,
+                               .va.range = ls_range,
+                               .gem.obj = obj,
+                               .gem.offset = offset,
+                       };
+                       struct drm_gpuva_op_unmap u = { .va = va };
+
+                       merge &= obj == req_obj &&
+                                offset + ls_range == req_offset;
+                       u.keep = merge;
+
+                       if (end == req_end) {
+                               ret = op_remap_cb(ops, priv, &p, NULL, &u);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+
+                       if (end < req_end) {
+                               ret = op_remap_cb(ops, priv, &p, NULL, &u);
+                               if (ret)
+                                       return ret;
+                               goto next;
+                       }
+
+                       if (end > req_end) {
+                               struct drm_gpuva_op_map n = {
+                                       .va.addr = req_end,
+                                       .va.range = end - req_end,
+                                       .gem.obj = obj,
+                                       .gem.offset = offset + ls_range +
+                                                     req_range,
+                               };
+
+                               ret = op_remap_cb(ops, priv, &p, &n, &u);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+               } else if (addr > req_addr) {
+                       merge &= obj == req_obj &&
+                                offset == req_offset +
+                                          (addr - req_addr);
+
+                       if (end == req_end) {
+                               ret = op_unmap_cb(ops, priv, va, merge);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+
+                       if (end < req_end) {
+                               ret = op_unmap_cb(ops, priv, va, merge);
+                               if (ret)
+                                       return ret;
+                               goto next;
+                       }
+
+                       if (end > req_end) {
+                               struct drm_gpuva_op_map n = {
+                                       .va.addr = req_end,
+                                       .va.range = end - req_end,
+                                       .gem.obj = obj,
+                                       .gem.offset = offset + req_end - addr,
+                               };
+                               struct drm_gpuva_op_unmap u = {
+                                       .va = va,
+                                       .keep = merge,
+                               };
+
+                               ret = op_remap_cb(ops, priv, NULL, &n, &u);
+                               if (ret)
+                                       return ret;
+                               break;
+                       }
+               }
+next:
+               prev = va;
+       }
+
+       return op_map_cb(ops, priv,
+                        req_addr, req_range,
+                        req_obj, req_offset);
+}
+
+static int
+__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
+                    const struct drm_gpuva_fn_ops *ops, void *priv,
+                    u64 req_addr, u64 req_range)
+{
+       struct drm_gpuva *va, *next;
+       u64 req_end = req_addr + req_range;
+       int ret;
+
+       if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+               return -EINVAL;
+
+       drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+               struct drm_gpuva_op_map prev = {}, next = {};
+               bool prev_split = false, next_split = false;
+               struct drm_gem_object *obj = va->gem.obj;
+               u64 offset = va->gem.offset;
+               u64 addr = va->va.addr;
+               u64 range = va->va.range;
+               u64 end = addr + range;
+
+               if (addr < req_addr) {
+                       prev.va.addr = addr;
+                       prev.va.range = req_addr - addr;
+                       prev.gem.obj = obj;
+                       prev.gem.offset = offset;
+
+                       prev_split = true;
+               }
+
+               if (end > req_end) {
+                       next.va.addr = req_end;
+                       next.va.range = end - req_end;
+                       next.gem.obj = obj;
+                       next.gem.offset = offset + (req_end - addr);
+
+                       next_split = true;
+               }
+
+               if (prev_split || next_split) {
+                       struct drm_gpuva_op_unmap unmap = { .va = va };
+
+                       ret = op_remap_cb(ops, priv,
+                                         prev_split ? &prev : NULL,
+                                         next_split ? &next : NULL,
+                                         &unmap);
+                       if (ret)
+                               return ret;
+               } else {
+                       ret = op_unmap_cb(ops, priv, va, false);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ * @priv: pointer to a driver private data structure
+ *
+ * This function iterates the given range of the GPU VA space. It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
+ * steps.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing, neither this function nor &drm_gpuva_sm_unmap is allowed
+ * to be called before the &drm_gpuva_manager's view of the GPU VA space has
+ * been updated with the previous set of operations. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * A sequence of callbacks can contain map, unmap and remap operations, but
+ * the sequence of callbacks might also be empty if no operation is required,
+ * e.g. if the requested mapping already exists in the exact same way.
+ *
+ * There can be an arbitrary number of unmap operations, a maximum of two
+ * remap operations and a single map operation. The latter represents the
+ * original map operation requested by the caller.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+                u64 req_addr, u64 req_range,
+                struct drm_gem_object *req_obj, u64 req_offset)
+{
+       const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+       if (unlikely(!(ops && ops->sm_step_map &&
+                      ops->sm_step_remap &&
+                      ops->sm_step_unmap)))
+               return -EINVAL;
+
+       return __drm_gpuva_sm_map(mgr, ops, priv,
+                                 req_addr, req_range,
+                                 req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
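
To make the callback contract concrete, a driver wiring up the split/merge
steps might look roughly like the sketch below. The my_* names and the my_vm
type are hypothetical; only the drm_gpuva_* symbols come from this series.

	static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
	{
		struct my_vm *vm = priv;

		/* Program the page tables for the new mapping in op->map. */
		return my_map_pages(vm, &op->map);
	}

	static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
	{
		struct my_vm *vm = priv;

		/* Tear down op->remap.unmap->va and re-create the kept parts
		 * described by op->remap.prev and/or op->remap.next. */
		return my_remap_pages(vm, &op->remap);
	}

	static int my_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
	{
		struct my_vm *vm = priv;

		/* Tear down the page tables for op->unmap.va. */
		return my_unmap_pages(vm, op->unmap.va);
	}

	static const struct drm_gpuva_fn_ops my_gpuva_fn_ops = {
		.sm_step_map = my_sm_step_map,
		.sm_step_remap = my_sm_step_remap,
		.sm_step_unmap = my_sm_step_unmap,
	};

	/* With mgr->ops pointing at my_gpuva_fn_ops, a bind request becomes: */
	static int my_vm_bind(struct my_vm *vm, u64 addr, u64 range,
			      struct drm_gem_object *obj, u64 offset)
	{
		return drm_gpuva_sm_map(&vm->mgr, vm, addr, range, obj, offset);
	}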
+
+/**
+ * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @priv: pointer to a driver private data structure
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function iterates the given range of the GPU VA space. It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the operations to
+ * unmap and, if required, split existent mappings.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing, neither this function nor &drm_gpuva_sm_map is allowed to
+ * be called before the &drm_gpuva_manager's view of the GPU VA space has been
+ * updated with the previous set of operations. To update the
+ * &drm_gpuva_manager's view
+ * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
+ * drm_gpuva_destroy_unlocked() should be used.
+ *
+ * A sequence of callbacks can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary number of unmap operations and a maximum of two
+ * remap operations.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+                  u64 req_addr, u64 req_range)
+{
+       const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+       if (unlikely(!(ops && ops->sm_step_remap &&
+                      ops->sm_step_unmap)))
+               return -EINVAL;
+
+       return __drm_gpuva_sm_unmap(mgr, ops, priv,
+                                   req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
+
+static struct drm_gpuva_op *
+gpuva_op_alloc(struct drm_gpuva_manager *mgr)
+{
+       const struct drm_gpuva_fn_ops *fn = mgr->ops;
+       struct drm_gpuva_op *op;
+
+       if (fn && fn->op_alloc)
+               op = fn->op_alloc();
+       else
+               op = kzalloc(sizeof(*op), GFP_KERNEL);
+
+       if (unlikely(!op))
+               return NULL;
+
+       return op;
+}
+
+static void
+gpuva_op_free(struct drm_gpuva_manager *mgr,
+             struct drm_gpuva_op *op)
+{
+       const struct drm_gpuva_fn_ops *fn = mgr->ops;
+
+       if (fn && fn->op_free)
+               fn->op_free(op);
+       else
+               kfree(op);
+}
+
+static int
+drm_gpuva_sm_step(struct drm_gpuva_op *__op,
+                 void *priv)
+{
+       struct {
+               struct drm_gpuva_manager *mgr;
+               struct drm_gpuva_ops *ops;
+       } *args = priv;
+       struct drm_gpuva_manager *mgr = args->mgr;
+       struct drm_gpuva_ops *ops = args->ops;
+       struct drm_gpuva_op *op;
+
+       op = gpuva_op_alloc(mgr);
+       if (unlikely(!op))
+               goto err;
+
+       memcpy(op, __op, sizeof(*op));
+
+       if (op->op == DRM_GPUVA_OP_REMAP) {
+               struct drm_gpuva_op_remap *__r = &__op->remap;
+               struct drm_gpuva_op_remap *r = &op->remap;
+
+               r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
+                                  GFP_KERNEL);
+               if (unlikely(!r->unmap))
+                       goto err_free_op;
+
+               if (__r->prev) {
+                       r->prev = kmemdup(__r->prev, sizeof(*r->prev),
+                                         GFP_KERNEL);
+                       if (unlikely(!r->prev))
+                               goto err_free_unmap;
+               }
+
+               if (__r->next) {
+                       r->next = kmemdup(__r->next, sizeof(*r->next),
+                                         GFP_KERNEL);
+                       if (unlikely(!r->next))
+                               goto err_free_prev;
+               }
+       }
+
+       list_add_tail(&op->entry, &ops->list);
+
+       return 0;
+
+err_free_prev:
+       kfree(op->remap.prev);
+err_free_unmap:
+       kfree(op->remap.unmap);
+err_free_op:
+       gpuva_op_free(mgr, op);
+err:
+       return -ENOMEM;
+}
+
+static const struct drm_gpuva_fn_ops gpuva_list_ops = {
+       .sm_step_map = drm_gpuva_sm_step,
+       .sm_step_remap = drm_gpuva_sm_step,
+       .sm_step_unmap = drm_gpuva_sm_step,
+};
+
+/**
+ * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function creates a list of operations to perform splitting and merging
+ * of existent mapping(s) with the newly requested one.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map, unmap and remap operations, but it
+ * can also be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There can be an arbitrary number of unmap operations, a maximum of two
+ * remap operations and a single map operation. The latter represents the
+ * original map operation requested by the caller.
+ *
+ * Note that before calling this function again with another mapping request,
+ * it is necessary to update the &drm_gpuva_manager's view of the GPU VA space.
+ * The previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+                           u64 req_addr, u64 req_range,
+                           struct drm_gem_object *req_obj, u64 req_offset)
+{
+       struct drm_gpuva_ops *ops;
+       struct {
+               struct drm_gpuva_manager *mgr;
+               struct drm_gpuva_ops *ops;
+       } args;
+       int ret;
+
+       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+       if (unlikely(!ops))
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&ops->list);
+
+       args.mgr = mgr;
+       args.ops = ops;
+
+       ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
+                                req_addr, req_range,
+                                req_obj, req_offset);
+       if (ret)
+               goto err_free_ops;
+
+       return ops;
+
+err_free_ops:
+       drm_gpuva_ops_free(mgr, ops);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
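
For drivers that store the operations instead of applying them in the
callbacks, consumption of the returned list could look like the following
sketch (my_vm and the my_handle_*() helpers are hypothetical; DRM_GPUVA_OP_MAP
is assumed from the op type enum):

	static int my_vm_bind_deferred(struct my_vm *vm, u64 addr, u64 range,
				       struct drm_gem_object *obj, u64 offset)
	{
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;
		int ret = 0;

		ops = drm_gpuva_sm_map_ops_create(&vm->mgr, addr, range,
						  obj, offset);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
				ret = my_handle_map(vm, &op->map);
				break;
			case DRM_GPUVA_OP_REMAP:
				ret = my_handle_remap(vm, &op->remap);
				break;
			case DRM_GPUVA_OP_UNMAP:
				ret = my_handle_unmap(vm, op->unmap.va);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				break;
		}

		drm_gpuva_ops_free(&vm->mgr, ops);
		return ret;
	}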
+
+/**
+ * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
+ * unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function creates a list of operations to perform unmapping and, if
+ * required, splitting of the mappings overlapping the unmap range.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary number of unmap operations and a maximum of two
+ * remap operations.
+ *
+ * Note that before calling this function again with another range to unmap,
+ * it is necessary to update the &drm_gpuva_manager's view of the GPU VA space.
+ * The previously obtained operations must be processed or abandoned. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+                             u64 req_addr, u64 req_range)
+{
+       struct drm_gpuva_ops *ops;
+       struct {
+               struct drm_gpuva_manager *mgr;
+               struct drm_gpuva_ops *ops;
+       } args;
+       int ret;
+
+       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+       if (unlikely(!ops))
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&ops->list);
+
+       args.mgr = mgr;
+       args.ops = ops;
+
+       ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
+                                  req_addr, req_range);
+       if (ret)
+               goto err_free_ops;
+
+       return ops;
+
+err_free_ops:
+       drm_gpuva_ops_free(mgr, ops);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
+
+/**
+ * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @addr: the start address of the range to prefetch
+ * @range: the range of the mappings to prefetch
+ *
+ * This function creates a list of operations to perform prefetching.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain prefetch operations.
+ *
+ * There can be an arbitrary number of prefetch operations.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+                             u64 addr, u64 range)
+{
+       struct drm_gpuva_ops *ops;
+       struct drm_gpuva_op *op;
+       struct drm_gpuva *va;
+       u64 end = addr + range;
+       int ret;
+
+       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+       if (!ops)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&ops->list);
+
+       drm_gpuva_for_each_va_range(va, mgr, addr, end) {
+               op = gpuva_op_alloc(mgr);
+               if (!op) {
+                       ret = -ENOMEM;
+                       goto err_free_ops;
+               }
+
+               op->op = DRM_GPUVA_OP_PREFETCH;
+               op->prefetch.va = va;
+               list_add_tail(&op->entry, &ops->list);
+       }
+
+       return ops;
+
+err_free_ops:
+       drm_gpuva_ops_free(mgr, ops);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
+
+/**
+ * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @obj: the &drm_gem_object to unmap
+ *
+ * This function creates a list of operations to perform unmapping for every
+ * GPUVA attached to a GEM.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and consists of an
+ * arbitrary number of unmap operations.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * It is the caller's responsibility to protect the GEM's GPUVA list against
+ * concurrent access using the GEM's dma_resv lock.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+                              struct drm_gem_object *obj)
+{
+       struct drm_gpuva_ops *ops;
+       struct drm_gpuva_op *op;
+       struct drm_gpuva *va;
+       int ret;
+
+       drm_gem_gpuva_assert_lock_held(obj);
+
+       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+       if (!ops)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&ops->list);
+
+       drm_gem_for_each_gpuva(va, obj) {
+               op = gpuva_op_alloc(mgr);
+               if (!op) {
+                       ret = -ENOMEM;
+                       goto err_free_ops;
+               }
+
+               op->op = DRM_GPUVA_OP_UNMAP;
+               op->unmap.va = va;
+               list_add_tail(&op->entry, &ops->list);
+       }
+
+       return ops;
+
+err_free_ops:
+       drm_gpuva_ops_free(mgr, ops);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
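
A minimal usage sketch, assuming the GEM's dma_resv lock is the lock referred
to above (my_vm and my_handle_unmap() are hypothetical):

	static int my_gem_unmap_all(struct my_vm *vm, struct drm_gem_object *obj)
	{
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;
		int ret = 0;

		dma_resv_lock(obj->resv, NULL);
		ops = drm_gpuva_gem_unmap_ops_create(&vm->mgr, obj);
		dma_resv_unlock(obj->resv);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		drm_gpuva_for_each_op(op, ops) {
			ret = my_handle_unmap(vm, op->unmap.va);
			if (ret)
				break;
		}

		drm_gpuva_ops_free(&vm->mgr, ops);
		return ret;
	}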
+
+/**
+ * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
+ * @mgr: the &drm_gpuva_manager the ops were created for
+ * @ops: the &drm_gpuva_ops to free
+ *
+ * Frees the given &drm_gpuva_ops structure including all the ops associated
+ * with it.
+ */
+void
+drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+                  struct drm_gpuva_ops *ops)
+{
+       struct drm_gpuva_op *op, *next;
+
+       drm_gpuva_for_each_op_safe(op, next, ops) {
+               list_del(&op->entry);
+
+               if (op->op == DRM_GPUVA_OP_REMAP) {
+                       kfree(op->remap.prev);
+                       kfree(op->remap.next);
+                       kfree(op->remap.unmap);
+               }
+
+               gpuva_op_free(mgr, op);
+       }
+
+       kfree(ops);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
index d7e023bbb0d5b999489c0c8db46c9d8d50e36e8a..ba12acd551390b8e96151139f786487d95e02a95 100644 (file)
@@ -245,6 +245,8 @@ int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_private);
 int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_private);
+int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_private);
 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
index 7c9d66ee917de5cc765358117b26cfdbf037bc3d..f03ffbacfe9b48d9b06437ca2f697f2ac98cd419 100644 (file)
@@ -245,8 +245,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
                req->value = 1;
                return 0;
        case DRM_CAP_PRIME:
-               req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
-               req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+               req->value = DRM_PRIME_CAP_IMPORT | DRM_PRIME_CAP_EXPORT;
                return 0;
        case DRM_CAP_SYNCOBJ:
                req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
@@ -702,6 +701,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
                      DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
                      DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
+                     DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
                      DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
index 5423ad8837294c05834752820d0c68a064a006f4..bcd111404b128aacb5081cfb4a489dab636923e2 100644 (file)
@@ -196,7 +196,7 @@ void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
                               size, gfp);
                return NULL;
        }
-       dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+       dr->node.name = kstrdup_const("kmalloc", gfp);
 
        add_dr(dev, dr);
 
index c871d9f096b8ba702b06134403650ad753283ebc..e90f0bf895b33ee3500faa0929e380767c4a4d78 100644 (file)
@@ -1140,10 +1140,13 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
                return -ENOMEM;
 
        tr[1].rx_buf = buf;
+
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 0);
 
        spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
-       ret = spi_sync(spi, &m);
+       ret = spi_sync_locked(spi, &m);
+       spi_bus_unlock(spi->controller);
        if (ret)
                goto err_free;
 
@@ -1177,19 +1180,24 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *dbi, u8 *cmd,
 
        MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
 
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 0);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
        ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
+       spi_bus_unlock(spi->controller);
        if (ret || !num)
                return ret;
 
        if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !dbi->swap_bytes)
                bpw = 16;
 
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 1);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+       ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+       spi_bus_unlock(spi->controller);
 
-       return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+       return ret;
 }
 
 /**
@@ -1271,7 +1279,8 @@ EXPORT_SYMBOL(mipi_dbi_spi_init);
  * @len: Buffer length
  *
  * This SPI transfer helper breaks up the transfer of @buf into chunks which
- * the SPI controller driver can handle.
+ * the SPI controller driver can handle. The SPI bus must be locked when
+ * calling this.
  *
  * Returns:
  * Zero on success, negative error code on failure.
@@ -1305,7 +1314,7 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
                buf += chunk;
                len -= chunk;
 
-               ret = spi_sync(spi, &m);
+               ret = spi_sync_locked(spi, &m);
                if (ret)
                        return ret;
        }
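
With this change, an external caller of mipi_dbi_spi_transfer() is expected to
hold the SPI bus lock itself, roughly as in this sketch (my_dbi_send() is
hypothetical; a speed of 0 falls back to the device default):

	static int my_dbi_send(struct mipi_dbi *dbi, const void *buf, size_t len)
	{
		struct spi_device *spi = dbi->spi;
		int ret;

		spi_bus_lock(spi->controller);
		ret = mipi_dbi_spi_transfer(spi, 0, 8, buf, len);
		spi_bus_unlock(spi->controller);

		return ret;
	}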
index 6252ac01e945a65a056e46d397f018b667ed57ea..14201f73aab1340ac8cc8f050f742a419588e369 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
index ba1608effc0fe373a256cb22a094d90b983f394b..ac0d2ce3f870411b3a31495bfdcfa16f7e82705c 100644 (file)
@@ -147,8 +147,10 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
                obj = NULL;
 
        if (obj && drm_mode_object_lease_required(obj->type) &&
-           !_drm_lease_held(file_priv, obj->id))
+           !_drm_lease_held(file_priv, obj->id)) {
+               drm_dbg_kms(dev, "[OBJECT:%d] not included in lease", id);
                obj = NULL;
+       }
 
        if (obj && obj->free_cb) {
                if (!kref_get_unless_zero(&obj->refcount))
index f634371c717a856b2e66961a58df77a7bf5012ed..e814020bbcd3b3275d71174fe907c18ffeddefe0 100644 (file)
@@ -58,6 +58,8 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev,
                    const struct drm_panel_funcs *funcs, int connector_type)
 {
        INIT_LIST_HEAD(&panel->list);
+       INIT_LIST_HEAD(&panel->followers);
+       mutex_init(&panel->follower_lock);
        panel->dev = dev;
        panel->funcs = funcs;
        panel->connector_type = connector_type;
@@ -105,13 +107,38 @@ EXPORT_SYMBOL(drm_panel_remove);
  */
 int drm_panel_prepare(struct drm_panel *panel)
 {
+       struct drm_panel_follower *follower;
+       int ret;
+
        if (!panel)
                return -EINVAL;
 
-       if (panel->funcs && panel->funcs->prepare)
-               return panel->funcs->prepare(panel);
+       if (panel->prepared) {
+               dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
+               return 0;
+       }
+
+       mutex_lock(&panel->follower_lock);
 
-       return 0;
+       if (panel->funcs && panel->funcs->prepare) {
+               ret = panel->funcs->prepare(panel);
+               if (ret < 0)
+                       goto exit;
+       }
+       panel->prepared = true;
+
+       list_for_each_entry(follower, &panel->followers, list) {
+               ret = follower->funcs->panel_prepared(follower);
+               if (ret < 0)
+                       dev_info(panel->dev, "%ps failed: %d\n",
+                                follower->funcs->panel_prepared, ret);
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&panel->follower_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(drm_panel_prepare);
 
@@ -128,13 +155,38 @@ EXPORT_SYMBOL(drm_panel_prepare);
  */
 int drm_panel_unprepare(struct drm_panel *panel)
 {
+       struct drm_panel_follower *follower;
+       int ret;
+
        if (!panel)
                return -EINVAL;
 
-       if (panel->funcs && panel->funcs->unprepare)
-               return panel->funcs->unprepare(panel);
+       if (!panel->prepared) {
+               dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
+               return 0;
+       }
 
-       return 0;
+       mutex_lock(&panel->follower_lock);
+
+       list_for_each_entry(follower, &panel->followers, list) {
+               ret = follower->funcs->panel_unpreparing(follower);
+               if (ret < 0)
+                       dev_info(panel->dev, "%ps failed: %d\n",
+                                follower->funcs->panel_unpreparing, ret);
+       }
+
+       if (panel->funcs && panel->funcs->unprepare) {
+               ret = panel->funcs->unprepare(panel);
+               if (ret < 0)
+                       goto exit;
+       }
+       panel->prepared = false;
+
+       ret = 0;
+exit:
+       mutex_unlock(&panel->follower_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(drm_panel_unprepare);
 
@@ -155,11 +207,17 @@ int drm_panel_enable(struct drm_panel *panel)
        if (!panel)
                return -EINVAL;
 
+       if (panel->enabled) {
+               dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
+               return 0;
+       }
+
        if (panel->funcs && panel->funcs->enable) {
                ret = panel->funcs->enable(panel);
                if (ret < 0)
                        return ret;
        }
+       panel->enabled = true;
 
        ret = backlight_enable(panel->backlight);
        if (ret < 0)
@@ -187,13 +245,22 @@ int drm_panel_disable(struct drm_panel *panel)
        if (!panel)
                return -EINVAL;
 
+       if (!panel->enabled) {
+               dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
+               return 0;
+       }
+
        ret = backlight_disable(panel->backlight);
        if (ret < 0)
                DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
                             ret);
 
-       if (panel->funcs && panel->funcs->disable)
-               return panel->funcs->disable(panel);
+       if (panel->funcs && panel->funcs->disable) {
+               ret = panel->funcs->disable(panel);
+               if (ret < 0)
+                       return ret;
+       }
+       panel->enabled = false;
 
        return 0;
 }
@@ -305,6 +372,141 @@ int of_drm_get_panel_orientation(const struct device_node *np,
 EXPORT_SYMBOL(of_drm_get_panel_orientation);
 #endif
 
+/**
+ * drm_is_panel_follower() - Check if the device is a panel follower
+ * @dev: The 'struct device' to check
+ *
+ * This checks to see if a device needs to be power sequenced together with
+ * a panel using the panel follower API.
+ * At the moment panels can only be followed on device tree enabled systems.
+ * The "panel" property of the follower points to the panel to be followed.
+ *
+ * Return: true if we should be power sequenced with a panel; false otherwise.
+ */
+bool drm_is_panel_follower(struct device *dev)
+{
+       /*
+        * The "panel" property is actually a phandle, but for simplicity we
+        * don't bother trying to parse it here. We just need to know if the
+        * property is there.
+        */
+       return of_property_read_bool(dev->of_node, "panel");
+}
+EXPORT_SYMBOL(drm_is_panel_follower);
+
+/**
+ * drm_panel_add_follower() - Register something to follow panel state.
+ * @follower_dev: The 'struct device' for the follower.
+ * @follower:     The panel follower descriptor for the follower.
+ *
+ * A panel follower is called right after preparing the panel and right before
+ * unpreparing the panel. Its primary intention is to power on an associated
+ * touchscreen, though it could be used for other similar devices. Multiple
+ * devices are allowed to follow the same panel.
+ *
+ * If a follower is added to a panel that's already been turned on, the
+ * follower's prepare callback is called right away.
+ *
+ * At the moment panels can only be followed on device tree enabled systems.
+ * The "panel" property of the follower points to the panel to be followed.
+ *
+ * Return: 0 or an error code. Note that -ENODEV means that we detected that
+ *         follower_dev is not actually following a panel. The caller may
+ *         choose to ignore this return value if following a panel is optional.
+ */
+int drm_panel_add_follower(struct device *follower_dev,
+                          struct drm_panel_follower *follower)
+{
+       struct device_node *panel_np;
+       struct drm_panel *panel;
+       int ret;
+
+       panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
+       if (!panel_np)
+               return -ENODEV;
+
+       panel = of_drm_find_panel(panel_np);
+       of_node_put(panel_np);
+       if (IS_ERR(panel))
+               return PTR_ERR(panel);
+
+       get_device(panel->dev);
+       follower->panel = panel;
+
+       mutex_lock(&panel->follower_lock);
+
+       list_add_tail(&follower->list, &panel->followers);
+       if (panel->prepared) {
+               ret = follower->funcs->panel_prepared(follower);
+               if (ret < 0)
+                       dev_info(panel->dev, "%ps failed: %d\n",
+                                follower->funcs->panel_prepared, ret);
+       }
+
+       mutex_unlock(&panel->follower_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_add_follower);
+
+/**
+ * drm_panel_remove_follower() - Reverse drm_panel_add_follower().
+ * @follower:     The panel follower descriptor for the follower.
+ *
+ * Undo drm_panel_add_follower(). This includes calling the follower's
+ * unprepare function if we're removed from a panel that's currently prepared.
+ */
+void drm_panel_remove_follower(struct drm_panel_follower *follower)
+{
+       struct drm_panel *panel = follower->panel;
+       int ret;
+
+       mutex_lock(&panel->follower_lock);
+
+       if (panel->prepared) {
+               ret = follower->funcs->panel_unpreparing(follower);
+               if (ret < 0)
+                       dev_info(panel->dev, "%ps failed: %d\n",
+                                follower->funcs->panel_unpreparing, ret);
+       }
+       list_del_init(&follower->list);
+
+       mutex_unlock(&panel->follower_lock);
+
+       put_device(panel->dev);
+}
+EXPORT_SYMBOL(drm_panel_remove_follower);
+
+static void drm_panel_remove_follower_void(void *follower)
+{
+       drm_panel_remove_follower(follower);
+}
+
+/**
+ * devm_drm_panel_add_follower() - devm version of drm_panel_add_follower()
+ * @follower_dev: The 'struct device' for the follower.
+ * @follower:     The panel follower descriptor for the follower.
+ *
+ * Handles calling drm_panel_remove_follower() using devm on the follower_dev.
+ *
+ * Return: 0 or an error code.
+ */
+int devm_drm_panel_add_follower(struct device *follower_dev,
+                               struct drm_panel_follower *follower)
+{
+       int ret;
+
+       ret = drm_panel_add_follower(follower_dev, follower);
+       if (ret)
+               return ret;
+
+       return devm_add_action_or_reset(follower_dev,
+                                       drm_panel_remove_follower_void, follower);
+}
+EXPORT_SYMBOL(devm_drm_panel_add_follower);
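
Taken together, a touchscreen driver hooking into this API might look roughly
like the sketch below; the my_ts type and the my_ts_power_*() helpers are
hypothetical, only the drm_panel_follower symbols come from this series.

	struct my_ts {
		struct device *dev;
		struct drm_panel_follower follower;
	};

	static int my_ts_panel_prepared(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		/* Power up now that the followed panel is prepared. */
		return my_ts_power_on(ts);
	}

	static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		return my_ts_power_off(ts);
	}

	static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
		.panel_prepared = my_ts_panel_prepared,
		.panel_unpreparing = my_ts_panel_unpreparing,
	};

	static int my_ts_probe(struct my_ts *ts)
	{
		if (!drm_is_panel_follower(ts->dev))
			return my_ts_power_on(ts); /* stand-alone device */

		ts->follower.funcs = &my_ts_follower_funcs;
		return devm_drm_panel_add_follower(ts->dev, &ts->follower);
	}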
+
 #if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
 /**
  * drm_panel_of_backlight - use backlight device node for backlight
index c91e454eba097942c708db2d66edc0a972d7a2ba..5e95089676ff81ed70562d6828d786603742e6bf 100644 (file)
@@ -40,8 +40,8 @@
 /**
  * DOC: overview
  *
- * This helper library has two parts. The first part has support to implement
- * primary plane support on top of the normal CRTC configuration interface.
+ * This helper library contains helpers to implement primary plane support on
+ * top of the normal CRTC configuration interface.
  * Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
  * plane together with the CRTC state this does not allow userspace to disable
 * the primary plane itself. The default primary plane only exposes XRGB8888 and
  * planes, and newly merged drivers must not rely upon these transitional
  * helpers.
  *
- * The second part also implements transitional helpers which allow drivers to
- * gradually switch to the atomic helper infrastructure for plane updates. Once
- * that switch is complete drivers shouldn't use these any longer, instead using
- * the proper legacy implementations for update and disable plane hooks provided
- * by the atomic helpers.
- *
- * Again drivers are strongly urged to switch to the new interfaces.
- *
  * The plane helpers share the function table structures with other helpers,
  * specifically also the atomic helpers. See &struct drm_plane_helper_funcs for
  * the details.
index d29dafce9bb0a38bab42574e1293fd2152c31df7..63b709a67471b9b6e33a9aafe85c191892426ffb 100644 (file)
@@ -51,15 +51,10 @@ MODULE_IMPORT_NS(DMA_BUF);
  * between applications, they can't be guessed like the globally unique GEM
  * names.
  *
- * Drivers that support the PRIME API implement the
- * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
- * GEM based drivers must use drm_gem_prime_handle_to_fd() and
- * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
- * actual driver interfaces is provided through the &drm_gem_object_funcs.export
- * and &drm_driver.gem_prime_import hooks.
- *
- * &dma_buf_ops implementations for GEM drivers are all individually exported
- * for drivers which need to overwrite or reimplement some of them.
+ * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
+ * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
+ * drivers are all individually exported for drivers which need to overwrite
+ * or reimplement some of them.
  *
  * Reference Counting for GEM Drivers
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -283,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
-/**
+/*
  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
  * @dev: drm_device to import into
  * @file_priv: drm file-private structure
@@ -297,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
  *
  * Returns 0 on success or a negative error code on failure.
  */
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
-                              struct drm_file *file_priv, int prime_fd,
-                              uint32_t *handle)
+static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+                                     struct drm_file *file_priv, int prime_fd,
+                                     uint32_t *handle)
 {
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
@@ -365,18 +360,18 @@ out_put:
        dma_buf_put(dma_buf);
        return ret;
 }
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
 
 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
        struct drm_prime_handle *args = data;
 
-       if (!dev->driver->prime_fd_to_handle)
-               return -ENOSYS;
+       if (dev->driver->prime_fd_to_handle) {
+               return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
+                                                      &args->handle);
+       }
 
-       return dev->driver->prime_fd_to_handle(dev, file_priv,
-                       args->fd, &args->handle);
+       return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
 }
 
 static struct dma_buf *export_and_register_object(struct drm_device *dev,
@@ -413,7 +408,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
        return dmabuf;
 }
 
-/**
+/*
  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
  * @dev: dev to export the buffer from
  * @file_priv: drm file-private structure
@@ -426,10 +421,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
  * The actual exporting from GEM object to a dma-buf is done through the
  * &drm_gem_object_funcs.export callback.
  */
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
-                              struct drm_file *file_priv, uint32_t handle,
-                              uint32_t flags,
-                              int *prime_fd)
+static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+                                     struct drm_file *file_priv, uint32_t handle,
+                                     uint32_t flags,
+                                     int *prime_fd)
 {
        struct drm_gem_object *obj;
        int ret = 0;
@@ -511,22 +506,23 @@ out_unlock:
 
        return ret;
 }
-EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
        struct drm_prime_handle *args = data;
 
-       if (!dev->driver->prime_handle_to_fd)
-               return -ENOSYS;
-
        /* check flags are valid */
        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;
 
-       return dev->driver->prime_handle_to_fd(dev, file_priv,
-                       args->handle, args->flags, &args->fd);
+       if (dev->driver->prime_handle_to_fd) {
+               return dev->driver->prime_handle_to_fd(dev, file_priv,
+                                                      args->handle, args->flags,
+                                                      &args->fd);
+       }
+       return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
+                                         args->flags, &args->fd);
 }
 
 /**
@@ -715,8 +711,6 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
  * called to set up the mapping.
- *
- * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
  */
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
@@ -772,25 +766,15 @@ EXPORT_SYMBOL(drm_gem_prime_mmap);
  * @vma: virtual address range
  *
  * Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
- * which should be set to drm_gem_prime_mmap().
- *
- * FIXME: There's really no point to this wrapper, drivers which need anything
- * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
+ * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
  *
  * Returns 0 on success or a negative error code on failure.
  */
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = dma_buf->priv;
-       struct drm_device *dev = obj->dev;
-
-       dma_resv_assert_held(dma_buf->resv);
-
-       if (!dev->driver->gem_prime_mmap)
-               return -ENOSYS;
 
-       return dev->driver->gem_prime_mmap(obj, vma);
+       return drm_gem_prime_mmap(obj, vma);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
@@ -880,9 +864,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
  * @obj: GEM object to export
  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
  *
- * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
- * using the PRIME helpers. It is used as the default in
- * drm_gem_prime_handle_to_fd().
+ * This is the implementation of the &drm_gem_object_funcs.export functions
+ * for GEM drivers using the PRIME helpers. It is used as the default for
+ * drivers that do not set their own.
  */
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags)
@@ -978,10 +962,9 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
  * @dev: drm_device to import into
  * @dma_buf: dma-buf object to import
  *
- * This is the implementation of the gem_prime_import functions for GEM drivers
- * using the PRIME helpers. Drivers can use this as their
- * &drm_driver.gem_prime_import implementation. It is used as the default
- * implementation in drm_gem_prime_fd_to_handle().
+ * This is the implementation of the gem_prime_import functions for GEM
+ * drivers using the PRIME helpers. It is the default for drivers that do
+ * not set their own &drm_driver.gem_prime_import.
  *
  * Drivers must arrange to call drm_prime_gem_destroy() from their
  * &drm_gem_object_funcs.free hook when using this function.
index 0c2be83605258474df0b5ff332bdc93a0a582e65..f7003d1ec5ef1e080b8ea7e5c2dc22e16ff95d0d 100644 (file)
  * requirement is inherited from the wait-before-signal behavior required by
  * the Vulkan timeline semaphore API.
  *
+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
+ * integrate the wait in an event loop.
+ *
  *
  * Import/export of syncobjs
  * -------------------------
 
 #include <linux/anon_inodes.h>
 #include <linux/dma-fence-unwrap.h>
+#include <linux/eventfd.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sched/signal.h>
@@ -212,6 +217,20 @@ struct syncobj_wait_entry {
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait);
 
+struct syncobj_eventfd_entry {
+       struct list_head node;
+       struct dma_fence *fence;
+       struct dma_fence_cb fence_cb;
+       struct drm_syncobj *syncobj;
+       struct eventfd_ctx *ev_fd_ctx;
+       u64 point;
+       u32 flags;
+};
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+                          struct syncobj_eventfd_entry *entry);
+
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
        spin_unlock(&syncobj->lock);
 }
 
+static void
+syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+{
+       eventfd_ctx_put(entry->ev_fd_ctx);
+       dma_fence_put(entry->fence);
+       /* This happens either inside the syncobj lock, or after the node has
+        * already been removed from the list.
+        */
+       list_del(&entry->node);
+       kfree(entry);
+}
+
+static void
+drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+                       struct syncobj_eventfd_entry *entry)
+{
+       spin_lock(&syncobj->lock);
+       list_add_tail(&entry->node, &syncobj->ev_fd_list);
+       syncobj_eventfd_entry_func(syncobj, entry);
+       spin_unlock(&syncobj->lock);
+}
+
 /**
  * drm_syncobj_add_point - add new timeline point to the syncobj
  * @syncobj: sync object to add timeline point do
@@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
                           struct dma_fence *fence,
                           uint64_t point)
 {
-       struct syncobj_wait_entry *cur, *tmp;
+       struct syncobj_wait_entry *wait_cur, *wait_tmp;
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
        struct dma_fence *prev;
 
        dma_fence_get(fence);
@@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
        dma_fence_chain_init(chain, prev, fence, point);
        rcu_assign_pointer(syncobj->fence, &chain->base);
 
-       list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-               syncobj_wait_syncobj_func(syncobj, cur);
+       list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+               syncobj_wait_syncobj_func(syncobj, wait_cur);
+       list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+               syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
        spin_unlock(&syncobj->lock);
 
        /* Walk the chain once to trigger garbage collection */
@@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
 {
        struct dma_fence *old_fence;
-       struct syncobj_wait_entry *cur, *tmp;
+       struct syncobj_wait_entry *wait_cur, *wait_tmp;
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
 
        if (fence)
                dma_fence_get(fence);
@@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
        rcu_assign_pointer(syncobj->fence, fence);
 
        if (fence != old_fence) {
-               list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-                       syncobj_wait_syncobj_func(syncobj, cur);
+               list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+                       syncobj_wait_syncobj_func(syncobj, wait_cur);
+               list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+                       syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
        }
 
        spin_unlock(&syncobj->lock);
@@ -353,10 +400,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
  */
 static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-       struct dma_fence *fence = dma_fence_allocate_private_stub();
+       struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
 
-       if (IS_ERR(fence))
-               return PTR_ERR(fence);
+       if (!fence)
+               return -ENOMEM;
 
        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
@@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref)
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
        drm_syncobj_replace_fence(syncobj, NULL);
+
+       list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+               syncobj_eventfd_entry_free(ev_fd_cur);
+
        kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 
        kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
+       INIT_LIST_HEAD(&syncobj->ev_fd_list);
        spin_lock_init(&syncobj->lock);
 
        if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
@@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
        return ret;
 }
 
+static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+                                            struct dma_fence_cb *cb)
+{
+       struct syncobj_eventfd_entry *entry =
+               container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+       eventfd_signal(entry->ev_fd_ctx, 1);
+       syncobj_eventfd_entry_free(entry);
+}
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+                          struct syncobj_eventfd_entry *entry)
+{
+       int ret;
+       struct dma_fence *fence;
+
+       /* This happens inside the syncobj lock */
+       fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+       ret = dma_fence_chain_find_seqno(&fence, entry->point);
+       if (ret != 0 || !fence) {
+               dma_fence_put(fence);
+               return;
+       }
+
+       list_del_init(&entry->node);
+       entry->fence = fence;
+
+       if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+               eventfd_signal(entry->ev_fd_ctx, 1);
+               syncobj_eventfd_entry_free(entry);
+       } else {
+               ret = dma_fence_add_callback(fence, &entry->fence_cb,
+                                            syncobj_eventfd_entry_fence_func);
+               if (ret == -ENOENT) {
+                       eventfd_signal(entry->ev_fd_ctx, 1);
+                       syncobj_eventfd_entry_free(entry);
+               }
+       }
+}
+
+int
+drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_private)
+{
+       struct drm_syncobj_eventfd *args = data;
+       struct drm_syncobj *syncobj;
+       struct eventfd_ctx *ev_fd_ctx;
+       struct syncobj_eventfd_entry *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+
+       if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+               return -EINVAL;
+
+       if (args->pad)
+               return -EINVAL;
+
+       syncobj = drm_syncobj_find(file_private, args->handle);
+       if (!syncobj)
+               return -ENOENT;
+
+       ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+       if (IS_ERR(ev_fd_ctx))
+               return PTR_ERR(ev_fd_ctx);
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               eventfd_ctx_put(ev_fd_ctx);
+               return -ENOMEM;
+       }
+       entry->syncobj = syncobj;
+       entry->ev_fd_ctx = ev_fd_ctx;
+       entry->point = args->point;
+       entry->flags = args->flags;
+
+       drm_syncobj_add_eventfd(syncobj, entry);
+       drm_syncobj_put(syncobj);
+
+       return 0;
+}
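
From the userspace side, the new ioctl might be consumed roughly as in this
sketch (assuming the uapi definitions of struct drm_syncobj_eventfd and
DRM_IOCTL_SYNCOBJ_EVENTFD, e.g. via libdrm's headers; error handling
abbreviated):

	#include <poll.h>
	#include <stdint.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <xf86drm.h>	/* pulls in the drm.h uapi definitions */

	static int wait_syncobj_via_eventfd(int drm_fd, uint32_t handle,
					    uint64_t point)
	{
		int efd = eventfd(0, EFD_CLOEXEC);
		struct drm_syncobj_eventfd args = {
			.handle = handle,
			.flags = 0,	/* 0: signal when the fence signals;
					 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE
					 * would signal on fence availability. */
			.fd = efd,
			.point = point,
		};
		struct pollfd pfd = { .fd = efd, .events = POLLIN };

		if (efd < 0)
			return -1;
		if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args)) {
			close(efd);
			return -1;
		}

		/* The eventfd can be plugged into any event loop; poll() here. */
		return poll(&pfd, 1, -1) == 1 ? 0 : -1;
	}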
 
 int
 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
index f62767ff34b21f514a75f0e9f50fc5e4f36f5a66..b169b3e44a921b9c9e988c3cb6d34c1a378346cf 100644 (file)
@@ -487,17 +487,17 @@ void drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
 EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event);
 
 /**
- * drm_sysfs_connector_status_event - generate a DRM uevent for connector
- * property status change
- * @connector: connector on which property status changed
- * @property: connector property whose status changed.
+ * drm_sysfs_connector_property_event - generate a DRM uevent for connector
+ * property change
+ * @connector: connector on which property changed
+ * @property: connector property which has changed.
  *
- * Send a uevent for the DRM device specified by @dev.  Currently we
+ * Send a uevent for the specified DRM connector and property.  Currently we
  * set HOTPLUG=1 and connector id along with the attached property id
- * related to the status change.
+ * related to the change.
  */
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
-                                     struct drm_property *property)
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+                                       struct drm_property *property)
 {
        struct drm_device *dev = connector->dev;
        char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21];
@@ -511,11 +511,14 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
        snprintf(prop_id, ARRAY_SIZE(prop_id),
                 "PROPERTY=%u", property->base.id);
 
-       DRM_DEBUG("generating connector status event\n");
+       drm_dbg_kms(connector->dev,
+                   "[CONNECTOR:%d:%s] generating connector property event for [PROP:%d:%s]\n",
+                   connector->base.id, connector->name,
+                   property->base.id, property->name);
 
        kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 }
-EXPORT_SYMBOL(drm_sysfs_connector_status_event);
+EXPORT_SYMBOL(drm_sysfs_connector_property_event);
 
 struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
 {
index 31a7f59ccb49ee7c552e58991c444dd8754e8582..ea55f6b7b744a755d421fbd471e914f49346bb14 100644 (file)
@@ -481,10 +481,7 @@ static const struct drm_driver etnaviv_drm_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
        .open               = etnaviv_open,
        .postclose           = etnaviv_postclose,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
-       .gem_prime_mmap     = drm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = etnaviv_debugfs_init,
 #endif
index de8c9894967c05908b18c9d358d39419ab474780..bbc9c54871f43cd4c188c38190649bba31ecda1a 100644 (file)
@@ -8,8 +8,8 @@
 #include <linux/delay.h>
 #include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
index 7ca7e1dab52c3ecbd16badcf0c21a85b7a84e2e2..733b109a509525f7c17bd69fe2e534073cb796e8 100644 (file)
@@ -7,7 +7,7 @@ config DRM_EXYNOS
        select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
        select DRM_KMS_HELPER
        select VIDEOMODE_HELPERS
-       select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+       select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
        select SND_SOC_HDMI_CODEC if SND_SOC
        help
          Choose this option if you have a Samsung SoC Exynos chipset.
index 2867b39fa35e386f8d4e1df43a411978e2bda7d2..4d986077738b9b61caad8720543137d6c2424bc1 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index 3126f735dedcbbce35f3e6529e062a841a66ed02..0156a5e9443594711d9286f25e0d9ca747a02e7d 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
index 4153f302de7c45a1351e91555e249292f078080d..d19e796c2061335e8017898067d3205e29b7230d 100644 (file)
@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
        if (exynos_crtc->ops->atomic_disable)
                exynos_crtc->ops->atomic_disable(exynos_crtc);
 
+       spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event && !crtc->state->active) {
-               spin_lock_irq(&crtc->dev->event_lock);
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
-               spin_unlock_irq(&crtc->dev->event_lock);
-
                crtc->state->event = NULL;
        }
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
index 6b73fb7a83c3c98901a291dd7856b6fe7d2d6e87..8399256cb5c9d78b474cf3a7771d2c477160e10c 100644 (file)
@@ -109,11 +109,8 @@ static const struct drm_driver exynos_drm_driver = {
        .open                   = exynos_drm_open,
        .postclose              = exynos_drm_postclose,
        .dumb_create            = exynos_drm_gem_dumb_create,
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import       = exynos_drm_gem_prime_import,
        .gem_prime_import_sg_table      = exynos_drm_gem_prime_import_sg_table,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
        .ioctls                 = exynos_ioctls,
        .num_ioctls             = ARRAY_SIZE(exynos_ioctls),
        .fops                   = &exynos_drm_driver_fops,
index fc81f728e6baba1ab34fb0daffade9f3d3d5c757..69ea33cae65162afb9b5bb151682f432d5c4523b 100644 (file)
@@ -8,7 +8,8 @@
  */
 
 #include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 
 #include <drm/bridge/samsung-dsim.h>
 #include <drm/drm_probe_helper.h>
index fdf65587f1fe015868b2095cd120a27e5db596e4..a379c8ca435a334d52ae4d8c7adf17d2a76fa246 100644 (file)
@@ -49,9 +49,9 @@ static void exynos_drm_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops exynos_drm_fb_ops = {
        .owner          = THIS_MODULE,
-       __FB_DEFAULT_IO_OPS_RDWR,
+       __FB_DEFAULT_DMAMEM_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
-       __FB_DEFAULT_IO_OPS_DRAW,
+       __FB_DEFAULT_DMAMEM_OPS_DRAW,
        .fb_mmap        = exynos_drm_fb_mmap,
        .fb_destroy     = exynos_drm_fb_destroy,
 };
@@ -79,6 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        offset = fbi->var.xoffset * fb->format->cpp[0];
        offset += fbi->var.yoffset * fb->pitches[0];
 
+       fbi->flags |= FBINFO_VIRTFB;
        fbi->screen_buffer = exynos_gem->kvaddr + offset;
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
@@ -215,10 +216,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
        if (ret)
                goto err_drm_client_init;
 
-       ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
index 7f4a0be03dd1c273e79aeefec97323cc6bcc7f23..8dde7b1e9b35d996f34afadb75701d4bc7cf5f62 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index 964dceb28c1eb880901da7b30220383f630592b2..34cdabc30b4f5ee62fd788923cbf335bb4af9409 100644 (file)
@@ -1426,6 +1426,6 @@ struct platform_driver gsc_driver = {
                .name   = "exynos-drm-gsc",
                .owner  = THIS_MODULE,
                .pm     = &gsc_pm_ops,
-               .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
+               .of_match_table = exynos_drm_gsc_of_match,
        },
 };
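Dropping of_match_ptr() here is the usual fix for unused-const-variable warnings in W=1 builds: with CONFIG_OF=n the macro expands to NULL and the exynos_drm_gsc_of_match table becomes unreferenced. Since this driver only probes via DT anyway, pointing at the table unconditionally is harmless:

	/* of_match_ptr(t) is t with CONFIG_OF, NULL without; referencing
	 * the table directly keeps it used in !CONFIG_OF builds too. */
	.of_match_table = exynos_drm_gsc_of_match,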
index 8706f377c349d5de6a71254484272180b2a8ab49..ffb327c5139ece0762142039d75947472d9cb60f 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
index 20608e9780cea509a8e3908f6e3ea35ad3b5b20f..f2b8b09a6b4ecaf5a9bc78663a2db1d9d2244f24 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
index b7c11bdce2c89a9fabb991a26f203416f1d15fb0..f3aaa4ea3e68208b1f6bfccaf49a2d646e43ded1 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
index 8d333db813b7d180a5d668f29b29a826e7d362fd..b302392ff0d7fbd7ee707ba1364b7e44ec74ffa8 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/kernel.h>
 #include <linux/ktime.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
index c09ba019ba5ecf0c8ccf4ee3a251a8decc3a246c..a395f93449f36813bef8433f16583e04f451d0cb 100644 (file)
@@ -346,7 +346,7 @@ disable_clk:
        return ret;
 }
 
-static int fsl_dcu_drm_remove(struct platform_device *pdev)
+static void fsl_dcu_drm_remove(struct platform_device *pdev)
 {
        struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
@@ -354,13 +354,11 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
        drm_dev_put(fsl_dev->drm);
        clk_disable_unprepare(fsl_dev->clk);
        clk_unregister(fsl_dev->pix_clk);
-
-       return 0;
 }
 
 static struct platform_driver fsl_dcu_drm_platform_driver = {
        .probe          = fsl_dcu_drm_probe,
-       .remove         = fsl_dcu_drm_remove,
+       .remove_new     = fsl_dcu_drm_remove,
        .driver         = {
                .name   = "fsl-dcu",
                .pm     = &fsl_dcu_drm_pm_ops,
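fsl-dcu (and the dw-dsi and kirin hunks further down) are part of the tree-wide conversion to the void-returning platform remove callback: the int returned by .remove() was ignored by the driver core apart from logging, so an error could never stop the unbind, and .remove_new() makes that explicit in the type. The shape of the conversion, sketched with a placeholder "foo" driver:

	/* After: .remove_new() returns void, removing the dead error path. */
	static void foo_remove(struct platform_device *pdev)
	{
		struct foo_device *foo = platform_get_drvdata(pdev);

		foo_teardown(foo);
		/* no "return 0": the core ignored the value anyway */
	}

	static struct platform_driver foo_driver = {
		.probe		= foo_probe,
		.remove_new	= foo_remove,	/* void-returning callback */
		.driver		= {
			.name	= "foo",
		},
	};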
index cd3d92725ed48f6231ab7f6bae456d84f0a32c1d..efb4a2dd2f80885cb59c925d09401002278d7d61 100644 (file)
@@ -3,7 +3,7 @@ config DRM_GMA500
        tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
        depends on DRM && PCI && X86 && MMU
        select DRM_KMS_HELPER
-       select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+       select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
        select I2C
        select I2C_ALGOBIT
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
index 955cbe9f05a72eab99a671bc456a0ee19d6ee22e..98b44974d42dd0e4f7ad87f1bc08345e468e065f 100644 (file)
@@ -135,10 +135,10 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops psb_fbdev_fb_ops = {
        .owner = THIS_MODULE,
-       __FB_DEFAULT_IO_OPS_RDWR,
+       __FB_DEFAULT_IOMEM_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_setcolreg = psb_fbdev_fb_setcolreg,
-       __FB_DEFAULT_IO_OPS_DRAW,
+       __FB_DEFAULT_IOMEM_OPS_DRAW,
        .fb_mmap = psb_fbdev_fb_mmap,
        .fb_destroy = psb_fbdev_fb_destroy,
 };
@@ -215,7 +215,7 @@ static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
        }
 
        info->fbops = &psb_fbdev_fb_ops;
-       info->flags = FBINFO_DEFAULT;
+
        /* Access stolen memory directly */
        info->screen_base = dev_priv->vram_addr + backing->offset;
        info->screen_size = size;
@@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
                goto err_drm_fb_helper_unprepare;
        }
 
-       ret = psb_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
index dc16a92625d454d56fdf6e7c7f6ccf81bfb33422..d2f199ea3c111c532bbb609c684858332908f835 100644 (file)
@@ -390,7 +390,7 @@ static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *
        mutex_lock(&gdrm->damage_lock);
 
        if (!gdrm->shadow_buf) {
-               gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+               gdrm->shadow_buf = vcalloc(fb->pitches[0], fb->height);
                if (!gdrm->shadow_buf) {
                        mutex_unlock(&gdrm->damage_lock);
                        return -ENOMEM;
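This is the "fix potential memory leak/overflow" class of change: vzalloc(a * b) computes the size with an unchecked 32-bit multiply, while vcalloc(n, size) performs the same zeroed allocation but fails cleanly if n * size would overflow, matching the kcalloc()/kmalloc_array() convention. Illustrated:

	/* Before: pitches[0] * height can wrap before vzalloc() sees it. */
	gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);

	/* After: vcalloc() returns NULL if the multiplication would overflow. */
	gdrm->shadow_buf = vcalloc(fb->pitches[0], fb->height);
	if (!gdrm->shadow_buf)
		return -ENOMEM;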
index 0c4aa4d9b0a77cfed44b212fa6212966ea3cb42a..8a98fa276e8a9d9d21d05ed087ac51a1da5d3cfa 100644 (file)
@@ -63,7 +63,6 @@ static const struct drm_driver hibmc_driver = {
        .debugfs_init           = drm_vram_mm_debugfs_init,
        .dumb_create            = hibmc_dumb_create,
        .dumb_map_offset        = drm_gem_ttm_dumb_map_offset,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
 };
 
 static int __maybe_unused hibmc_pm_suspend(struct device *dev)
index d9978b79828c17b4bd5bc0ac3b6e7e15789542a1..566de46587195851d44c6c3fa0319f9f007d7867 100644 (file)
@@ -874,14 +874,12 @@ static int dsi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int dsi_remove(struct platform_device *pdev)
+static void dsi_remove(struct platform_device *pdev)
 {
        struct dsi_data *data = platform_get_drvdata(pdev);
        struct dw_dsi *dsi = &data->dsi;
 
        mipi_dsi_host_unregister(&dsi->host);
-
-       return 0;
 }
 
 static const struct of_device_id dsi_of_match[] = {
@@ -892,7 +890,7 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
 
 static struct platform_driver dsi_driver = {
        .probe = dsi_probe,
-       .remove = dsi_remove,
+       .remove_new = dsi_remove,
        .driver = {
                .name = "dw-dsi",
                .of_match_table = dsi_of_match,
index 9c5d49bf40c97ef2ee791c3c83c77449ddeffc1e..e8c77bcc6dae91e7cb13421d994832752a6e24e7 100644 (file)
@@ -11,9 +11,9 @@
  *     Xinwei Kong <kong.kongxinwei@hisilicon.com>
  */
 
-#include <linux/of_platform.h>
 #include <linux/component.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 
@@ -279,10 +279,9 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
        return component_master_add_with_match(dev, &kirin_drm_ops, match);
 }
 
-static int kirin_drm_platform_remove(struct platform_device *pdev)
+static void kirin_drm_platform_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &kirin_drm_ops);
-       return 0;
 }
 
 static const struct of_device_id kirin_drm_dt_ids[] = {
@@ -295,7 +294,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
 
 static struct platform_driver kirin_drm_platform_driver = {
        .probe = kirin_drm_platform_probe,
-       .remove = kirin_drm_platform_remove,
+       .remove_new = kirin_drm_platform_remove,
        .driver = {
                .name = "kirin-drm",
                .of_match_table = kirin_drm_dt_ids,
index a7d2c92d6c6a033b36a8c97b431538df62544d14..8026118c6e0330f49fa3851cfaf0b2dd6ec37349 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/hyperv.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/screen_info.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
index 521bdf656cca2df257f0c38acbe21dce756636e0..131512a5f3bd996ad1e2eb869ffa09837daba0c7 100644 (file)
@@ -497,7 +497,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
 
 static struct drm_i2c_encoder_driver ch7006_driver = {
        .i2c_driver = {
-               .probe_new = ch7006_probe,
+               .probe = ch7006_probe,
                .remove = ch7006_remove,
 
                .driver = {
index f57f9a8075429fea54174be4c55bd112af8a09c8..ff23422727fce290a188e495d343e32bc2c373ec 100644 (file)
@@ -420,7 +420,7 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
 
 static struct drm_i2c_encoder_driver sil164_driver = {
        .i2c_driver = {
-               .probe_new = sil164_probe,
+               .probe = sil164_probe,
                .driver = {
                        .name = "sil164",
                },
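The ch7006 and sil164 hunks are the tail end of the I2C subsystem's probe() rework: the transitional .probe_new() callback, which takes only the client pointer, has been renamed back to .probe now that the legacy two-argument probe(client, id) form is gone. The signature these drivers now register looks like this (body illustrative):

	static int sil164_probe(struct i2c_client *client)
	{
		/* device setup; where needed, the matched ID is available
		 * via i2c_client_get_device_id() rather than a second arg */
		return 0;
	}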
index 01b5a8272a271afccb24390bea2e4c6ed81ed821..ce397a8797f7b6356aa22184f78eeb12db0b73ac 100644 (file)
@@ -17,7 +17,7 @@ config DRM_I915
        select DRM_KMS_HELPER
        select DRM_PANEL
        select DRM_MIPI_DSI
-       select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+       select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
        select RELAY
        select I2C
        select I2C_ALGOBIT
index 789dce9e26088c25b2cd58d76de865ad9378295f..79f65eff6bb2a381a304e5d8f751c3e655e668be 100644 (file)
@@ -23,6 +23,11 @@ subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
+# Fine-grained warning disables
+CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
+CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
+CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
+
 subdir-ccflags-y += -I$(srctree)/$(src)
 
 # Please keep these build lists sorted!
@@ -127,6 +132,7 @@ gt-y += \
        gt/intel_sseu.o \
        gt/intel_sseu_debugfs.o \
        gt/intel_timeline.o \
+       gt/intel_tlb.o \
        gt/intel_wopcm.o \
        gt/intel_workarounds.o \
        gt/shmem_utils.o \
@@ -192,7 +198,8 @@ i915-y += \
          gt/uc/intel_gsc_fw.o \
          gt/uc/intel_gsc_proxy.o \
          gt/uc/intel_gsc_uc.o \
-         gt/uc/intel_gsc_uc_heci_cmd_submit.o\
+         gt/uc/intel_gsc_uc_debugfs.o \
+         gt/uc/intel_gsc_uc_heci_cmd_submit.o \
          gt/uc/intel_guc.o \
          gt/uc/intel_guc_ads.o \
          gt/uc/intel_guc_capture.o \
index 112d91d81fdc4c3585d9472a9c02f97a0bfb172f..4c7187f7913ea52b53a52d53539ffdb00451cf01 100644 (file)
@@ -1259,6 +1259,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;
 
+       if (!assert_port_valid(dev_priv, port))
+               return false;
+
        devdata = intel_bios_encoder_data_lookup(dev_priv, port);
 
        /* FIXME bail? */
@@ -1270,6 +1273,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
        if (!dig_port)
                return false;
 
+       dig_port->aux_ch = AUX_CH_NONE;
+
        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;
@@ -1373,6 +1378,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
                intel_infoframe_init(dig_port);
 
        dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
+       if (dig_port->aux_ch == AUX_CH_NONE)
+               goto err_init_connector;
+
        if (!intel_dp_init_connector(dig_port, intel_connector))
                goto err_init_connector;
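Two guards are threaded through g4x_dp_init(): assert_port_valid() rejects VBT ports the platform cannot have before anything is allocated, and dig_port->aux_ch is initialized to the AUX_CH_NONE sentinel so a failed intel_dp_aux_ch() lookup is detected and unwound instead of leaving a bogus channel in place. Condensed:

	dig_port->aux_ch = AUX_CH_NONE;		/* known-invalid until resolved */

	/* ... connector/encoder setup ... */

	dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
	if (dig_port->aux_ch == AUX_CH_NONE)
		goto err_init_connector;	/* VBT offered no usable AUX channel */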
 
index 5c187e6e0472ae4584649acb6b0ef33c772fc1df..634b14116d9dda13ea03c0f09f2419c5d50851e1 100644 (file)
@@ -659,6 +659,20 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
        return ret;
 }
 
+static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+{
+       if (IS_G4X(i915) || IS_VALLEYVIEW(i915))
+               return port == PORT_B || port == PORT_C;
+       else
+               return port == PORT_B || port == PORT_C || port == PORT_D;
+}
+
+static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+{
+       return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port),
+                        "Platform does not support HDMI %c\n", port_name(port));
+}
+
 void g4x_hdmi_init(struct drm_i915_private *dev_priv,
                   i915_reg_t hdmi_reg, enum port port)
 {
@@ -667,6 +681,12 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
 
+       if (!assert_port_valid(dev_priv, port))
+               return;
+
+       if (!assert_hdmi_port_valid(dev_priv, port))
+               return;
+
        devdata = intel_bios_encoder_data_lookup(dev_priv, port);
 
        /* FIXME bail? */
@@ -678,6 +698,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
        if (!dig_port)
                return;
 
+       dig_port->aux_ch = AUX_CH_NONE;
+
        intel_connector = intel_connector_alloc();
        if (!intel_connector) {
                kfree(dig_port);
@@ -753,6 +775,5 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
 
        intel_infoframe_init(dig_port);
 
-       dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
        intel_hdmi_init_connector(dig_port, intel_connector);
 }
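g4x_hdmi_init() gains the same early-out validation plus an HDMI-specific port check (B/C only on G4X and VLV, B/C/D elsewhere), and it stops looking up an AUX channel altogether: an HDMI/DVI-only DDI has none, so dig_port->aux_ch simply stays at the AUX_CH_NONE it was initialized to. The assert helpers follow the usual i915 idiom in which drm_WARN() evaluates to its condition:

	/* "!drm_WARN(cond)" reads as assert: true when the port is valid,
	 * a logged one-line warning (and false) when VBT claims an
	 * impossible port. */
	if (!assert_hdmi_port_valid(dev_priv, port))
		return;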
index c133928a0655c72d435270236fd7b839cbf949b8..ad6488e9c2b2b8b97bb45c9d70913214ed412551 100644 (file)
@@ -444,7 +444,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
                intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
 
                /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
-               if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
+               if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+                   (DISPLAY_VER(dev_priv) >= 12)) {
                        intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
                                     LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
 
@@ -528,31 +529,16 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
        enum port port;
        enum phy phy;
 
-       /* Program T-INIT master registers */
-       for_each_dsi_port(port, intel_dsi->ports)
-               intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
-                            DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
-
        /* Program DPHY clock lanes timings */
-       for_each_dsi_port(port, intel_dsi->ports) {
+       for_each_dsi_port(port, intel_dsi->ports)
                intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
                               intel_dsi->dphy_reg);
 
-               /* shadow register inside display core */
-               intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
-                              intel_dsi->dphy_reg);
-       }
-
        /* Program DPHY data lanes timings */
-       for_each_dsi_port(port, intel_dsi->ports) {
+       for_each_dsi_port(port, intel_dsi->ports)
                intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
                               intel_dsi->dphy_data_lane_reg);
 
-               /* shadow register inside display core */
-               intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
-                              intel_dsi->dphy_data_lane_reg);
-       }
-
        /*
         * If the DSI link is operating at or below 800 MHz,
         * TA_SURE should be overridden and programmed to
@@ -561,26 +547,55 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
         */
        if (DISPLAY_VER(dev_priv) == 11) {
                if (afe_clk(encoder, crtc_state) <= 800000) {
-                       for_each_dsi_port(port, intel_dsi->ports) {
+                       for_each_dsi_port(port, intel_dsi->ports)
                                intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
                                             TA_SURE_MASK,
                                             TA_SURE_OVERRIDE | TA_SURE(0));
-
-                               /* shadow register inside display core */
-                               intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
-                                            TA_SURE_MASK,
-                                            TA_SURE_OVERRIDE | TA_SURE(0));
-                       }
                }
        }
 
-       if (IS_JSL_EHL(dev_priv)) {
+       if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                for_each_dsi_phy(phy, intel_dsi->phys)
                        intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
                                     0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
        }
 }
 
+static void
+gen11_dsi_setup_timings(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       enum port port;
+
+       /* Program T-INIT master registers */
+       for_each_dsi_port(port, intel_dsi->ports)
+               intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+                            DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
+
+       /* shadow register inside display core */
+       for_each_dsi_port(port, intel_dsi->ports)
+               intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+                              intel_dsi->dphy_reg);
+
+       /* shadow register inside display core */
+       for_each_dsi_port(port, intel_dsi->ports)
+               intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+                              intel_dsi->dphy_data_lane_reg);
+
+       /* shadow register inside display core */
+       if (DISPLAY_VER(dev_priv) == 11) {
+               if (afe_clk(encoder, crtc_state) <= 800000) {
+                       for_each_dsi_port(port, intel_dsi->ports) {
+                               intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+                                            TA_SURE_MASK,
+                                            TA_SURE_OVERRIDE | TA_SURE(0));
+                       }
+               }
+       }
+}
+
 static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1090,11 +1105,15 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
        /* step 4c: configure voltage swing and skew */
        gen11_dsi_voltage_swing_program_seq(encoder);
 
+       /* setup D-PHY timings */
+       gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+
        /* enable DDI buffer */
        gen11_dsi_enable_ddi_buffer(encoder);
 
-       /* setup D-PHY timings */
-       gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+       gen11_dsi_gate_clocks(encoder);
+
+       gen11_dsi_setup_timings(encoder, crtc_state);
 
        /* Since transcoder is configured to take events from GPIO */
        gen11_dsi_config_util_pin(encoder, true);
@@ -1104,9 +1123,6 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
 
        /* Step (4h, 4i, 4j, 4k): Configure transcoder */
        gen11_dsi_configure_transcoder(encoder, crtc_state);
-
-       /* Step 4l: Gate DDI clocks */
-       gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1138,12 +1154,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
                                "error setting max return pkt size%d\n", tmp);
        }
 
-       /* panel power on related mipi dsi vbt sequences */
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-       msleep(intel_dsi->panel_on_delay);
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
        /* ensure all panel commands dispatched before enabling transcoder */
        wait_for_cmds_dispatched_to_panel(encoder);
@@ -1154,6 +1165,14 @@ static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
                                     const struct intel_crtc_state *crtc_state,
                                     const struct drm_connector_state *conn_state)
 {
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+       intel_dsi_wait_panel_power_cycle(intel_dsi);
+
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+       msleep(intel_dsi->panel_on_delay);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+
        /* step2: enable IO power */
        gen11_dsi_enable_io_power(encoder);
 
@@ -1225,9 +1244,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
                             const struct drm_connector_state *conn_state)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
-
-       drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
        /* Wa_1409054076:icl,jsl,ehl */
        icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
@@ -1238,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
        /* step6d: enable dsi transcoder */
        gen11_dsi_enable_transcoder(encoder);
 
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
        /* step7: enable backlight */
        intel_backlight_enable(crtc_state, conn_state);
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1271,8 +1290,6 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
 
        /* ensure cmds dispatched to panel */
        wait_for_cmds_dispatched_to_panel(encoder);
@@ -1373,11 +1390,21 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
                              const struct drm_connector_state *old_conn_state)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc);
 
        /* step1: turn off backlight */
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
        intel_backlight_disable(old_conn_state);
+}
+
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+                                  struct intel_encoder *encoder,
+                                  const struct intel_crtc_state *old_crtc_state,
+                                  const struct drm_connector_state *old_conn_state)
+{
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+
+       intel_crtc_vblank_off(old_crtc_state);
 
        /* step2d,e: disable transcoder and wait */
        gen11_dsi_disable_transcoder(encoder);
@@ -1391,6 +1418,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
        /* step2h,i,j: deconfig trancoder */
        gen11_dsi_deconfigure_trancoder(encoder);
 
+       intel_dsc_disable(old_crtc_state);
+       skl_scaler_disable(old_crtc_state);
+
        /* step3: disable port */
        gen11_dsi_disable_port(encoder);
 
@@ -1398,18 +1428,13 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
 
        /* step4: disable IO power */
        gen11_dsi_disable_io_power(encoder);
-}
 
-static void gen11_dsi_post_disable(struct intel_atomic_state *state,
-                                  struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *old_crtc_state,
-                                  const struct drm_connector_state *old_conn_state)
-{
-       intel_crtc_vblank_off(old_crtc_state);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
 
-       intel_dsc_disable(old_crtc_state);
+       msleep(intel_dsi->panel_off_delay);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
 
-       skl_scaler_disable(old_crtc_state);
+       intel_dsi->panel_power_off_time = ktime_get_boottime();
 }
 
 static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
@@ -1909,7 +1934,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
                                                       fixed_mode->vdisplay);
 }
 
-void icl_dsi_init(struct drm_i915_private *dev_priv)
+void icl_dsi_init(struct drm_i915_private *dev_priv,
+                 const struct intel_bios_encoder_data *devdata)
 {
        struct intel_dsi *intel_dsi;
        struct intel_encoder *encoder;
@@ -1917,7 +1943,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
        struct drm_connector *connector;
        enum port port;
 
-       if (!intel_bios_is_dsi_present(dev_priv, &port))
+       port = intel_bios_encoder_port(devdata);
+       if (port == PORT_NONE)
                return;
 
        intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
@@ -1934,6 +1961,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
        intel_dsi->attached_connector = intel_connector;
        connector = &intel_connector->base;
 
+       encoder->devdata = devdata;
+
        /* register DSI encoder with DRM subsystem */
        drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs,
                         DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
@@ -1957,6 +1986,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
        encoder->get_power_domains = gen11_dsi_get_power_domains;
        encoder->disable_clock = gen11_dsi_gate_clocks;
        encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
+       encoder->shutdown = intel_dsi_shutdown;
 
        /* register DSI connector with DRM subsystem */
        drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs,
@@ -1968,7 +1998,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
        /* attach connector to encoder */
        intel_connector_attach_encoder(intel_connector, encoder);
 
-       encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+       intel_dsi->panel_power_off_time = ktime_get_boottime();
+
        intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
 
        mutex_lock(&dev_priv->drm.mode_config.mutex);
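Taken together, the icl_dsi.c hunks do three things. First, the D-PHY programming is split: gen11_dsi_setup_dphy_timings() keeps only the real PHY registers and still runs before the DDI buffer is enabled, while the display-core shadow copies (DSI_CLK/DATA/TA_TIMING_PARAM plus the T-INIT master setup) move into a new gen11_dsi_setup_timings() that runs after the buffer is enabled and the clocks are gated. Second, panel power sequencing is redistributed: POWER_ON/DEASSERT_RESET move into pre_pll_enable behind intel_dsi_wait_panel_power_cycle(), DISPLAY_ON fires only once the transcoder is up, and ASSERT_RESET/POWER_OFF (with panel_off_delay and a panel_power_off_time stamp) move to post-disable, which also absorbs the transcoder/port/IO teardown from gen11_dsi_disable(). Third, the encoder grows a shutdown hook, and icl_dsi_init() now takes the VBT devdata directly instead of re-scanning for a DSI port. The enable-side ordering, condensed:

	/* Condensed enable flow after this series (error paths elided): */
	gen11_dsi_voltage_swing_program_seq(encoder);
	gen11_dsi_setup_dphy_timings(encoder, crtc_state);	/* PHY registers */
	gen11_dsi_enable_ddi_buffer(encoder);
	gen11_dsi_gate_clocks(encoder);				/* was the final step */
	gen11_dsi_setup_timings(encoder, crtc_state);		/* shadow registers */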
index b4861b56b5b2a85164c8794d53d81c09967a805d..43fa7d72eeb1800ea74d6f55b7f0b8aca1ca6980 100644 (file)
@@ -7,9 +7,11 @@
 #define __ICL_DSI_H__
 
 struct drm_i915_private;
+struct intel_bios_encoder_data;
 struct intel_crtc_state;
 
-void icl_dsi_init(struct drm_i915_private *i915);
+void icl_dsi_init(struct drm_i915_private *dev_priv,
+                 const struct intel_bios_encoder_data *devdata);
 void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
 
 #endif /* __ICL_DSI_H__ */
index 7d9578ebae556665aa798b8d09d586574ce622db..60a492e186ab836d42fe9a6cebe7e3cf9807d440 100644 (file)
@@ -212,6 +212,7 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int width, height;
+       unsigned int rel_data_rate;
 
        if (plane->id == PLANE_CURSOR)
                return 0;
@@ -241,7 +242,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
                height /= 2;
        }
 
-       return width * height * fb->format->cpp[color_plane];
+       rel_data_rate = width * height * fb->format->cpp[color_plane];
+
+       return intel_adjusted_rate(&plane_state->uapi.src,
+                                  &plane_state->uapi.dst,
+                                  rel_data_rate);
 }
 
 int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
index 34a397adbd6b05529d6182d27c782081a3b68e43..858c959f7babfffbfae06f10057f6f463a9e4ae1 100644 (file)
@@ -2230,122 +2230,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
        return 0;
 }
 
-static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
-{
-       enum port port;
-
-       if (!ddc_pin)
-               return PORT_NONE;
-
-       for_each_port(port) {
-               const struct intel_bios_encoder_data *devdata =
-                       i915->display.vbt.ports[port];
-
-               if (devdata && ddc_pin == devdata->child.ddc_pin)
-                       return port;
-       }
-
-       return PORT_NONE;
-}
-
-static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
-                            enum port port)
-{
-       struct drm_i915_private *i915 = devdata->i915;
-       struct child_device_config *child;
-       u8 mapped_ddc_pin;
-       enum port p;
-
-       if (!devdata->child.ddc_pin)
-               return;
-
-       mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin);
-       if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) {
-               drm_dbg_kms(&i915->drm,
-                           "Port %c has invalid DDC pin %d, "
-                           "sticking to defaults\n",
-                           port_name(port), mapped_ddc_pin);
-               devdata->child.ddc_pin = 0;
-               return;
-       }
-
-       p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin);
-       if (p == PORT_NONE)
-               return;
-
-       drm_dbg_kms(&i915->drm,
-                   "port %c trying to use the same DDC pin (0x%x) as port %c, "
-                   "disabling port %c DVI/HDMI support\n",
-                   port_name(port), mapped_ddc_pin,
-                   port_name(p), port_name(p));
-
-       /*
-        * If we have multiple ports supposedly sharing the pin, then dvi/hdmi
-        * couldn't exist on the shared port. Otherwise they share the same ddc
-        * pin and system couldn't communicate with them separately.
-        *
-        * Give inverse child device order the priority, last one wins. Yes,
-        * there are real machines (eg. Asrock B250M-HDV) where VBT has both
-        * port A and port E with the same AUX ch and we must pick port E :(
-        */
-       child = &i915->display.vbt.ports[p]->child;
-
-       child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
-       child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
-
-       child->ddc_pin = 0;
-}
-
-static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
-{
-       enum port port;
-
-       if (!aux_ch)
-               return PORT_NONE;
-
-       for_each_port(port) {
-               const struct intel_bios_encoder_data *devdata =
-                       i915->display.vbt.ports[port];
-
-               if (devdata && aux_ch == devdata->child.aux_channel)
-                       return port;
-       }
-
-       return PORT_NONE;
-}
-
-static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
-                           enum port port)
-{
-       struct drm_i915_private *i915 = devdata->i915;
-       struct child_device_config *child;
-       enum port p;
-
-       p = get_port_by_aux_ch(i915, devdata->child.aux_channel);
-       if (p == PORT_NONE)
-               return;
-
-       drm_dbg_kms(&i915->drm,
-                   "port %c trying to use the same AUX CH (0x%x) as port %c, "
-                   "disabling port %c DP support\n",
-                   port_name(port), devdata->child.aux_channel,
-                   port_name(p), port_name(p));
-
-       /*
-        * If we have multiple ports supposedly sharing the aux channel, then DP
-        * couldn't exist on the shared port. Otherwise they share the same aux
-        * channel and system couldn't communicate with them separately.
-        *
-        * Give inverse child device order the priority, last one wins. Yes,
-        * there are real machines (eg. Asrock B250M-HDV) where VBT has both
-        * port A and port E with the same AUX ch and we must pick port E :(
-        */
-       child = &i915->display.vbt.ports[p]->child;
-
-       child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
-       child->aux_channel = 0;
-}
-
 static u8 dvo_port_type(u8 dvo_port)
 {
        switch (dvo_port) {
@@ -2490,6 +2374,19 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
        }
 }
 
+enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata)
+{
+       struct drm_i915_private *i915 = devdata->i915;
+       const struct child_device_config *child = &devdata->child;
+       enum port port;
+
+       port = dvo_port_to_port(i915, child->dvo_port);
+       if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+               port = dsi_dvo_port_to_port(i915, child->dvo_port);
+
+       return port;
+}
+
 static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
 {
        switch (vbt_max_link_rate) {
@@ -2600,7 +2497,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
                devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
 }
 
-static bool
+bool
 intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
 {
        return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
@@ -2615,7 +2512,8 @@ intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
 /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
 int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
 {
-       if (!devdata || devdata->i915->display.vbt.version < 158)
+       if (!devdata || devdata->i915->display.vbt.version < 158 ||
+           DISPLAY_VER(devdata->i915) >= 14)
                return -1;
 
        return devdata->child.hdmi_level_shifter_value;
@@ -2658,13 +2556,17 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
        return true;
 }
 
-static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
-                          enum port port)
+static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
 {
        struct drm_i915_private *i915 = devdata->i915;
        const struct child_device_config *child = &devdata->child;
        bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
        int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
+       enum port port;
+
+       port = intel_bios_encoder_port(devdata);
+       if (port == PORT_NONE)
+               return;
 
        is_dvi = intel_bios_encoder_supports_dvi(devdata);
        is_dp = intel_bios_encoder_supports_dp(devdata);
@@ -2728,12 +2630,9 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
 static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
 {
        struct drm_i915_private *i915 = devdata->i915;
-       const struct child_device_config *child = &devdata->child;
        enum port port;
 
-       port = dvo_port_to_port(i915, child->dvo_port);
-       if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
-               port = dsi_dvo_port_to_port(i915, child->dvo_port);
+       port = intel_bios_encoder_port(devdata);
        if (port == PORT_NONE)
                return;
 
@@ -2744,22 +2643,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
                return;
        }
 
-       if (i915->display.vbt.ports[port]) {
-               drm_dbg_kms(&i915->drm,
-                           "More than one child device for port %c in VBT, using the first.\n",
-                           port_name(port));
-               return;
-       }
-
        sanitize_device_type(devdata, port);
-
-       if (intel_bios_encoder_supports_dvi(devdata))
-               sanitize_ddc_pin(devdata, port);
-
-       if (intel_bios_encoder_supports_dp(devdata))
-               sanitize_aux_ch(devdata, port);
-
-       i915->display.vbt.ports[port] = devdata;
 }
 
 static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -2770,7 +2654,6 @@ static bool has_ddi_port_info(struct drm_i915_private *i915)
 static void parse_ddi_ports(struct drm_i915_private *i915)
 {
        struct intel_bios_encoder_data *devdata;
-       enum port port;
 
        if (!has_ddi_port_info(i915))
                return;
@@ -2778,10 +2661,8 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
        list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
                parse_ddi_port(devdata);
 
-       for_each_port(port) {
-               if (i915->display.vbt.ports[port])
-                       print_ddi_port(i915->display.vbt.ports[port], port);
-       }
+       list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+               print_ddi_port(devdata);
 }
 
 static void
@@ -3706,5 +3587,22 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata
 const struct intel_bios_encoder_data *
 intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
 {
-       return i915->display.vbt.ports[port];
+       struct intel_bios_encoder_data *devdata;
+
+       list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+               if (intel_bios_encoder_port(devdata) == port)
+                       return devdata;
+       }
+
+       return NULL;
+}
+
+void intel_bios_for_each_encoder(struct drm_i915_private *i915,
+                                void (*func)(struct drm_i915_private *i915,
+                                             const struct intel_bios_encoder_data *devdata))
+{
+       struct intel_bios_encoder_data *devdata;
+
+       list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+               func(i915, devdata);
 }
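intel_bios.c stops caching child devices in a per-port vbt.ports[] array: intel_bios_encoder_port() derives the port from the DVO port on demand, intel_bios_encoder_data_lookup() walks the display_devices list, and the new intel_bios_for_each_encoder() iterator replaces for_each_port() loops. That is also why the cross-port sanitize_ddc_pin()/sanitize_aux_ch() fixups and the "more than one child device" check can be deleted wholesale here: conflicting ports are instead rejected at encoder-init time (see port_in_use() in the intel_ddi.c hunk below), and missing AUX channels surface as AUX_CH_NONE. Usage sketch, assuming the display init code is converted the same way:

	/* Walk every VBT child device; intel_ddi_init()'s new signature
	 * matches the callback type directly. */
	intel_bios_for_each_encoder(i915, intel_ddi_init);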
index 45fae97d97192aa4710f251a8a1aa339602391bb..9680e3e92bb516df9d42f9b5d1469010dfa1c65e 100644 (file)
@@ -263,10 +263,12 @@ bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdat
 bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
 bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
 enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
 int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
 int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
@@ -276,4 +278,8 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
 int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
 int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
 
+void intel_bios_for_each_encoder(struct drm_i915_private *i915,
+                                void (*func)(struct drm_i915_private *i915,
+                                             const struct intel_bios_encoder_data *devdata));
+
 #endif /* _INTEL_BIOS_H_ */
index 4207863b7b2ae5d4ad29d83deac0d875417984d1..2fb030b1ff1de3a03502e6668abf2a981d1e1224 100644 (file)
@@ -37,6 +37,7 @@
 #include "intel_pci_config.h"
 #include "intel_pcode.h"
 #include "intel_psr.h"
+#include "intel_vdsc.h"
 #include "vlv_sideband.h"
 
 /**
@@ -469,7 +470,7 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
                cdclk_config->cdclk = 450000;
        else if (freq == LCPLL_CLK_FREQ_450)
                cdclk_config->cdclk = 450000;
-       else if (IS_HSW_ULT(dev_priv))
+       else if (IS_HASWELL_ULT(dev_priv))
                cdclk_config->cdclk = 337500;
        else
                cdclk_config->cdclk = 540000;
@@ -2607,9 +2608,16 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
         * When we decide to use only one VDSC engine, since
         * each VDSC operates with 1 ppc throughput, pixel clock
         * cannot be higher than the VDSC clock (cdclk)
+        * If there are 2 VDSC engines, then the pixel clock can't be
+        * higher than VDSC clock (cdclk) * 2, and so on.
         */
-       if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
-               min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+       if (crtc_state->dsc.compression_enable) {
+               int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+
+               min_cdclk = max_t(int, min_cdclk,
+                                 DIV_ROUND_UP(crtc_state->pixel_rate,
+                                              num_vdsc_instances));
+       }
 
        /*
         * HACK. Currently for TGL/DG2 platforms we calculate
@@ -3147,7 +3155,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 {
-       if (IS_JSL_EHL(dev_priv)) {
+       if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                if (dev_priv->display.cdclk.hw.ref == 24000)
                        dev_priv->display.cdclk.max_cdclk_freq = 552000;
                else
@@ -3192,9 +3200,9 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
                 */
                if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
                        dev_priv->display.cdclk.max_cdclk_freq = 450000;
-               else if (IS_BDW_ULX(dev_priv))
+               else if (IS_BROADWELL_ULX(dev_priv))
                        dev_priv->display.cdclk.max_cdclk_freq = 450000;
-               else if (IS_BDW_ULT(dev_priv))
+               else if (IS_BROADWELL_ULT(dev_priv))
                        dev_priv->display.cdclk.max_cdclk_freq = 540000;
                else
                        dev_priv->display.cdclk.max_cdclk_freq = 675000;
@@ -3559,10 +3567,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.cdclk.table = dg2_cdclk_table;
        } else if (IS_ALDERLAKE_P(dev_priv)) {
                /* Wa_22011320316:adl-p[a0] */
-               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+               if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
                        dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
                        dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
-               } else if (IS_ADLP_RPLU(dev_priv)) {
+               } else if (IS_RAPTORLAKE_U(dev_priv)) {
                        dev_priv->display.cdclk.table = rplu_cdclk_table;
                        dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
                } else {
@@ -3575,7 +3583,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
        } else if (DISPLAY_VER(dev_priv) >= 12) {
                dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
                dev_priv->display.cdclk.table = icl_cdclk_table;
-       } else if (IS_JSL_EHL(dev_priv)) {
+       } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
                dev_priv->display.cdclk.table = icl_cdclk_table;
        } else if (DISPLAY_VER(dev_priv) >= 11) {
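Two separate things happen in intel_cdclk.c. The IS_JSL_EHL()/IS_HSW_ULT()/IS_BDW_UL[TX]()/IS_ADLP_*() churn is a pure spelling change to fully written-out platform macros (IS_JASPERLAKE() || IS_ELKHARTLAKE(), IS_HASWELL_ULT(), and so on). The substantive fix is the VDSC floor on min_cdclk: each VDSC engine processes one pixel per cdclk cycle, so the old code pinned min_cdclk to the full pixel rate only in the single-engine (!dsc_split) case and constrained nothing with two engines; the new code divides by the actual engine count. Worked example: with a 1152000 kHz pixel rate and two engines, DIV_ROUND_UP(1152000, 2) gives a 576000 kHz floor, where the old code imposed none. In code form:

	if (crtc_state->dsc.compression_enable) {
		int n = intel_dsc_get_num_vdsc_instances(crtc_state);

		/* n engines at 1 pixel/clock must cover the pixel rate */
		min_cdclk = max_t(int, min_cdclk,
				  DIV_ROUND_UP(crtc_state->pixel_rate, n));
	}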
index 8966e65605168fb1798fa42634726596b72154b0..454607b4a02a76a3530baefa7d63cc1c23b2664f 100644 (file)
@@ -1453,6 +1453,16 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
                return 35;
 }
 
+/*
+ * change_lut_val_precision: helper function to upscale or downscale lut values.
+ * Parameters 'to' and 'from' need to be less than 32. This should be sufficient
+ * as currently there are no lut values exceeding 32 bits.
+ */
+static u32 change_lut_val_precision(u32 lut_val, int to, int from)
+{
+       return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
+}
+
 static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
                                 const struct drm_property_blob *blob)
 {
@@ -1487,8 +1497,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
                 * ToDo: Extend to max 7.0. Enable 32 bit input value
                 * as compared to just 16 to achieve this.
                 */
+               u32 lut_val;
+
+               if (DISPLAY_VER(i915) >= 14)
+                       lut_val = change_lut_val_precision(lut[i].green, 24, 16);
+               else
+                       lut_val = lut[i].green;
+
                ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
-                             lut[i].green);
+                             lut_val);
        }
 
        /* Clamp values > 1.0. */
@@ -3439,6 +3456,14 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
        for (i = 0; i < lut_size; i++) {
                u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
 
+               /*
+                * For MTL and beyond, convert the 24 bit LUT values read
+                * from HW back to 16 bit values to maintain parity with
+                * the userspace values.
+                */
+               if (DISPLAY_VER(dev_priv) >= 14)
+                       val = change_lut_val_precision(val, 16, 24);
+
                lut[i].red = val;
                lut[i].green = val;
                lut[i].blue = val;
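change_lut_val_precision() rescales a fixed-point LUT entry between bit widths: mul_u32_u32() performs the multiply in 64 bits, so lut_val * (1 << to) cannot wrap before the divide by (1 << from). Going 16 -> 24 bits is a clean x256, and the readback path divides by 256 again, so values round-trip exactly and userspace sees the same 16-bit LUT it programmed:

	u32 v16  = 0x1234;
	u32 v24  = change_lut_val_precision(v16, 24, 16);	/* 0x123400 */
	u32 back = change_lut_val_precision(v24, 16, 24);	/* 0x1234   */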
index 922a6d87b55345c83920c4dccaefbf6887776921..e2a220cf2e57f5665e910494ebb538b92ecbad0f 100644 (file)
@@ -141,7 +141,7 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
 
        if (IS_ALDERLAKE_S(i915))
                return phy == PHY_A;
-       else if (IS_JSL_EHL(i915) ||
+       else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) ||
                 IS_ROCKETLAKE(i915) ||
                 IS_DG1(i915))
                return phy < PHY_C;
@@ -242,7 +242,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
                ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
                                     IREFGEN, IREFGEN);
 
-               if (IS_JSL_EHL(dev_priv)) {
+               if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                        if (ehl_vbt_ddi_d_present(dev_priv))
                                expected_val = ICL_PHY_MISC_MUX_DDID;
 
@@ -333,7 +333,8 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
                 * "internal" child devices.
                 */
                val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
-               if (IS_JSL_EHL(dev_priv) && phy == PHY_A) {
+               if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                   phy == PHY_A) {
                        val &= ~ICL_PHY_MISC_MUX_DDID;
 
                        if (ehl_vbt_ddi_d_present(dev_priv))
index ab7cd5e60a0a403f19e02a2f90f3d51990b03b94..8090747586877e0b653302e292bba57c0470e38f 100644 (file)
@@ -1064,6 +1064,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
        }
 
        if (HAS_DDI(dev_priv)) {
+               assert_port_valid(dev_priv, PORT_E);
+
                crt->base.port = PORT_E;
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
index 719447ce86e7012431f1a7f302c90b78eecc7724..1b00ef2c6185073fb29c1683ee1d3adae3c87402 100644 (file)
@@ -116,6 +116,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
                                         XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
                drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
                            phy_name(phy), *val);
+               intel_cx0_bus_reset(i915, port, lane);
                return -ETIMEDOUT;
        }
 
@@ -158,10 +159,8 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
                       XELPDP_PORT_M2P_ADDRESS(addr));
 
        ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
-       if (ack < 0) {
-               intel_cx0_bus_reset(i915, port, lane);
+       if (ack < 0)
                return ack;
-       }
 
        intel_clear_response_ready_flag(i915, port, lane);
 
@@ -202,6 +201,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
                                  int lane, u16 addr, u8 data, bool committed)
 {
        enum phy phy = intel_port_to_phy(i915, port);
+       int ack;
        u32 val;
 
        if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
@@ -230,10 +230,9 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
        }
 
        if (committed) {
-               if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) {
-                       intel_cx0_bus_reset(i915, port, lane);
-                       return -EINVAL;
-               }
+               ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+               if (ack < 0)
+                       return ack;
        } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
                    XELPDP_PORT_P2M_ERROR_SET)) {
                drm_dbg_kms(&i915->drm,
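The CX0 message-bus fix centralizes recovery: intel_cx0_wait_for_ack() now performs intel_cx0_bus_reset() itself on timeout, so the read path drops its duplicated reset, and the committed-write path both drops its reset and propagates the helper's real errno (-ETIMEDOUT) instead of a blanket -EINVAL. Resulting caller pattern:

	ack = intel_cx0_wait_for_ack(i915, port,
				     XELPDP_PORT_P2M_COMMAND_WRITE_ACK,
				     lane, &val);
	if (ack < 0)
		return ack;	/* bus was already reset inside the helper */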
index f99809af257dc385b525ffa4f3a5b51040232eb7..4c4db5cdcbd0bda4ba4fcb3cce004d4dccb18c57 100644 (file)
@@ -43,8 +43,5 @@ int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
 void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
                                     const struct intel_crtc_state *crtc_state);
 int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
-void intel_cx0_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
-                                      const struct intel_crtc_state *crtc_state,
-                                      u32 level);
 int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
 #endif /* __INTEL_CX0_PHY_H__ */
index 090f242e610c54c257e74d0715c7796680314d24..84bbf854337aa74a4c801575ddf0f14470cceb5a 100644 (file)
@@ -32,6 +32,7 @@
 
 #include "i915_drv.h"
 #include "i915_reg.h"
+#include "icl_dsi.h"
 #include "intel_audio.h"
 #include "intel_audio_regs.h"
 #include "intel_backlight.h"
@@ -3582,7 +3583,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 {
        if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 2;
-       else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000)
+       else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 3;
        else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 1;
@@ -4653,13 +4655,95 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
 #define port_tc_name(port) ((port) - PORT_TC1 + '1')
 #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
 
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
+static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+{
+       /* straps not used on skl+ */
+       if (DISPLAY_VER(i915) >= 9)
+               return true;
+
+       switch (port) {
+       case PORT_A:
+               return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+       case PORT_B:
+               return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+       case PORT_C:
+               return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+       case PORT_D:
+               return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+       case PORT_E:
+               return true; /* no strap for DDI-E */
+       default:
+               MISSING_CASE(port);
+               return false;
+       }
+}
+
+static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+       return init_dp || intel_phy_is_tc(i915, phy);
+}
+
+static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+{
+       return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
+                        !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+                        "Platform does not support DSI\n");
+}
+
+static bool port_in_use(struct drm_i915_private *i915, enum port port)
+{
+       struct intel_encoder *encoder;
+
+       for_each_intel_encoder(&i915->drm, encoder) {
+               /* FIXME what about second port for dual link DSI? */
+               if (encoder->port == port)
+                       return true;
+       }
+
+       return false;
+}
+
+void intel_ddi_init(struct drm_i915_private *dev_priv,
+                   const struct intel_bios_encoder_data *devdata)
 {
        struct intel_digital_port *dig_port;
        struct intel_encoder *encoder;
-       const struct intel_bios_encoder_data *devdata;
        bool init_hdmi, init_dp;
-       enum phy phy = intel_port_to_phy(dev_priv, port);
+       enum port port;
+       enum phy phy;
+
+       port = intel_bios_encoder_port(devdata);
+       if (port == PORT_NONE)
+               return;
+
+       if (!port_strap_detected(dev_priv, port)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "Port %c strap not detected\n", port_name(port));
+               return;
+       }
+
+       if (!assert_port_valid(dev_priv, port))
+               return;
+
+       if (port_in_use(dev_priv, port)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "Port %c already claimed\n", port_name(port));
+               return;
+       }
+
+       if (intel_bios_encoder_supports_dsi(devdata)) {
+               /* BXT/GLK handled elsewhere, for now at least */
+               if (!assert_has_icl_dsi(dev_priv))
+                       return;
+
+               icl_dsi_init(dev_priv, devdata);
+               return;
+       }
+
+       phy = intel_port_to_phy(dev_priv, port);
 
        /*
         * On platforms with HTI (aka HDPORT), if it's enabled at boot it may
@@ -4673,14 +4757,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                return;
        }
 
-       devdata = intel_bios_encoder_data_lookup(dev_priv, port);
-       if (!devdata) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "VBT says port %c is not present\n",
-                           port_name(port));
-               return;
-       }
-
        init_hdmi = intel_bios_encoder_supports_dvi(devdata) ||
                intel_bios_encoder_supports_hdmi(devdata);
        init_dp = intel_bios_encoder_supports_dp(devdata);
@@ -4715,6 +4791,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        if (!dig_port)
                return;
 
+       dig_port->aux_ch = AUX_CH_NONE;
+
        encoder = &dig_port->base;
        encoder->devdata = devdata;
 
@@ -4801,7 +4879,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                encoder->disable_clock = dg1_ddi_disable_clock;
                encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
                encoder->get_config = dg1_ddi_get_config;
-       } else if (IS_JSL_EHL(dev_priv)) {
+       } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                if (intel_ddi_is_tc(dev_priv, port)) {
                        encoder->enable_clock = jsl_ddi_tc_enable_clock;
                        encoder->disable_clock = jsl_ddi_tc_disable_clock;
@@ -4872,7 +4950,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) >= 12)
                encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
-       else if (IS_JSL_EHL(dev_priv))
+       else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
                encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 11)
                encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
@@ -4895,7 +4973,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
        dig_port->dp.output_reg = INVALID_MMIO_REG;
        dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
-       dig_port->aux_ch = intel_dp_aux_ch(encoder);
+
+       if (need_aux_ch(encoder, init_dp)) {
+               dig_port->aux_ch = intel_dp_aux_ch(encoder);
+               if (dig_port->aux_ch == AUX_CH_NONE)
+                       goto err;
+       }
 
        if (intel_phy_is_tc(dev_priv, phy)) {
                bool is_legacy =
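need_aux_ch() is introduced by this series but does not appear in this excerpt. Judging from the call site above, it decides whether the encoder needs an AUX channel at all: DP always does, and Type-C ports presumably do as well, since AUX is used for connector detection there. A hedged sketch of that logic, not the verified upstream helper:

	static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
	{
		struct drm_i915_private *i915 = to_i915(encoder->base.dev);
		enum phy phy = intel_port_to_phy(i915, encoder->port);

		/* Assumed: DP outputs and Type-C PHYs require an AUX channel */
		return init_dp || intel_phy_is_tc(i915, phy);
	}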
index 2bc034042a937184c6e01252f286c9de1b23a90f..4999c0ee229bd336ae5a9b0560ca0f811e71003a 100644 (file)
@@ -11,6 +11,7 @@
 struct drm_connector_state;
 struct drm_i915_private;
 struct intel_atomic_state;
+struct intel_bios_encoder_data;
 struct intel_connector;
 struct intel_crtc;
 struct intel_crtc_state;
@@ -50,7 +51,8 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state);
 void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
                             enum port port);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+void intel_ddi_init(struct drm_i915_private *dev_priv,
+                   const struct intel_bios_encoder_data *devdata);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
                                      const struct intel_crtc_state *crtc_state);
index b7d20485bde50175f290046136c430687ed4e607..de809e2d9cac7eac13f0ea2e53c8de56eacfeefd 100644 (file)
@@ -1049,12 +1049,26 @@ static const union intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = {
        { .snps = { 62, 0, 0  } },      /* preset 9 */
 };
 
-static const struct intel_ddi_buf_trans mtl_cx0_trans = {
+static const struct intel_ddi_buf_trans mtl_c10_trans_dp14 = {
        .entries = _mtl_c10_trans_dp14,
        .num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14),
        .hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1,
 };
 
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _mtl_c20_trans_dp14[] = {
+       { .snps = { 20, 0, 0  } },      /* preset 0 */
+       { .snps = { 24, 0, 4  } },      /* preset 1 */
+       { .snps = { 30, 0, 9  } },      /* preset 2 */
+       { .snps = { 34, 0, 14 } },      /* preset 3 */
+       { .snps = { 29, 0, 0  } },      /* preset 4 */
+       { .snps = { 34, 0, 5  } },      /* preset 5 */
+       { .snps = { 38, 0, 10 } },      /* preset 6 */
+       { .snps = { 36, 0, 0  } },      /* preset 7 */
+       { .snps = { 40, 0, 6  } },      /* preset 8 */
+       { .snps = { 48, 0, 0  } },      /* preset 9 */
+};
+
 /* DP2.0 */
 static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = {
        { .snps = { 48, 0, 0 } },       /* preset 0 */
@@ -1072,7 +1086,7 @@ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = {
        { .snps = { 37, 4, 7 } },       /* preset 12 */
        { .snps = { 33, 4, 11 } },      /* preset 13 */
        { .snps = { 40, 8, 0 } },       /* preset 14 */
-       { .snps = { 28, 2, 2 } },       /* preset 15 */
+       { .snps = { 30, 2, 2 } },       /* preset 15 */
 };
 
 /* HDMI2.0 */
@@ -1090,6 +1104,12 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = {
        .hdmi_default_entry = 0,
 };
 
+static const struct intel_ddi_buf_trans mtl_c20_trans_dp14 = {
+       .entries = _mtl_c20_trans_dp14,
+       .num_entries = ARRAY_SIZE(_mtl_c20_trans_dp14),
+       .hdmi_default_entry = ARRAY_SIZE(_mtl_c20_trans_dp14) - 1,
+};
+
 static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
        .entries = _mtl_c20_trans_uhbr,
        .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
@@ -1390,7 +1410,7 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        if (crtc_state->port_clock > 270000) {
-               if (IS_TGL_UY(dev_priv)) {
+               if (IS_TIGERLAKE_UY(dev_priv)) {
                        return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
                                                   n_entries);
                } else {
@@ -1678,8 +1698,10 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
                return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
        else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
                return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
+       else if (!intel_is_c10phy(i915, phy))
+               return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
        else
-               return intel_get_buf_trans(&mtl_cx0_trans, n_entries);
+               return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
 }
 
 void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
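intel_get_buf_trans() is outside the hunks shown here. From its call pattern above (table in, entry count out), it is plausibly a trivial accessor; a minimal sketch under that assumption:

	static const struct intel_ddi_buf_trans *
	intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries)
	{
		/* Hand the caller the table and report how many entries it has */
		*num_entries = trans->num_entries;
		return trans;
	}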
@@ -1718,15 +1740,15 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
                        encoder->get_buf_trans = icl_get_mg_buf_trans;
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                encoder->get_buf_trans = bxt_get_buf_trans;
-       } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
+       } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) {
                encoder->get_buf_trans = kbl_y_get_buf_trans;
-       } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) {
+       } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) {
                encoder->get_buf_trans = kbl_u_get_buf_trans;
        } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
                encoder->get_buf_trans = kbl_get_buf_trans;
-       } else if (IS_SKL_ULX(i915)) {
+       } else if (IS_SKYLAKE_ULX(i915)) {
                encoder->get_buf_trans = skl_y_get_buf_trans;
-       } else if (IS_SKL_ULT(i915)) {
+       } else if (IS_SKYLAKE_ULT(i915)) {
                encoder->get_buf_trans = skl_u_get_buf_trans;
        } else if (IS_SKYLAKE(i915)) {
                encoder->get_buf_trans = skl_get_buf_trans;
index d8533603ad0523e1b621a4f6a95877df461ac433..763ab569d8f3248d52622a8572a64e07d96caad9 100644 (file)
@@ -53,7 +53,6 @@
 #include "i915_utils.h"
 #include "i9xx_plane.h"
 #include "i9xx_wm.h"
-#include "icl_dsi.h"
 #include "intel_atomic.h"
 #include "intel_atomic_plane.h"
 #include "intel_audio.h"
@@ -1750,7 +1749,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
                return phy <= PHY_E;
        else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
                return phy <= PHY_D;
-       else if (IS_JSL_EHL(dev_priv))
+       else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
                return phy <= PHY_C;
        else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
                return phy <= PHY_B;
@@ -1802,7 +1801,8 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
                return PHY_B + port - PORT_TC1;
        else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
                return PHY_C + port - PORT_TC1;
-       else if (IS_JSL_EHL(i915) && port == PORT_D)
+       else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+                port == PORT_D)
                return PHY_A;
 
        return PHY_A + port - PORT_A;
@@ -3153,6 +3153,10 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
        if (DISPLAY_VER(dev_priv) >= 12)
                val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
 
+       /* allow PSR with sprite enabled */
+       if (IS_BROADWELL(dev_priv))
+               val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
+
        intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
 }
 
@@ -4564,7 +4568,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
        saved_state->uapi = slave_crtc_state->uapi;
        saved_state->scaler_state = slave_crtc_state->scaler_state;
        saved_state->shared_dpll = slave_crtc_state->shared_dpll;
-       saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
        saved_state->crc_enabled = slave_crtc_state->crc_enabled;
 
        intel_crtc_free_hw_state(slave_crtc_state);
@@ -7144,7 +7147,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
        }
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+       /*
+        * Delay re-enabling DC states by 17 ms, just above the ~16.7 ms
+        * frame time at 60 FPS, to avoid off->on->off toggling overhead
+        * at and above that refresh rate.
+        */
+       intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 
        /*
@@ -7371,7 +7378,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
        if (DISPLAY_VER(dev_priv) >= 9)
                return false;
 
-       if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+       if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
                return false;
 
        if (HAS_PCH_LPT_H(dev_priv) &&
@@ -7388,6 +7395,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
        return true;
 }
 
+bool assert_port_valid(struct drm_i915_private *i915, enum port port)
+{
+       return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
+                        "Platform does not support port %c\n", port_name(port));
+}
+
 void intel_setup_outputs(struct drm_i915_private *dev_priv)
 {
        struct intel_encoder *encoder;
@@ -7398,93 +7411,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
        if (!HAS_DISPLAY(dev_priv))
                return;
 
-       if (IS_METEORLAKE(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_TC1);
-               intel_ddi_init(dev_priv, PORT_TC2);
-               intel_ddi_init(dev_priv, PORT_TC3);
-               intel_ddi_init(dev_priv, PORT_TC4);
-       } else if (IS_DG2(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D_XELPD);
-               intel_ddi_init(dev_priv, PORT_TC1);
-       } else if (IS_ALDERLAKE_P(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_TC1);
-               intel_ddi_init(dev_priv, PORT_TC2);
-               intel_ddi_init(dev_priv, PORT_TC3);
-               intel_ddi_init(dev_priv, PORT_TC4);
-               icl_dsi_init(dev_priv);
-       } else if (IS_ALDERLAKE_S(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_TC1);
-               intel_ddi_init(dev_priv, PORT_TC2);
-               intel_ddi_init(dev_priv, PORT_TC3);
-               intel_ddi_init(dev_priv, PORT_TC4);
-       } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_TC1);
-               intel_ddi_init(dev_priv, PORT_TC2);
-       } else if (DISPLAY_VER(dev_priv) >= 12) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_TC1);
-               intel_ddi_init(dev_priv, PORT_TC2);
-               intel_ddi_init(dev_priv, PORT_TC3);
-               intel_ddi_init(dev_priv, PORT_TC4);
-               intel_ddi_init(dev_priv, PORT_TC5);
-               intel_ddi_init(dev_priv, PORT_TC6);
-               icl_dsi_init(dev_priv);
-       } else if (IS_JSL_EHL(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D);
-               icl_dsi_init(dev_priv);
-       } else if (DISPLAY_VER(dev_priv) == 11) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D);
-               intel_ddi_init(dev_priv, PORT_E);
-               intel_ddi_init(dev_priv, PORT_F);
-               icl_dsi_init(dev_priv);
-       } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               vlv_dsi_init(dev_priv);
-       } else if (DISPLAY_VER(dev_priv) >= 9) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D);
-               intel_ddi_init(dev_priv, PORT_E);
-       } else if (HAS_DDI(dev_priv)) {
-               u32 found;
-
+       if (HAS_DDI(dev_priv)) {
                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);
 
-               /* Haswell uses DDI functions to detect digital outputs. */
-               found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
-               if (found)
-                       intel_ddi_init(dev_priv, PORT_A);
-
-               found = intel_de_read(dev_priv, SFUSE_STRAP);
-               if (found & SFUSE_STRAP_DDIB_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_B);
-               if (found & SFUSE_STRAP_DDIC_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_C);
-               if (found & SFUSE_STRAP_DDID_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_D);
-               if (found & SFUSE_STRAP_DDIF_DETECTED)
-                       intel_ddi_init(dev_priv, PORT_F);
+               intel_bios_for_each_encoder(dev_priv, intel_ddi_init);
+
+               if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+                       vlv_dsi_init(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;
 
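intel_bios_for_each_encoder() is added elsewhere in this series; based on the call above, it replaces the per-platform port lists by walking the VBT display-device list and invoking the callback once per encoder entry. A sketch under that assumption (the display_devices list head appears later in this patch; the node member name is a guess):

	void intel_bios_for_each_encoder(struct drm_i915_private *i915,
					 void (*func)(struct drm_i915_private *i915,
						      const struct intel_bios_encoder_data *devdata))
	{
		struct intel_bios_encoder_data *devdata;

		/* One callback per VBT child device describing an encoder */
		list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
			func(i915, devdata);
	}

With this shape, intel_ddi_init() itself rejects ports that are strapped off, invalid for the platform, or already claimed, as seen in the earlier hunk.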
index c744c021af23689926d949faac7f9c8026b42a05..49ac8473b988b3d74473c9b398dbf89db2863ee3 100644 (file)
@@ -113,7 +113,7 @@ enum i9xx_plane_id {
 
 #define for_each_dbuf_slice(__dev_priv, __slice) \
        for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
-               for_each_if(INTEL_INFO(__dev_priv)->display->dbuf.slice_mask & BIT(__slice))
+               for_each_if(DISPLAY_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
 
 #define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
        for_each_dbuf_slice((__dev_priv), (__slice)) \
@@ -539,6 +539,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
 #define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
 #define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
 
+bool assert_port_valid(struct drm_i915_private *i915, enum port port);
+
 /*
  * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity
  * checks to check for unexpected conditions which may not necessarily be a user
index 8d2243c71dd85f91948c3fb778da2bd2640957dc..53e5c33e08c3bb9e9d29a3eac798a6a2df4a373e 100644 (file)
@@ -17,6 +17,7 @@
 #include <drm/drm_modeset_lock.h>
 
 #include "intel_cdclk.h"
+#include "intel_display_device.h"
 #include "intel_display_limits.h"
 #include "intel_display_power.h"
 #include "intel_dpll_mgr.h"
@@ -33,7 +34,6 @@ struct i915_audio_component;
 struct i915_hdcp_arbiter;
 struct intel_atomic_state;
 struct intel_audio_funcs;
-struct intel_bios_encoder_data;
 struct intel_cdclk_funcs;
 struct intel_cdclk_vals;
 struct intel_color_funcs;
@@ -218,7 +218,6 @@ struct intel_vbt_data {
        struct list_head display_devices;
        struct list_head bdb_blocks;
 
-       struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
        struct sdvo_device_mapping {
                u8 initialized;
                u8 dvo_port;
@@ -429,6 +428,14 @@ struct intel_display {
                u32 state;
        } hti;
 
+       struct {
+               /* Access with DISPLAY_INFO() */
+               const struct intel_display_device_info *__device_info;
+
+               /* Access with DISPLAY_RUNTIME_INFO() */
+               struct intel_display_runtime_info __runtime_info;
+       } info;
+
        struct {
                bool false_color;
        } ips;
index 165e2c7e3126a20893dbd93e2da14bba3e1b15c1..63c1fb9e479faa9a611f84517bc5b5d9f9a81d2b 100644 (file)
@@ -819,8 +819,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);
 
-       drm_dbg(&to_i915(dev)->drm,
-               "Copied %d bytes from user\n", (unsigned int)len);
+       drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len);
 
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
@@ -839,8 +838,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
-                       drm_dbg(&to_i915(dev)->drm,
-                               "Got %d for test active\n", val);
+                       drm_dbg(dev, "Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
index 3fd30e7f006297981534aa4b3d273f27e14e747b..c39f8a15d8aae99711adc7f8c2a2d12f60f8cde1 100644 (file)
@@ -16,9 +16,6 @@
 #include "intel_display_reg_defs.h"
 #include "intel_fbc.h"
 
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
 static const struct intel_display_device_info no_display = {};
 
 #define PIPE_A_OFFSET          0x70000
@@ -187,10 +184,6 @@ static const struct intel_display_device_info no_display = {};
        .__runtime_defaults.cpu_transcoder_mask = \
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
 
-static const struct intel_display_device_info i830_display = {
-       I830_DISPLAY,
-};
-
 #define I845_DISPLAY \
        .has_overlay = 1, \
        .overlay_needs_physical = 1, \
@@ -203,19 +196,29 @@ static const struct intel_display_device_info i830_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A), \
        .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A)
 
+static const struct intel_display_device_info i830_display = {
+       I830_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C), /* DVO A/B/C */
+};
+
 static const struct intel_display_device_info i845_display = {
        I845_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
 };
 
 static const struct intel_display_device_info i85x_display = {
        I830_DISPLAY,
 
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
 static const struct intel_display_device_info i865g_display = {
        I845_DISPLAY,
 
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -228,7 +231,8 @@ static const struct intel_display_device_info i865g_display = {
        .__runtime_defaults.ip.ver = 3, \
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
        .__runtime_defaults.cpu_transcoder_mask = \
-               BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
+               BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */
 
 static const struct intel_display_device_info i915g_display = {
        GEN3_DISPLAY,
@@ -293,6 +297,8 @@ static const struct intel_display_device_info pnv_display = {
 static const struct intel_display_device_info i965g_display = {
        GEN4_DISPLAY,
        .has_overlay = 1,
+
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */
 };
 
 static const struct intel_display_device_info i965gm_display = {
@@ -300,17 +306,21 @@ static const struct intel_display_device_info i965gm_display = {
        .has_overlay = 1,
        .supports_tv = 1,
 
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
 static const struct intel_display_device_info g45_display = {
        GEN4_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */
 };
 
 static const struct intel_display_device_info gm45_display = {
        GEN4_DISPLAY,
        .supports_tv = 1,
 
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -323,7 +333,8 @@ static const struct intel_display_device_info gm45_display = {
        .__runtime_defaults.ip.ver = 5, \
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
        .__runtime_defaults.cpu_transcoder_mask = \
-               BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
+               BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
 
 static const struct intel_display_device_info ilk_d_display = {
        ILK_DISPLAY,
@@ -345,6 +356,7 @@ static const struct intel_display_device_info snb_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -358,6 +370,7 @@ static const struct intel_display_device_info ivb_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -373,6 +386,7 @@ static const struct intel_display_device_info vlv_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* HDMI/DP B/C */
 };
 
 static const struct intel_display_device_info hsw_display = {
@@ -380,6 +394,8 @@ static const struct intel_display_device_info hsw_display = {
        .has_dp_mst = 1,
        .has_fpga_dbg = 1,
        .has_hotplug = 1,
+       .has_psr = 1,
+       .has_psr_hw_tracking = 1,
        HSW_PIPE_OFFSETS,
        IVB_CURSOR_OFFSETS,
        IVB_COLORS,
@@ -389,6 +405,7 @@ static const struct intel_display_device_info hsw_display = {
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -397,6 +414,8 @@ static const struct intel_display_device_info bdw_display = {
        .has_dp_mst = 1,
        .has_fpga_dbg = 1,
        .has_hotplug = 1,
+       .has_psr = 1,
+       .has_psr_hw_tracking = 1,
        HSW_PIPE_OFFSETS,
        IVB_CURSOR_OFFSETS,
        IVB_COLORS,
@@ -406,6 +425,7 @@ static const struct intel_display_device_info bdw_display = {
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -421,6 +441,7 @@ static const struct intel_display_device_info chv_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+       .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* HDMI/DP B/C/D */
 };
 
 static const struct intel_display_device_info skl_display = {
@@ -444,6 +465,7 @@ static const struct intel_display_device_info skl_display = {
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
        .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
 };
 
@@ -467,7 +489,8 @@ static const struct intel_display_device_info skl_display = {
        .__runtime_defaults.cpu_transcoder_mask = \
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
-               BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C)
+               BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C)
 
 static const struct intel_display_device_info bxt_display = {
        GEN9_LP_DISPLAY,
@@ -484,46 +507,57 @@ static const struct intel_display_device_info glk_display = {
        .__runtime_defaults.ip.ver = 10,
 };
 
-static const struct intel_display_device_info gen11_display = {
-       .abox_mask = BIT(0),
-       .dbuf.size = 2048,
-       .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2),
-       .has_ddi = 1,
-       .has_dp_mst = 1,
-       .has_fpga_dbg = 1,
-       .has_hotplug = 1,
-       .has_ipc = 1,
-       .has_psr = 1,
-       .has_psr_hw_tracking = 1,
-       .pipe_offsets = {
-               [TRANSCODER_A] = PIPE_A_OFFSET,
-               [TRANSCODER_B] = PIPE_B_OFFSET,
-               [TRANSCODER_C] = PIPE_C_OFFSET,
-               [TRANSCODER_EDP] = PIPE_EDP_OFFSET,
-               [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET,
-               [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET,
-       },
-       .trans_offsets = {
-               [TRANSCODER_A] = TRANSCODER_A_OFFSET,
-               [TRANSCODER_B] = TRANSCODER_B_OFFSET,
-               [TRANSCODER_C] = TRANSCODER_C_OFFSET,
-               [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET,
-               [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET,
-               [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET,
-       },
-       IVB_CURSOR_OFFSETS,
-       ICL_COLORS,
+#define ICL_DISPLAY \
+       .abox_mask = BIT(0), \
+       .dbuf.size = 2048, \
+       .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
+       .has_ddi = 1, \
+       .has_dp_mst = 1, \
+       .has_fpga_dbg = 1, \
+       .has_hotplug = 1, \
+       .has_ipc = 1, \
+       .has_psr = 1, \
+       .has_psr_hw_tracking = 1, \
+       .pipe_offsets = { \
+               [TRANSCODER_A] = PIPE_A_OFFSET, \
+               [TRANSCODER_B] = PIPE_B_OFFSET, \
+               [TRANSCODER_C] = PIPE_C_OFFSET, \
+               [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+               [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+               [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+       }, \
+       .trans_offsets = { \
+               [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+               [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+               [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+               [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+               [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+               [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+       }, \
+       IVB_CURSOR_OFFSETS, \
+       ICL_COLORS, \
+       \
+       .__runtime_defaults.ip.ver = 11, \
+       .__runtime_defaults.has_dmc = 1, \
+       .__runtime_defaults.has_dsc = 1, \
+       .__runtime_defaults.has_hdcp = 1, \
+       .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+       .__runtime_defaults.cpu_transcoder_mask = \
+               BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+               BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+               BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
+       .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A)
 
-       .__runtime_defaults.ip.ver = 11,
-       .__runtime_defaults.has_dmc = 1,
-       .__runtime_defaults.has_dsc = 1,
-       .__runtime_defaults.has_hdcp = 1,
-       .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
-       .__runtime_defaults.cpu_transcoder_mask =
-               BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
-               BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) |
-               BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
-       .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+static const struct intel_display_device_info icl_display = {
+       ICL_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
+};
+
+static const struct intel_display_device_info jsl_ehl_display = {
+       ICL_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D),
 };
 
 #define XE_D_DISPLAY \
@@ -571,6 +605,20 @@ static const struct intel_display_device_info gen11_display = {
 
 static const struct intel_display_device_info tgl_display = {
        XE_D_DISPLAY,
+
+       /*
+        * FIXME DDI C/combo PHY C is left out because the combo PHY
+        * code makes a mess on SKUs where that PHY is absent.
+        */
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+               BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6),
+};
+
+static const struct intel_display_device_info dg1_display = {
+       XE_D_DISPLAY,
+
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+               BIT(PORT_TC1) | BIT(PORT_TC2),
 };
 
 static const struct intel_display_device_info rkl_display = {
@@ -582,12 +630,17 @@ static const struct intel_display_device_info rkl_display = {
        .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+               BIT(PORT_TC1) | BIT(PORT_TC2),
 };
 
 static const struct intel_display_device_info adl_s_display = {
        XE_D_DISPLAY,
        .has_hti = 1,
        .has_psr_hw_tracking = 0,
+
+       .__runtime_defaults.port_mask = BIT(PORT_A) |
+               BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
 };
 
 #define XE_LPD_FEATURES \
@@ -642,6 +695,8 @@ static const struct intel_display_device_info xe_lpd_display = {
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
                BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+               BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
 };
 
 static const struct intel_display_device_info xe_hpd_display = {
@@ -651,6 +706,8 @@ static const struct intel_display_device_info xe_hpd_display = {
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D_XELPD) |
+               BIT(PORT_TC1),
 };
 
 static const struct intel_display_device_info xe_lpdp_display = {
@@ -663,14 +720,28 @@ static const struct intel_display_device_info xe_lpdp_display = {
        .__runtime_defaults.cpu_transcoder_mask =
                BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+       .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+               BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
 };
 
-__diag_pop();
+/*
+ * Separate detection for the no-display cases to keep the display ID array simple.
+ *
+ * IVB Q requires subvendor and subdevice matching to differentiate from IVB D
+ * GT2 server.
+ */
+static bool has_no_display(struct pci_dev *pdev)
+{
+       static const struct pci_device_id ids[] = {
+               INTEL_IVB_Q_IDS(0),
+               {}
+       };
+
+       return pci_match_id(ids, pdev);
+}
 
 #undef INTEL_VGA_DEVICE
-#undef INTEL_QUANTA_VGA_DEVICE
 #define INTEL_VGA_DEVICE(id, info) { id, info }
-#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
 
 static const struct {
        u32 devid;
@@ -695,7 +766,6 @@ static const struct {
        INTEL_IRONLAKE_M_IDS(&ilk_m_display),
        INTEL_SNB_D_IDS(&snb_display),
        INTEL_SNB_M_IDS(&snb_display),
-       INTEL_IVB_Q_IDS(NULL),          /* must be first IVB in list */
        INTEL_IVB_M_IDS(&ivb_display),
        INTEL_IVB_D_IDS(&ivb_display),
        INTEL_HSW_IDS(&hsw_display),
@@ -707,11 +777,11 @@ static const struct {
        INTEL_GLK_IDS(&glk_display),
        INTEL_KBL_IDS(&skl_display),
        INTEL_CFL_IDS(&skl_display),
-       INTEL_ICL_11_IDS(&gen11_display),
-       INTEL_EHL_IDS(&gen11_display),
-       INTEL_JSL_IDS(&gen11_display),
+       INTEL_ICL_11_IDS(&icl_display),
+       INTEL_EHL_IDS(&jsl_ehl_display),
+       INTEL_JSL_IDS(&jsl_ehl_display),
        INTEL_TGL_12_IDS(&tgl_display),
-       INTEL_DG1_IDS(&tgl_display),
+       INTEL_DG1_IDS(&dg1_display),
        INTEL_RKL_IDS(&rkl_display),
        INTEL_ADLS_IDS(&adl_s_display),
        INTEL_RPLS_IDS(&adl_s_display),
@@ -743,6 +813,15 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
        u32 val;
        int i;
 
+       /* The caller expects ver, rel and step to be initialized
+        * here, and there's no good way to check whether there was a
+        * failure and no_display was returned.  So initialize all these
+        * values to zero here, to be sure.
+        */
+       *ver = 0;
+       *rel = 0;
+       *step = 0;
+
        addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
        if (!addr) {
                drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
@@ -752,9 +831,10 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
        val = ioread32(addr);
        pci_iounmap(pdev, addr);
 
-       if (val == 0)
-               /* Platform doesn't have display */
+       if (val == 0) {
+               drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
                return &no_display;
+       }
 
        *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
        *rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
@@ -780,6 +860,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
        if (has_gmdid)
                return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
 
+       if (has_no_display(pdev)) {
+               drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+               return &no_display;
+       }
+
        for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
                if (intel_display_ids[i].devid == pdev->device)
                        return intel_display_ids[i].info;
@@ -796,8 +881,12 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
        struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
        enum pipe pipe;
 
+       BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES);
+       BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->cpu_transcoder_mask) < I915_MAX_TRANSCODERS);
+       BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
+
        /* Wa_14011765242: adl-s A0,A1 */
-       if (IS_ADLS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
+       if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
                for_each_pipe(i915, pipe)
                        display_runtime->num_scalers[pipe] = 0;
        else if (DISPLAY_VER(i915) >= 11) {
@@ -915,3 +1004,24 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
 display_fused_off:
        memset(display_runtime, 0, sizeof(*display_runtime));
 }
+
+void intel_display_device_info_print(const struct intel_display_device_info *info,
+                                    const struct intel_display_runtime_info *runtime,
+                                    struct drm_printer *p)
+{
+       if (runtime->ip.rel)
+               drm_printf(p, "display version: %u.%02u\n",
+                          runtime->ip.ver,
+                          runtime->ip.rel);
+       else
+               drm_printf(p, "display version: %u\n",
+                          runtime->ip.ver);
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
+       DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
+
+       drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
+       drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
+       drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+}
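PRINT_FLAG is the usual X-macro pattern: DEV_INFO_DISPLAY_FOR_EACH_FLAG() (declared in the header below) expands its argument once per flag name, so the flag list is maintained in a single place. For one flag such as has_ddi, the expansion is roughly:

	/* PRINT_FLAG(has_ddi) expands to (illustrative): */
	drm_printf(p, "has_ddi: %s\n", str_yes_no(info->has_ddi));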
index 706ff2aa1f55edd966c5a3086a0ef0a65595ba98..215e682bd8b7a44520288951490f183d41d8385d 100644 (file)
@@ -11,6 +11,7 @@
 #include "intel_display_limits.h"
 
 struct drm_i915_private;
+struct drm_printer;
 
 #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
        /* Keep in alphabetical order */ \
@@ -53,7 +54,7 @@ struct drm_i915_private;
 #define HAS_GMCH(i915)                 (DISPLAY_INFO(i915)->has_gmch)
 #define HAS_HW_SAGV_WM(i915)           (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
 #define HAS_IPC(i915)                  (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915)                  (IS_HSW_ULT(i915) || IS_BROADWELL(i915))
+#define HAS_IPS(i915)                  (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
 #define HAS_LSPCON(i915)               (IS_DISPLAY_VER(i915, 9, 10))
 #define HAS_MBUS_JOINING(i915)         (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
 #define HAS_MSO(i915)                  (DISPLAY_VER(i915) >= 12)
@@ -79,6 +80,7 @@ struct intel_display_runtime_info {
 
        u8 pipe_mask;
        u8 cpu_transcoder_mask;
+       u16 port_mask;
 
        u8 num_sprites[I915_MAX_PIPES];
        u8 num_scalers[I915_MAX_PIPES];
@@ -126,4 +128,8 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
                           u16 *ver, u16 *rel, u16 *step);
 void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
 
+void intel_display_device_info_print(const struct intel_display_device_info *info,
+                                    const struct intel_display_runtime_info *runtime,
+                                    struct drm_printer *p);
+
 #endif
index b909814ae02b2717bef32016d6cc14747ceb7ed1..8f144d4d3c398398802e45a25f515330ea94cd64 100644 (file)
@@ -28,6 +28,7 @@
 #include "intel_crtc.h"
 #include "intel_display_debugfs.h"
 #include "intel_display_driver.h"
+#include "intel_display_irq.h"
 #include "intel_display_power.h"
 #include "intel_display_types.h"
 #include "intel_dkl_phy.h"
@@ -177,6 +178,7 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
        if (!HAS_DISPLAY(i915))
                return;
 
+       intel_display_irq_init(i915);
        intel_dkl_phy_init(i915);
        intel_color_init_hooks(i915);
        intel_init_cdclk_hooks(i915);
index ae2578741dfe896458dd225a03107c2a60fb592c..62ce5547555400ff58861111c860f83af2ee93f8 100644 (file)
@@ -749,6 +749,20 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev_priv);
 
+       if (de_iir & DE_EDP_PSR_INT_HSW) {
+               struct intel_encoder *encoder;
+
+               for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+                       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+                       u32 psr_iir;
+
+                       psr_iir = intel_uncore_rmw(&dev_priv->uncore,
+                                                  EDP_PSR_IIR, 0, 0);
+                       intel_psr_irq_handler(intel_dp, psr_iir);
+                       break;
+               }
+       }
+
        if (de_iir & DE_AUX_CHANNEL_A_IVB)
                intel_dp_aux_irq_handler(dev_priv);
 
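The intel_uncore_rmw(..., 0, 0) above reads EDP_PSR_IIR and writes the same value straight back; for an IIR-style write-one-to-clear register, that both samples and acknowledges the pending bits in one call. An open-coded equivalent, for illustration:

	u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
	/* IIR registers are write-one-to-clear: writing the value back acks it */
	intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);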
@@ -1135,7 +1149,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 
 u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
 {
-       void __iomem * const regs = i915->uncore.regs;
+       void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        u32 iir;
 
        if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1156,7 +1170,7 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
 
 void gen11_display_irq_handler(struct drm_i915_private *i915)
 {
-       void __iomem * const regs = i915->uncore.regs;
+       void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
 
        disable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1523,7 +1537,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
  * interrupts could still race.
  */
-void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 mask;
@@ -1569,6 +1583,50 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
                vlv_display_irq_reset(dev_priv);
 }
 
+void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+{
+       struct intel_uncore *uncore = &i915->uncore;
+       u32 display_mask, extra_mask;
+
+       if (GRAPHICS_VER(i915) >= 7) {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+                               DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
+               extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+                             DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+                             DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+                             DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+                             DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
+                             DE_DP_A_HOTPLUG_IVB);
+       } else {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                               DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
+                               DE_PIPEA_CRC_DONE | DE_POISON);
+               extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
+                             DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+                             DE_PLANE_FLIP_DONE(PLANE_A) |
+                             DE_PLANE_FLIP_DONE(PLANE_B) |
+                             DE_DP_A_HOTPLUG);
+       }
+
+       if (IS_HASWELL(i915)) {
+               gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+               display_mask |= DE_EDP_PSR_INT_HSW;
+       }
+
+       if (IS_IRONLAKE_M(i915))
+               extra_mask |= DE_PCU_EVENT;
+
+       i915->irq_mask = ~display_mask;
+
+       ibx_irq_postinstall(i915);
+
+       GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
+                     display_mask | extra_mask);
+}
+
+static void mtp_irq_postinstall(struct drm_i915_private *i915);
+static void icp_irq_postinstall(struct drm_i915_private *i915);
+
 void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1586,6 +1644,13 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        if (!HAS_DISPLAY(dev_priv))
                return;
 
+       if (DISPLAY_VER(dev_priv) >= 14)
+               mtp_irq_postinstall(dev_priv);
+       else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+               icp_irq_postinstall(dev_priv);
+       else if (HAS_PCH_SPLIT(dev_priv))
+               ibx_irq_postinstall(dev_priv);
+
        if (DISPLAY_VER(dev_priv) <= 10)
                de_misc_masked |= GEN8_DE_MISC_GSE;
 
@@ -1652,7 +1717,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        }
 }
 
-void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct drm_i915_private *i915)
 {
        struct intel_uncore *uncore = &i915->uncore;
        u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
@@ -1666,7 +1731,7 @@ void mtp_irq_postinstall(struct drm_i915_private *i915)
        GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
 }
 
-void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 mask = SDE_GMBUS_ICP;
@@ -1685,3 +1750,30 @@ void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
                           GEN11_DISPLAY_IRQ_ENABLE);
 }
 
+void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+{
+       if (!HAS_DISPLAY(i915))
+               return;
+
+       gen8_de_irq_postinstall(i915);
+       intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
+                          GEN11_DISPLAY_IRQ_ENABLE);
+}
+
+void intel_display_irq_init(struct drm_i915_private *i915)
+{
+       i915->drm.vblank_disable_immediate = true;
+
+       /*
+        * Most platforms treat the display irq block as an always-on power
+        * domain. vlv/chv can disable it at runtime and need special care to
+        * avoid writing any of the display block registers outside of the power
+        * domain. In this case we defer setting up the display irqs to
+        * runtime pm.
+        */
+       i915->display_irqs_enabled = true;
+       if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+               i915->display_irqs_enabled = false;
+
+       intel_hotplug_irq_init(i915);
+}
index 874893f4f16dbdf6f17af5b32969e9a5b7a7a6fa..2a090dd6abd7c490da75a4cc80b707540faa25e4 100644 (file)
@@ -58,12 +58,11 @@ void vlv_display_irq_reset(struct drm_i915_private *i915);
 void gen8_display_irq_reset(struct drm_i915_private *i915);
 void gen11_display_irq_reset(struct drm_i915_private *i915);
 
-void ibx_irq_postinstall(struct drm_i915_private *i915);
 void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void icp_irq_postinstall(struct drm_i915_private *i915);
+void ilk_de_irq_postinstall(struct drm_i915_private *i915);
 void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void mtp_irq_postinstall(struct drm_i915_private *i915);
 void gen11_de_irq_postinstall(struct drm_i915_private *i915);
+void dg1_de_irq_postinstall(struct drm_i915_private *i915);
 
 u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
 void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
@@ -78,4 +77,6 @@ void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_
 void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
 void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]);
 
+void intel_display_irq_init(struct drm_i915_private *i915);
+
 #endif /* __INTEL_DISPLAY_IRQ_H__ */
index db54370439042b24de78ec2365ad1f6a4c88932e..9e01054c243001a73934db472b8031ca3e371a3b 100644 (file)
@@ -10,6 +10,7 @@
 #include "i915_reg.h"
 #include "intel_backlight_regs.h"
 #include "intel_cdclk.h"
+#include "intel_clock_gating.h"
 #include "intel_combo_phy.h"
 #include "intel_de.h"
 #include "intel_display_power.h"
@@ -457,6 +458,17 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains,
        clear_bit(domain, power_domains->async_put_domains[1].bits);
 }
 
+static void
+cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
+{
+       if (sync)
+               cancel_delayed_work_sync(&power_domains->async_put_work);
+       else
+               cancel_delayed_work(&power_domains->async_put_work);
+
+       power_domains->async_put_next_delay = 0;
+}
+
 static bool
 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
                                       enum intel_display_power_domain domain)
@@ -477,7 +489,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
        if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
                goto out_verify;
 
-       cancel_delayed_work(&power_domains->async_put_work);
+       cancel_async_put_work(power_domains, false);
        intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
                                 fetch_and_zero(&power_domains->async_put_wakeref));
 out_verify:
@@ -608,7 +620,8 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
 
 static void
 queue_async_put_domains_work(struct i915_power_domains *power_domains,
-                            intel_wakeref_t wakeref)
+                            intel_wakeref_t wakeref,
+                            int delay_ms)
 {
        struct drm_i915_private *i915 = container_of(power_domains,
                                                     struct drm_i915_private,
@@ -617,7 +630,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
        power_domains->async_put_wakeref = wakeref;
        drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
                                                    &power_domains->async_put_work,
-                                                   msecs_to_jiffies(100)));
+                                                   msecs_to_jiffies(delay_ms)));
 }
 
 static void
@@ -680,13 +693,15 @@ intel_display_power_put_async_work(struct work_struct *work)
                bitmap_zero(power_domains->async_put_domains[1].bits,
                            POWER_DOMAIN_NUM);
                queue_async_put_domains_work(power_domains,
-                                            fetch_and_zero(&new_work_wakeref));
+                                            fetch_and_zero(&new_work_wakeref),
+                                            power_domains->async_put_next_delay);
+               power_domains->async_put_next_delay = 0;
        } else {
                /*
                 * Cancel the work that got queued after this one got dequeued,
                 * since here we released the corresponding async-put reference.
                 */
-               cancel_delayed_work(&power_domains->async_put_work);
+               cancel_async_put_work(power_domains, false);
        }
 
 out_verify:
@@ -705,19 +720,25 @@ out_verify:
  * @i915: i915 device instance
  * @domain: power domain to reference
  * @wakeref: wakeref acquired for the reference that is being released
+ * @delay_ms: delay, in milliseconds, before powering down the power domain
  *
  * This function drops the power domain reference obtained by
  * intel_display_power_get*() and schedules a work to power down the
  * corresponding hardware block if this is the last reference.
+ * The power down is delayed by @delay_ms if it is >= 0, or by the default
+ * of 100 ms otherwise.
  */
 void __intel_display_power_put_async(struct drm_i915_private *i915,
                                     enum intel_display_power_domain domain,
-                                    intel_wakeref_t wakeref)
+                                    intel_wakeref_t wakeref,
+                                    int delay_ms)
 {
        struct i915_power_domains *power_domains = &i915->display.power.domains;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
 
+       delay_ms = delay_ms >= 0 ? delay_ms : 100;
+
        mutex_lock(&power_domains->lock);
 
        if (power_domains->domain_use_count[domain] > 1) {
@@ -731,10 +752,13 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
        /* Let a pending work requeue itself or queue a new one. */
        if (power_domains->async_put_wakeref) {
                set_bit(domain, power_domains->async_put_domains[1].bits);
+               power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
+                                                         delay_ms);
        } else {
                set_bit(domain, power_domains->async_put_domains[0].bits);
                queue_async_put_domains_work(power_domains,
-                                            fetch_and_zero(&work_wakeref));
+                                            fetch_and_zero(&work_wakeref),
+                                            delay_ms);
        }
 
 out_verify:
@@ -774,7 +798,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
 
        async_put_domains_mask(power_domains, &async_put_mask);
        release_async_put_domains(power_domains, &async_put_mask);
-       cancel_delayed_work(&power_domains->async_put_work);
+       cancel_async_put_work(power_domains, false);
 
 out_verify:
        verify_async_put_domains_state(power_domains);
@@ -798,7 +822,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
        struct i915_power_domains *power_domains = &i915->display.power.domains;
 
        intel_display_power_flush_work(i915);
-       cancel_delayed_work_sync(&power_domains->async_put_work);
+       cancel_async_put_work(power_domains, true);
 
        verify_async_put_domains_state(power_domains);
 
@@ -1385,9 +1409,8 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
        hsw_restore_lcpll(dev_priv);
        intel_init_pch_refclk(dev_priv);
 
-       if (HAS_PCH_LPT_LP(dev_priv))
-               intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
-                            0, PCH_LP_PARTITION_LEVEL_DISABLE);
+       /* Many display registers don't survive PC8+ */
+       intel_clock_gating_init(dev_priv);
 }
 
 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
@@ -1586,7 +1609,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
                return;
 
        if (IS_ALDERLAKE_S(dev_priv) ||
-           IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+           (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
                /* Wa_1409767108 */
                table = wa_1409767108_buddy_page_masks;
        else
index df38632c6237146ed7428bf4293fb7c2a53dfb27..d3b5d04b7b077855b2c48e79fd59091e86779316 100644 (file)
@@ -12,9 +12,6 @@
 #include "intel_wakeref.h"
 
 enum aux_ch;
-enum dpio_channel;
-enum dpio_phy;
-enum i915_drm_suspend_mode;
 enum port;
 struct drm_i915_private;
 struct i915_power_well;
@@ -154,6 +151,7 @@ struct i915_power_domains {
        struct delayed_work async_put_work;
        intel_wakeref_t async_put_wakeref;
        struct intel_power_domain_mask async_put_domains[2];
+       int async_put_next_delay;
 
        struct i915_power_well *power_wells;
 };
@@ -200,7 +198,8 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
                                   enum intel_display_power_domain domain);
 void __intel_display_power_put_async(struct drm_i915_private *i915,
                                     enum intel_display_power_domain domain,
-                                    intel_wakeref_t wakeref);
+                                    intel_wakeref_t wakeref,
+                                    int delay_ms);
 void intel_display_power_flush_work(struct drm_i915_private *i915);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void intel_display_power_put(struct drm_i915_private *dev_priv,
@@ -211,7 +210,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
                              enum intel_display_power_domain domain,
                              intel_wakeref_t wakeref)
 {
-       __intel_display_power_put_async(i915, domain, wakeref);
+       __intel_display_power_put_async(i915, domain, wakeref, -1);
+}
+
+static inline void
+intel_display_power_put_async_delay(struct drm_i915_private *i915,
+                                   enum intel_display_power_domain domain,
+                                   intel_wakeref_t wakeref,
+                                   int delay_ms)
+{
+       __intel_display_power_put_async(i915, domain, wakeref, delay_ms);
 }
 #else
 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
@@ -230,7 +238,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
                              enum intel_display_power_domain domain,
                              intel_wakeref_t wakeref)
 {
-       __intel_display_power_put_async(i915, domain, -1);
+       __intel_display_power_put_async(i915, domain, -1, -1);
+}
+
+static inline void
+intel_display_power_put_async_delay(struct drm_i915_private *i915,
+                                   enum intel_display_power_domain domain,
+                                   intel_wakeref_t wakeref,
+                                   int delay_ms)
+{
+       __intel_display_power_put_async(i915, domain, -1, delay_ms);
 }
 #endif
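
A hypothetical caller of the new wrapper, releasing its wakeref while keeping the domain powered for a grace period; the 500 ms value here is purely illustrative:

/* Hypothetical usage: drop our reference now, but keep the domain
 * powered for ~500 ms in case another user follows shortly. */
intel_display_power_put_async_delay(i915, domain, wakeref, 500);
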
 
index 1015bba4af01e9f8149e5576db55e7ad7c778edf..a8736588314d9dc53afdb403cba2eaef3f55ca9b 100644 (file)
@@ -11,7 +11,6 @@
 #include "intel_dpio_phy.h"
 
 struct drm_i915_private;
-struct i915_power_well;
 struct i915_power_well_ops;
 struct intel_encoder;
 
index 9f40da20e88d28e6dca7fa3286f3b4d970a34002..12bd2f322e6279a7b85f2ef29a8474903f18ec9d 100644 (file)
@@ -500,7 +500,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
                else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
                         IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
                        max_rate = 810000;
-               else if (IS_JSL_EHL(dev_priv))
+               else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
@@ -510,7 +510,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        } else if (DISPLAY_VER(dev_priv) == 9) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
-       } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+       } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
@@ -713,9 +713,18 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
 
                /*
                 * According to BSpec, 27 is the max DSC output bpp,
-                * 8 is the min DSC output bpp
+                * 8 is the min DSC output bpp.
+                * While we can still clamp higher bpp values to 27, saving bandwidth,
+                * needing to compress down to bpp < 8 means we can't do that, and
+                * probably means we can't fit the required mode at all, even with
+                * DSC enabled.
                 */
-               bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
+               if (bits_per_pixel < 8) {
+                       drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+                                   bits_per_pixel);
+                       return 0;
+               }
+               bits_per_pixel = min_t(u32, bits_per_pixel, 27);
        } else {
                /* Find the nearest match in the array of known BPPs from VESA */
                for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
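
The new logic rejects compression targets below the 8 bpp DSC floor instead of silently raising them, while still clamping the top end at 27 bpp. A standalone sketch of that policy:

#include <stdint.h>

/* Below the 8 bpp floor the mode cannot be carried even with DSC
 * (0 means "unsupported"); above 27 bpp we merely clamp. */
static uint32_t dsc_clamp_output_bpp(uint32_t bpp)
{
	if (bpp < 8)
		return 0;
	return bpp > 27 ? 27 : bpp;
}
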
index 197c6e81db147e23cadb3f34cb0820cd0c185d4a..2d173bd495a337e172d45ac492e113984b0368f0 100644 (file)
@@ -14,7 +14,7 @@
 #include "intel_pps.h"
 #include "intel_tc.h"
 
-static u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
+u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
 {
        int i;
        u32 v = 0;
@@ -792,25 +792,60 @@ static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
        return (enum aux_ch)encoder->port;
 }
 
+static struct intel_encoder *
+get_encoder_by_aux_ch(struct intel_encoder *encoder,
+                     enum aux_ch aux_ch)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_encoder *other;
+
+       for_each_intel_encoder(&i915->drm, other) {
+               if (other == encoder)
+                       continue;
+
+               if (!intel_encoder_is_dig_port(other))
+                       continue;
+
+               if (enc_to_dig_port(other)->aux_ch == aux_ch)
+                       return other;
+       }
+
+       return NULL;
+}
+
 enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
 {
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_encoder *other;
+       const char *source;
        enum aux_ch aux_ch;
 
        aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
-       if (aux_ch != AUX_CH_NONE) {
-               drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
-                           encoder->base.base.id, encoder->base.name,
-                           aux_ch_name(aux_ch));
-               return aux_ch;
+       source = "VBT";
+
+       if (aux_ch == AUX_CH_NONE) {
+               aux_ch = default_aux_ch(encoder);
+               source = "platform default";
        }
 
-       aux_ch = default_aux_ch(encoder);
+       if (aux_ch == AUX_CH_NONE)
+               return AUX_CH_NONE;
+
+       /* FIXME validate aux_ch against platform caps */
+
+       other = get_encoder_by_aux_ch(encoder, aux_ch);
+       if (other) {
+               drm_dbg_kms(&i915->drm,
+                           "[ENCODER:%d:%s] AUX CH %c already claimed by [ENCODER:%d:%s]\n",
+                           encoder->base.base.id, encoder->base.name, aux_ch_name(aux_ch),
+                           other->base.base.id, other->base.name);
+               return AUX_CH_NONE;
+       }
 
        drm_dbg_kms(&i915->drm,
-                   "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+                   "[ENCODER:%d:%s] Using AUX CH %c (%s)\n",
                    encoder->base.base.id, encoder->base.name,
-                   aux_ch_name(aux_ch));
+                   aux_ch_name(aux_ch), source);
 
        return aux_ch;
 }
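
Both this AUX change and the later HDMI DDC-pin change share one shape: resolve the VBT value, fall back to a platform default, then scan the other encoders for a conflicting claim. A sketch of the conflict scan, with a plain array standing in for the encoder list:

#include <stddef.h>

/* claimed[i] is the AUX channel already taken by encoder i, or -1.
 * Returns the conflicting encoder index, or -1 if the channel is free. */
static int aux_ch_claimed_by(const int *claimed, size_t n,
			     size_t self, int aux_ch)
{
	for (size_t i = 0; i < n; i++) {
		if (i == self)
			continue;
		if (claimed[i] == aux_ch)
			return (int)i;
	}
	return -1;
}
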
index 5b608f9d34993be06b991eb7772dd68112dd6393..8447f3e601fe4090de1c137e103ca3e04b890fcd 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __INTEL_DP_AUX_H__
 #define __INTEL_DP_AUX_H__
 
+#include <linux/types.h>
+
 enum aux_ch;
 struct drm_i915_private;
 struct intel_dp;
@@ -17,5 +19,6 @@ void intel_dp_aux_init(struct intel_dp *intel_dp);
 enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
 
 void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
+u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
 
 #endif /* __INTEL_DP_AUX_H__ */
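
intel_dp_aux_pack() is exported here so the PSR code below can pre-program the AUX data registers. Those registers take message bytes packed MSB-first into 32-bit words; a sketch of that convention (up to four bytes per word):

#include <stdint.h>

/* Pack up to 4 message bytes MSB-first into one 32-bit AUX data word. */
static uint32_t aux_pack(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (int i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}
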
index 6b2d8a1e2aa90bc598d44b25a798dd1e6632484a..a9b19e80bff717c6d6583ac83007d656f19634d0 100644 (file)
@@ -191,7 +191,8 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
 {
        if (IS_DG1(i915))
                return DG1_DPLL_ENABLE(pll->info->id);
-       else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
+       else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+                (pll->info->id == DPLL_ID_EHL_DPLL4))
                return MG_PLL_ENABLE(0);
 
        return ICL_DPLL_ENABLE(pll->info->id);
@@ -927,7 +928,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
        switch (wrpll & WRPLL_REF_MASK) {
        case WRPLL_REF_SPECIAL_HSW:
                /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
-               if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+               if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) {
                        refclk = dev_priv->display.dpll.ref_clks.nssc;
                        break;
                }
@@ -2460,8 +2461,8 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
 static bool
 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
-       return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
-                IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+       return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+                IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
                 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
                 i915->display.dpll.ref_clks.nssc == 38400;
 }
@@ -3226,7 +3227,8 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
                        BIT(DPLL_ID_EHL_DPLL4) |
                        BIT(DPLL_ID_ICL_DPLL1) |
                        BIT(DPLL_ID_ICL_DPLL0);
-       } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
+       } else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                               port != PORT_A) {
                dpll_mask =
                        BIT(DPLL_ID_EHL_DPLL4) |
                        BIT(DPLL_ID_ICL_DPLL1) |
@@ -3567,7 +3569,8 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
                        hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
                }
        } else {
-               if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+               if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                   id == DPLL_ID_EHL_DPLL4) {
                        hw_state->cfgcr0 = intel_de_read(dev_priv,
                                                         ICL_DPLL_CFGCR0(4));
                        hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3623,7 +3626,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
                cfgcr1_reg = TGL_DPLL_CFGCR1(id);
                div0_reg = TGL_DPLL0_DIV0(id);
        } else {
-               if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+               if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                   id == DPLL_ID_EHL_DPLL4) {
                        cfgcr0_reg = ICL_DPLL_CFGCR0(4);
                        cfgcr1_reg = ICL_DPLL_CFGCR1(4);
                } else {
@@ -3781,7 +3785,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
 {
        u32 val;
 
-       if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
+       if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
            pll->info->id != DPLL_ID_ICL_DPLL0)
                return;
        /*
@@ -3806,7 +3810,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
 {
        i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
 
-       if (IS_JSL_EHL(dev_priv) &&
+       if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
            pll->info->id == DPLL_ID_EHL_DPLL4) {
 
                /*
@@ -3914,7 +3918,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,
 
        icl_pll_disable(dev_priv, pll, enable_reg);
 
-       if (IS_JSL_EHL(dev_priv) &&
+       if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
            pll->info->id == DPLL_ID_EHL_DPLL4)
                intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
                                        pll->wakeref);
@@ -4150,7 +4154,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
                dpll_mgr = &rkl_pll_mgr;
        else if (DISPLAY_VER(dev_priv) >= 12)
                dpll_mgr = &tgl_pll_mgr;
-       else if (IS_JSL_EHL(dev_priv))
+       else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
                dpll_mgr = &ehl_pll_mgr;
        else if (DISPLAY_VER(dev_priv) >= 11)
                dpll_mgr = &icl_pll_mgr;
@@ -4335,7 +4339,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
 
        pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
 
-       if (IS_JSL_EHL(i915) && pll->on &&
+       if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+           pll->on &&
            pll->info->id == DPLL_ID_EHL_DPLL4) {
                pll->wakeref = intel_display_power_get(i915,
                                                       POWER_DOMAIN_DC_OFF);
index 7c5fddb203ba0796e4b808656f1e7c5996d2e662..fbfd8f959f172f87e68677d9f3671eeecd1506a6 100644 (file)
@@ -166,6 +166,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
                i915_vma_get(vma);
        }
 
+       dpt->obj->mm.dirty = true;
+
        atomic_dec(&i915->gpu_error.pending_fb_pin);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
@@ -261,7 +263,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
                dpt_obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
                drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
-               dpt_obj = i915_gem_object_create_internal(i915, size);
+               dpt_obj = i915_gem_object_create_shmem(i915, size);
        }
        if (IS_ERR(dpt_obj))
                return ERR_CAST(dpt_obj);
index 5efdd471ac2b500d597760d74e1cca66836cc61c..d3cf6a652221fa22a9ace04b18d05850125b15d0 100644 (file)
@@ -9,6 +9,26 @@
 #include "intel_dsi.h"
 #include "intel_panel.h"
 
+void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
+{
+       ktime_t panel_power_on_time;
+       s64 panel_power_off_duration;
+
+       panel_power_on_time = ktime_get_boottime();
+       panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
+                                                 intel_dsi->panel_power_off_time);
+
+       if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
+               msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
+}
+
+void intel_dsi_shutdown(struct intel_encoder *encoder)
+{
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+       intel_dsi_wait_panel_power_cycle(intel_dsi);
+}
+
 int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
 {
        int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
index ce80bd8be5194c30f05a7fdd275a1a3f3f75922f..083390e5e4429daf6c68cd241bd6316a1895fa33 100644 (file)
@@ -173,5 +173,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
 struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
                                           const struct mipi_dsi_host_ops *funcs,
                                           enum port port);
+void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi);
+void intel_dsi_shutdown(struct intel_encoder *encoder);
 
 #endif /* _INTEL_DSI_H */
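
intel_dsi_wait_panel_power_cycle() enforces a minimum panel off-time by comparing boot-time clocks instead of sleeping unconditionally. A pure-function model of the remaining-delay computation:

#include <stdint.h>

/* Only the part of the required off-time that has not yet elapsed
 * since power-off needs to be slept; mirrors the ktime_ms_delta() use. */
static int64_t remaining_power_cycle_ms(int64_t now_ms, int64_t power_off_ms,
					int64_t required_cycle_ms)
{
	int64_t elapsed = now_ms - power_off_ms;

	return elapsed < required_cycle_ms ? required_cycle_ms - elapsed : 0;
}
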
index c7935ea498c44fc62a9baafb953608d024c4673d..e56ec3f2d84aecc3a1a6fd76bb0c51bdd712de30 100644 (file)
@@ -235,7 +235,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
        struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
        u32 delay = *((const u32 *) data);
 
-       drm_dbg_kms(&i915->drm, "\n");
+       drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
 
        usleep_range(delay, delay + 10);
        data += 4;
index 9884678743b66589ba1d3e7f503bbd2a18e1a8bb..b386894c3a6db2f25119ea726826f6300c69facc 100644 (file)
@@ -509,6 +509,8 @@ void intel_dvo_init(struct drm_i915_private *i915)
                return;
        }
 
+       assert_port_valid(i915, intel_dvo->dev.port);
+
        encoder->type = INTEL_OUTPUT_DVO;
        encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
        encoder->port = intel_dvo->dev.port;
index 7f8b2d7713c7a3bfbafd9b82c5f1f1552357284f..25382022cd2768301c417a37e456526deed7547b 100644 (file)
@@ -47,6 +47,7 @@
 #include "i915_reg.h"
 #include "i915_utils.h"
 #include "i915_vgpu.h"
+#include "i915_vma.h"
 #include "intel_cdclk.h"
 #include "intel_de.h"
 #include "intel_display_trace.h"
@@ -94,8 +95,7 @@ struct intel_fbc {
        struct mutex lock;
        unsigned int busy_bits;
 
-       struct drm_mm_node compressed_fb;
-       struct drm_mm_node compressed_llb;
+       struct i915_stolen_fb compressed_fb, compressed_llb;
 
        enum intel_fbc_id id;
 
@@ -332,15 +332,16 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
 {
        struct drm_i915_private *i915 = fbc->i915;
 
-       GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
-                                        fbc->compressed_fb.start, U32_MAX));
-       GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
-                                        fbc->compressed_llb.start, U32_MAX));
-
+       GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+                                        i915_gem_stolen_node_offset(&fbc->compressed_fb),
+                                        U32_MAX));
+       GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+                                        i915_gem_stolen_node_offset(&fbc->compressed_llb),
+                                        U32_MAX));
        intel_de_write(i915, FBC_CFB_BASE,
-                      i915->dsm.stolen.start + fbc->compressed_fb.start);
+                      i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
        intel_de_write(i915, FBC_LL_BASE,
-                      i915->dsm.stolen.start + fbc->compressed_llb.start);
+                      i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
 }
 
 static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -447,7 +448,8 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
 {
        struct drm_i915_private *i915 = fbc->i915;
 
-       intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
+       intel_de_write(i915, DPFC_CB_BASE,
+                      i915_gem_stolen_node_offset(&fbc->compressed_fb));
 }
 
 static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -498,7 +500,8 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
 {
        struct drm_i915_private *i915 = fbc->i915;
 
-       intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
+       intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
+                      i915_gem_stolen_node_offset(&fbc->compressed_fb));
 }
 
 static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -605,7 +608,7 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
        else if (DISPLAY_VER(i915) == 9)
                skl_fbc_program_cfb_stride(fbc);
 
-       if (to_gt(i915)->ggtt->num_fences)
+       if (intel_gt_support_legacy_fencing(to_gt(i915)))
                snb_fbc_program_fence(fbc);
 
        intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
@@ -713,7 +716,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
         * underruns, even if that range is not reserved by the BIOS. */
        if (IS_BROADWELL(i915) ||
            (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
-               end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
+               end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
        else
                end = U64_MAX;
 
@@ -770,9 +773,9 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
        int ret;
 
        drm_WARN_ON(&i915->drm,
-                   drm_mm_node_allocated(&fbc->compressed_fb));
+                   i915_gem_stolen_node_allocated(&fbc->compressed_fb));
        drm_WARN_ON(&i915->drm,
-                   drm_mm_node_allocated(&fbc->compressed_llb));
+                   i915_gem_stolen_node_allocated(&fbc->compressed_llb));
 
        if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
                ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
@@ -792,15 +795,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
 
        drm_dbg_kms(&i915->drm,
                    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
-                   fbc->compressed_fb.size, fbc->limit);
-
+                   i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
        return 0;
 
 err_llb:
-       if (drm_mm_node_allocated(&fbc->compressed_llb))
+       if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
                i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
 err:
-       if (drm_mm_initialized(&i915->mm.stolen))
+       if (i915_gem_stolen_initialized(i915))
                drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
 }
@@ -825,9 +827,9 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
        if (WARN_ON(intel_fbc_hw_is_active(fbc)))
                return;
 
-       if (drm_mm_node_allocated(&fbc->compressed_llb))
+       if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
                i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
-       if (drm_mm_node_allocated(&fbc->compressed_fb))
+       if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
                i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
 }
 
@@ -990,11 +992,10 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
        fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
 
        drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
-                   !plane_state->ggtt_vma->fence);
+                   !intel_gt_support_legacy_fencing(to_gt(i915)));
 
-       if (plane_state->flags & PLANE_HAS_FENCE &&
-           plane_state->ggtt_vma->fence)
-               fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
+       if (plane_state->flags & PLANE_HAS_FENCE)
+               fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
        else
                fbc_state->fence_id = -1;
 
@@ -1021,7 +1022,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
         */
        return DISPLAY_VER(i915) >= 9 ||
                (plane_state->flags & PLANE_HAS_FENCE &&
-                plane_state->ggtt_vma->fence);
+                i915_vma_fence_id(plane_state->ggtt_vma) != -1);
 }
 
 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
@@ -1030,7 +1031,8 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
        struct intel_fbc *fbc = plane->fbc;
 
        return intel_fbc_min_limit(plane_state) <= fbc->limit &&
-               intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
+               intel_fbc_cfb_size(plane_state) <= fbc->limit *
+                       i915_gem_stolen_node_size(&fbc->compressed_fb);
 }
 
 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1054,6 +1056,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
        if (!fbc)
                return 0;
 
+       if (!i915_gem_stolen_initialized(i915)) {
+               plane_state->no_fbc_reason = "stolen memory not initialised";
+               return 0;
+       }
+
        if (intel_vgpu_active(i915)) {
                plane_state->no_fbc_reason = "VGPU active";
                return 0;
@@ -1707,9 +1714,6 @@ void intel_fbc_init(struct drm_i915_private *i915)
 {
        enum intel_fbc_id fbc_id;
 
-       if (!drm_mm_initialized(&i915->mm.stolen))
-               DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
-
        if (need_fbc_vtd_wa(i915))
                DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
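
The FBC hunks replace direct drm_mm_node field pokes with i915_gem_stolen_node_*() accessors, keeping the stolen allocator's backing type private to one place. A sketch of the opaque-handle pattern, using invented stand-in names:

#include <stdbool.h>
#include <stdint.h>

struct stolen_fb {		/* stands in for i915_stolen_fb */
	uint64_t start;
	uint64_t size;
	bool allocated;
};

static bool stolen_node_allocated(const struct stolen_fb *fb)
{
	return fb->allocated;
}

static uint64_t stolen_node_offset(const struct stolen_fb *fb)
{
	return fb->start;
}

/* Absolute address = base of the stolen area + offset within it. */
static uint64_t stolen_node_address(uint64_t area_base,
				    const struct stolen_fb *fb)
{
	return area_base + fb->start;
}
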
 
index 1cc0ddc6a310ed64275d350ad2e85ebce7a44226..31d0d695d5671b0c6fc05d35c0d5443257158a75 100644 (file)
@@ -85,9 +85,9 @@ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
        intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
 }
 
-FB_GEN_DEFAULT_DEFERRED_IO_OPS(intel_fbdev,
-                              drm_fb_helper_damage_range,
-                              drm_fb_helper_damage_area)
+FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(intel_fbdev,
+                                 drm_fb_helper_damage_range,
+                                 drm_fb_helper_damage_area)
 
 static int intel_fbdev_set_par(struct fb_info *info)
 {
@@ -135,9 +135,6 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
        return i915_gem_fb_mmap(obj, vma);
 }
 
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding the default ops");
-
 static const struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -149,8 +146,6 @@ static const struct fb_ops intelfb_ops = {
        .fb_mmap = intel_fbdev_mmap,
 };
 
-__diag_pop();
-
 static int intelfb_alloc(struct drm_fb_helper *helper,
                         struct drm_fb_helper_surface_size *sizes)
 {
@@ -187,8 +182,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
                 * If the FB is too big, just don't use it since fbdev is not very
                 * important and we should probably use that space with FBC or other
                 * features.
+                *
+                * Also skip stolen on MTL as Wa_22018444074 mitigation.
                 */
-               if (size * 2 < dev_priv->dsm.usable_size)
+               if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
                        obj = i915_gem_object_create_stolen(dev_priv, size);
                if (IS_ERR(obj))
                        obj = i915_gem_object_create_shmem(dev_priv, size);
index 17a7aa8b28c242ea2399b36d1155d24393043133..22392f94b62677d45f1466a75a2f87f8df8a1fa0 100644 (file)
@@ -167,7 +167,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
                           enum fb_op_origin origin,
                           unsigned int frontbuffer_bits)
 {
-       struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+       struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
 
        if (origin == ORIGIN_CS) {
                spin_lock(&i915->display.fb_tracking.lock);
@@ -188,7 +188,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
                      enum fb_op_origin origin,
                      unsigned int frontbuffer_bits)
 {
-       struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+       struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
 
        if (origin == ORIGIN_CS) {
                spin_lock(&i915->display.fb_tracking.lock);
@@ -221,24 +221,18 @@ static void frontbuffer_retire(struct i915_active *ref)
 }
 
 static void frontbuffer_release(struct kref *ref)
-       __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
+       __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock)
 {
        struct intel_frontbuffer *front =
                container_of(ref, typeof(*front), ref);
        struct drm_i915_gem_object *obj = front->obj;
-       struct i915_vma *vma;
 
-       drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
+       drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits));
 
-       spin_lock(&obj->vma.lock);
-       for_each_ggtt_vma(vma, obj) {
-               i915_vma_clear_scanout(vma);
-               vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-       }
-       spin_unlock(&obj->vma.lock);
+       i915_ggtt_clear_scanout(obj);
 
-       RCU_INIT_POINTER(obj->frontbuffer, NULL);
-       spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
+       i915_gem_object_set_frontbuffer(obj, NULL);
+       spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock);
 
        i915_active_fini(&front->write);
 
@@ -249,10 +243,10 @@ static void frontbuffer_release(struct kref *ref)
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct intel_frontbuffer *front;
+       struct drm_i915_private *i915 = intel_bo_to_i915(obj);
+       struct intel_frontbuffer *front, *cur;
 
-       front = __intel_frontbuffer_get(obj);
+       front = i915_gem_object_get_frontbuffer(obj);
        if (front)
                return front;
 
@@ -269,24 +263,18 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
                         I915_ACTIVE_RETIRE_SLEEPS);
 
        spin_lock(&i915->display.fb_tracking.lock);
-       if (rcu_access_pointer(obj->frontbuffer)) {
-               kfree(front);
-               front = rcu_dereference_protected(obj->frontbuffer, true);
-               kref_get(&front->ref);
-       } else {
-               i915_gem_object_get(obj);
-               rcu_assign_pointer(obj->frontbuffer, front);
-       }
+       cur = i915_gem_object_set_frontbuffer(obj, front);
        spin_unlock(&i915->display.fb_tracking.lock);
-
-       return front;
+       if (cur != front)
+               kfree(front);
+       return cur;
 }
 
 void intel_frontbuffer_put(struct intel_frontbuffer *front)
 {
        kref_put_lock(&front->ref,
                      frontbuffer_release,
-                     &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
+                     &intel_bo_to_i915(front->obj)->display.fb_tracking.lock);
 }
 
 /**
@@ -315,13 +303,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
        BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
 
        if (old) {
-               drm_WARN_ON(old->obj->base.dev,
+               drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm,
                            !(atomic_read(&old->bits) & frontbuffer_bits));
                atomic_andnot(frontbuffer_bits, &old->bits);
        }
 
        if (new) {
-               drm_WARN_ON(new->obj->base.dev,
+               drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm,
                            atomic_read(&new->bits) & frontbuffer_bits);
                atomic_or(frontbuffer_bits, &new->bits);
        }
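
intel_frontbuffer_get() now allocates a candidate up front and installs it under the lock via the set-frontbuffer helper; when another thread won the race, the helper returns the existing pointer and the loser frees its candidate. A minimal model of that step:

struct fb;	/* opaque stand-in for struct intel_frontbuffer */

/* Install 'candidate' only if the slot is empty; otherwise return the
 * current occupant (the real helper also takes a reference on it). */
static struct fb *set_frontbuffer(struct fb **slot, struct fb *candidate)
{
	if (*slot)
		return *slot;
	*slot = candidate;
	return candidate;
}

/* Caller pattern, as in intel_frontbuffer_get():
 *	cur = set_frontbuffer(&slot, front);
 *	if (cur != front)
 *		kfree(front);	(lost the race, keep 'cur')
 */
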
index 3c474ed937fb2c4352639b65a443e31647ec5401..72d89be3284b94fd7b7c94577ccc6126c3d36597 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/bits.h>
 #include <linux/kref.h>
 
-#include "gem/i915_gem_object_types.h"
 #include "i915_active_types.h"
 
 struct drm_i915_private;
@@ -75,33 +74,6 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915,
 
 void intel_frontbuffer_put(struct intel_frontbuffer *front);
 
-static inline struct intel_frontbuffer *
-__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
-{
-       struct intel_frontbuffer *front;
-
-       if (likely(!rcu_access_pointer(obj->frontbuffer)))
-               return NULL;
-
-       rcu_read_lock();
-       do {
-               front = rcu_dereference(obj->frontbuffer);
-               if (!front)
-                       break;
-
-               if (unlikely(!kref_get_unless_zero(&front->ref)))
-                       continue;
-
-               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
-                       break;
-
-               intel_frontbuffer_put(front);
-       } while (1);
-       rcu_read_unlock();
-
-       return front;
-}
-
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 
index 34fabadefaf66991e50b2ddb507fd9cd62a497ce..a42549fa96918e2eacc06990de16961269b1bf48 100644 (file)
@@ -177,8 +177,11 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
                struct intel_gt *gt = i915->media_gt;
                struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
 
-               if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
+               if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
+                       drm_dbg_kms(&i915->drm,
+                                   "GSC components required for HDCP2.2 are not ready\n");
                        return false;
+               }
        }
 
        /* MEI/GSC interface is solid depending on which is used */
index 72573ce1d0e983908a7708ae9c4b81ccec72ef31..d753db3eef155c728d3d99acd1fb0049633dd129 100644 (file)
@@ -6,6 +6,7 @@
 #include <drm/i915_hdcp_interface.h>
 
 #include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
 #include "i915_drv.h"
 #include "i915_utils.h"
@@ -621,24 +622,26 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
        struct intel_gt *gt = i915->media_gt;
        struct drm_i915_gem_object *obj = NULL;
        struct i915_vma *vma = NULL;
-       void *cmd;
+       void *cmd_in, *cmd_out;
        int err;
 
-       /* allocate object of one page for HDCP command memory and store it */
-       obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+       /* allocate an object of two pages for HDCP command memory and store it */
+       obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
 
        if (IS_ERR(obj)) {
                drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
                return PTR_ERR(obj);
        }
 
-       cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
-       if (IS_ERR(cmd)) {
+       cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
+       if (IS_ERR(cmd_in)) {
                drm_err(&i915->drm, "Failed to map gsc message page!\n");
-               err = PTR_ERR(cmd);
+               err = PTR_ERR(cmd_in);
                goto out_unpin;
        }
 
+       cmd_out = cmd_in + PAGE_SIZE;
+
        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
@@ -649,9 +652,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
        if (err)
                goto out_unmap;
 
-       memset(cmd, 0, obj->base.size);
+       memset(cmd_in, 0, obj->base.size);
 
-       hdcp_message->hdcp_cmd = cmd;
+       hdcp_message->hdcp_cmd_in = cmd_in;
+       hdcp_message->hdcp_cmd_out = cmd_out;
        hdcp_message->vma = vma;
 
        return 0;
@@ -691,6 +695,8 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
        struct intel_hdcp_gsc_message *hdcp_message =
                                        i915->display.hdcp.hdcp_message;
 
+       hdcp_message->hdcp_cmd_in = NULL;
+       hdcp_message->hdcp_cmd_out = NULL;
        i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
        kfree(hdcp_message);
 }
@@ -721,38 +727,42 @@ void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
 }
 
 static int intel_gsc_send_sync(struct drm_i915_private *i915,
-                              struct intel_gsc_mtl_header *header, u64 addr,
+                              struct intel_gsc_mtl_header *header_in,
+                              struct intel_gsc_mtl_header *header_out,
+                              u64 addr_in, u64 addr_out,
                               size_t msg_out_len)
 {
        struct intel_gt *gt = i915->media_gt;
        int ret;
 
-       header->flags = 0;
-       ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
-                                                 header->message_size,
-                                                 addr,
-                                                 msg_out_len + sizeof(*header));
+       ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr_in,
+                                                 header_in->message_size,
+                                                 addr_out,
+                                                 msg_out_len + sizeof(*header_out));
        if (ret) {
                drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
                return ret;
        }
 
        /*
-        * Checking validity marker for memory sanity
+        * Check the validity marker and header status to see if some error has
+        * blocked us from sending the message to the GSC CS
         */
-       if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+       if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) {
                drm_err(&i915->drm, "invalid validity marker\n");
                return -EINVAL;
        }
 
-       if (header->status != 0) {
+       if (header_out->status != 0) {
                drm_err(&i915->drm, "header status indicates error %d\n",
-                       header->status);
+                       header_out->status);
                return -EINVAL;
        }
 
-       if (header->flags & GSC_OUTFLAG_MSG_PENDING)
+       if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) {
+               header_in->gsc_message_handle = header_out->gsc_message_handle;
                return -EAGAIN;
+       }
 
        return 0;
 }
@@ -769,11 +779,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
                                size_t msg_out_len)
 {
        struct intel_gt *gt = i915->media_gt;
-       struct intel_gsc_mtl_header *header;
-       const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
+       struct intel_gsc_mtl_header *header_in, *header_out;
+       const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
        struct intel_hdcp_gsc_message *hdcp_message;
-       u64 addr, host_session_id;
-       u32 reply_size, msg_size;
+       u64 addr_in, addr_out, host_session_id;
+       u32 reply_size, msg_size_in, msg_size_out;
        int ret, tries = 0;
 
        if (!intel_uc_uses_gsc_uc(&gt->uc))
@@ -782,16 +792,20 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
        if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
                return -ENOSPC;
 
+       msg_size_in = msg_in_len + sizeof(*header_in);
+       msg_size_out = msg_out_len + sizeof(*header_out);
        hdcp_message = i915->display.hdcp.hdcp_message;
-       header = hdcp_message->hdcp_cmd;
-       addr = i915_ggtt_offset(hdcp_message->vma);
+       header_in = hdcp_message->hdcp_cmd_in;
+       header_out = hdcp_message->hdcp_cmd_out;
+       addr_in = i915_ggtt_offset(hdcp_message->vma);
+       addr_out = addr_in + PAGE_SIZE;
 
-       msg_size = msg_in_len + sizeof(*header);
-       memset(header, 0, msg_size);
+       memset(header_in, 0, msg_size_in);
+       memset(header_out, 0, msg_size_out);
        get_random_bytes(&host_session_id, sizeof(u64));
-       intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
-                                             msg_size, host_session_id);
-       memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
+       intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
+                                             msg_size_in, host_session_id);
+       memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
 
        /*
         * Keep sending request in case the pending bit is set no need to add
@@ -800,7 +814,8 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
         * 20 times each message 50 ms apart
         */
        do {
-               ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
+               ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in,
+                                         addr_out, msg_out_len);
 
                /* Only try again if gsc says so */
                if (ret != -EAGAIN)
@@ -814,7 +829,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
                goto err;
 
        /* we use the same mem for the reply, so header is in the same loc */
-       reply_size = header->message_size - sizeof(*header);
+       reply_size = header_out->message_size - sizeof(*header_out);
        if (reply_size > msg_out_len) {
                drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
                         reply_size, (u32)msg_out_len);
@@ -824,7 +839,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
                            reply_size, (u32)msg_out_len);
        }
 
-       memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
+       memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
 
 err:
        return ret;
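
The message path now uses separate input and output pages and retries while the firmware flags the reply as pending, carrying the returned message handle into the resend. A model of that retry contract, with invented names:

#include <errno.h>
#include <stdint.h>

#define GSC_MSG_PENDING	0x1

struct gsc_hdr {
	uint64_t handle;
	unsigned int flags;
};

/* On "pending", adopt the firmware's handle so the resend continues
 * the same session; any other outcome ends the retry loop. */
static int gsc_check_reply(struct gsc_hdr *in, const struct gsc_hdr *out)
{
	if (out->flags & GSC_MSG_PENDING) {
		in->handle = out->handle;
		return -EAGAIN;
	}
	return 0;
}
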
index 5cc9fd2e88f67f95f42cbc98cfe673de1c728d12..cbf96551e534f8e9ebeee3bab44e2b1f5bcdbbc3 100644 (file)
@@ -13,7 +13,8 @@ struct drm_i915_private;
 
 struct intel_hdcp_gsc_message {
        struct i915_vma *vma;
-       void *hdcp_cmd;
+       void *hdcp_cmd_in;
+       void *hdcp_cmd_out;
 };
 
 bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
index 7ac5e6c5e00dc2ce6024a3dfc52ae39ce31c8e9a..94a7e1537f42780282960c434903cf24d9dba742 100644 (file)
@@ -2880,21 +2880,12 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
        return ddc_pin;
 }
 
-static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
+static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
        u8 ddc_pin;
 
-       ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
-       if (ddc_pin) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
-                           encoder->base.base.id, encoder->base.name,
-                           ddc_pin);
-               return ddc_pin;
-       }
-
        if (IS_ALDERLAKE_S(dev_priv))
                ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
@@ -2903,7 +2894,8 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
                ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
                ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
-       else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
+       else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+                HAS_PCH_TGP(dev_priv))
                ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
@@ -2916,10 +2908,62 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
        else
                ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
 
-       drm_dbg_kms(&dev_priv->drm,
-                   "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+       return ddc_pin;
+}
+
+static struct intel_encoder *
+get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_encoder *other;
+
+       for_each_intel_encoder(&i915->drm, other) {
+               if (other == encoder)
+                       continue;
+
+               if (!intel_encoder_is_dig_port(other))
+                       continue;
+
+               if (enc_to_dig_port(other)->hdmi.ddc_bus == ddc_pin)
+                       return other;
+       }
+
+       return NULL;
+}
+
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_encoder *other;
+       const char *source;
+       u8 ddc_pin;
+
+       ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
+       source = "VBT";
+
+       if (!ddc_pin) {
+               ddc_pin = intel_hdmi_default_ddc_pin(encoder);
+               source = "platform default";
+       }
+
+       if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
+               drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n",
+                           encoder->base.base.id, encoder->base.name, ddc_pin);
+               return 0;
+       }
+
+       other = get_encoder_by_ddc_pin(encoder, ddc_pin);
+       if (other) {
+               drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
+                           encoder->base.base.id, encoder->base.name, ddc_pin,
+                           other->base.base.id, other->base.name);
+               return 0;
+       }
+
+       drm_dbg_kms(&i915->drm,
+                   "[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n",
                    encoder->base.base.id, encoder->base.name,
-                   ddc_pin);
+                   ddc_pin, source);
 
        return ddc_pin;
 }
@@ -2990,6 +3034,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
                return;
 
        intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
+       if (!intel_hdmi->ddc_bus)
+               return;
+
        ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 
        drm_connector_init_with_ddc(dev, connector,
index 1160fa20433bd65a649728d92d8f19fd6d706082..0ff5ed46ae1e748b5a8cfd91688ca9c18a15770d 100644 (file)
@@ -376,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        u32 changed = 0, retry = 0;
        u32 hpd_event_bits;
        u32 hpd_retry_bits;
+       struct drm_connector *first_changed_connector = NULL;
+       int changed_connectors = 0;
 
        mutex_lock(&dev_priv->drm.mode_config.mutex);
        drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
@@ -428,6 +430,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                break;
                        case INTEL_HOTPLUG_CHANGED:
                                changed |= hpd_bit;
+                               changed_connectors++;
+                               if (!first_changed_connector) {
+                                       drm_connector_get(&connector->base);
+                                       first_changed_connector = &connector->base;
+                               }
                                break;
                        case INTEL_HOTPLUG_RETRY:
                                retry |= hpd_bit;
@@ -438,9 +445,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev_priv->drm.mode_config.mutex);
 
-       if (changed)
+       if (changed_connectors == 1)
+               drm_kms_helper_connector_hotplug_event(first_changed_connector);
+       else if (changed_connectors > 0)
                drm_kms_helper_hotplug_event(&dev_priv->drm);
 
+       if (first_changed_connector)
+               drm_connector_put(first_changed_connector);
+
        /* Remove shared HPD pins that have changed */
        retry &= ~changed;
        if (retry) {
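
With exactly one changed connector, the targeted drm_kms_helper_connector_hotplug_event() lets userspace reprobe just that connector; with several, the driver falls back to the global event. A sketch of the dispatch rule, where notify_one/notify_all stand in for the drm_kms_helper_* calls:

static void hotplug_notify(int changed_connectors,
			   void (*notify_one)(void),
			   void (*notify_all)(void))
{
	if (changed_connectors == 1)
		notify_one();		/* targeted reprobe */
	else if (changed_connectors > 0)
		notify_all();		/* global hotplug event */
}
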
index f95fa793fabba4f1812d779ad510b12c4ccf91c1..95a7ea94f41743d715a64a4e119630d566c31714 100644 (file)
@@ -842,6 +842,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
                intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+       else
+               intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -1049,7 +1051,7 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
        enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
        hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
 
-       intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+       intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
 
        mtp_hpd_invert(i915);
        ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
index d6fe2bbabe559a16ed3a22a43fedd26bf831244f..09c1aa1427adb00d4d7610d75753081cb25e959e 100644 (file)
@@ -1348,11 +1348,12 @@ out_unlock:
 static int get_registers(struct intel_overlay *overlay, bool use_phys)
 {
        struct drm_i915_private *i915 = overlay->i915;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
        struct i915_vma *vma;
        int err;
 
-       obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+       if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */
+               obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
index f4c09cc37a5e1e572995de749819374feaf6c193..9583e86b602adad6f42de641ecdd476bc661e70e 100644 (file)
@@ -423,7 +423,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
        if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
                return true;
 
-       if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+       if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
            (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
            (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
                return true;
index 56c17283ba2d5bddd77989b99c62d3033b08dbc5..97d5eef10130df56bbeccb292363800e849d664f 100644 (file)
@@ -234,23 +234,91 @@ static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
                EDP_PSR_MASK(intel_dp->psr.transcoder);
 }
 
-static void psr_irq_control(struct intel_dp *intel_dp)
+static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+                             enum transcoder cpu_transcoder)
 {
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       i915_reg_t imr_reg;
-       u32 mask;
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_CTL(cpu_transcoder);
+       else
+               return HSW_SRD_CTL;
+}
+
+static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+                               enum transcoder cpu_transcoder)
+{
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_DEBUG(cpu_transcoder);
+       else
+               return HSW_SRD_DEBUG;
+}
+
+static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+                                  enum transcoder cpu_transcoder)
+{
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_PERF_CNT(cpu_transcoder);
+       else
+               return HSW_SRD_PERF_CNT;
+}
+
+static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+                                enum transcoder cpu_transcoder)
+{
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_STATUS(cpu_transcoder);
+       else
+               return HSW_SRD_STATUS;
+}
+
+static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+                             enum transcoder cpu_transcoder)
+{
+       if (DISPLAY_VER(dev_priv) >= 12)
+               return TRANS_PSR_IMR(cpu_transcoder);
+       else
+               return EDP_PSR_IMR;
+}
 
+static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+                             enum transcoder cpu_transcoder)
+{
        if (DISPLAY_VER(dev_priv) >= 12)
-               imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+               return TRANS_PSR_IIR(cpu_transcoder);
+       else
+               return EDP_PSR_IIR;
+}
+
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+                                 enum transcoder cpu_transcoder)
+{
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_AUX_CTL(cpu_transcoder);
+       else
+               return HSW_SRD_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+                                  enum transcoder cpu_transcoder, int i)
+{
+       if (DISPLAY_VER(dev_priv) >= 8)
+               return EDP_PSR_AUX_DATA(cpu_transcoder, i);
        else
-               imr_reg = EDP_PSR_IMR;
+               return HSW_SRD_AUX_DATA(i);
+}
+
+static void psr_irq_control(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+       u32 mask;
 
        mask = psr_irq_psr_error_bit_get(intel_dp);
        if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
                mask |= psr_irq_post_exit_bit_get(intel_dp) |
                        psr_irq_pre_entry_bit_get(intel_dp);
 
-       intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
+       intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+                    psr_irq_mask_get(intel_dp), ~mask);
 }
 
 static void psr_event_print(struct drm_i915_private *i915,
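
The PSR rework funnels every register choice through tiny per-register helpers keyed on display version, so HSW's single SRD_* registers and the per-transcoder EDP_PSR_* registers on newer parts share one code path. A sketch of the selector shape, with illustrative tokens rather than real register values:

enum psr_reg { REG_HSW_SRD_CTL, REG_EDP_PSR_CTL };

/* One small selector per register, so callers never branch on the
 * platform themselves. */
static enum psr_reg psr_ctl_reg_for(int display_ver)
{
	return display_ver >= 8 ? REG_EDP_PSR_CTL : REG_HSW_SRD_CTL;
}
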
@@ -296,12 +364,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        ktime_t time_ns = ktime_get();
-       i915_reg_t imr_reg;
-
-       if (DISPLAY_VER(dev_priv) >= 12)
-               imr_reg = TRANS_PSR_IMR(cpu_transcoder);
-       else
-               imr_reg = EDP_PSR_IMR;
 
        if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
                intel_dp->psr.last_entry_attempt = time_ns;
@@ -339,7 +401,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
                 * again, so we don't care about unmasking the interrupt
                 * or unsetting irq_aux_error.
                 */
-               intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
+               intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+                            0, psr_irq_psr_error_bit_get(intel_dp));
 
                queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
        }
@@ -467,6 +530,43 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        }
 }
 
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+       u32 aux_clock_divider, aux_ctl;
+       /* write DP_SET_POWER=D0 */
+       static const u8 aux_msg[] = {
+               [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
+               [1] = (DP_SET_POWER >> 8) & 0xff,
+               [2] = DP_SET_POWER & 0xff,
+               [3] = 1 - 1,
+               [4] = DP_SET_POWER_D0,
+       };
+       int i;
+
+       BUILD_BUG_ON(sizeof(aux_msg) > 20);
+       for (i = 0; i < sizeof(aux_msg); i += 4)
+               intel_de_write(dev_priv,
+                              psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+                              intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
+
+       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+       /* Start with bits set for DDI_AUX_CTL register */
+       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
+                                            aux_clock_divider);
+
+       /* Select only valid bits for SRD_AUX_CTL */
+       aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+               EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+               EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+               EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
+
+       intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+                      aux_ctl);
+}
+
 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
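
The pre-programmed message in hsw_psr_setup_aux() is a five-byte DP native write of DP_SET_POWER = D0: a 4-bit command plus a 20-bit address in the first three bytes, a length-minus-one byte, then the payload, which the hardware replays on PSR exit. A sketch of building such a header:

#include <stdint.h>

#define AUX_NATIVE_WRITE	0x8	/* DP AUX native-write command */

/* Build the 4-byte AUX header for a native write of 'len' payload
 * bytes to the 20-bit 'addr'; length is encoded as len - 1. */
static void aux_native_write_hdr(uint8_t hdr[4], uint32_t addr, int len)
{
	hdr[0] = (AUX_NATIVE_WRITE << 4) | ((addr >> 16) & 0xf);
	hdr[1] = (addr >> 8) & 0xff;
	hdr[2] = addr & 0xff;
	hdr[3] = len - 1;
}
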
@@ -528,6 +628,15 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
        else
                val |= EDP_PSR_TP2_TP3_TIME_2500us;
 
+       /*
+        * WA 0479: hsw,bdw
+        * "Do not skip both TP1 and TP2/TP3"
+        */
+       if (DISPLAY_VER(dev_priv) < 9 &&
+           connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
+           connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
+               val |= EDP_PSR_TP2_TP3_TIME_100us;
+
 check_tp3_sel:
        if (intel_dp_source_supports_tps3(dev_priv) &&
            drm_dp_tps3_supported(intel_dp->dpcd))
@@ -577,7 +686,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
        if (DISPLAY_VER(dev_priv) >= 8)
                val |= EDP_PSR_CRC_ENABLE;
 
-       intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+       intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
                     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
 }
 
@@ -639,7 +748,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        }
 
        /* Wa_22012278275:adl-p */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+       if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
                static const u8 map[] = {
                        2, /* 5 lines */
                        1, /* 6 lines */
@@ -685,7 +794,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
         * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
         * recommends keeping this bit unset while PSR2 is enabled.
         */
-       intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0);
+       intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
 
        intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
 }
@@ -697,8 +806,10 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
                return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
        else if (DISPLAY_VER(dev_priv) >= 12)
                return cpu_transcoder == TRANSCODER_A;
-       else
+       else if (DISPLAY_VER(dev_priv) >= 9)
                return cpu_transcoder == TRANSCODER_EDP;
+       else
+               return false;
 }
 
 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
@@ -807,7 +918,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
                return;
 
        /* Wa_16011303918:adl-p */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+       if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                return;
 
        /*
@@ -963,7 +1074,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
 
        /* JSL and EHL only supports eDP 1.3 */
-       if (IS_JSL_EHL(dev_priv)) {
+       if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
                return false;
        }
@@ -975,7 +1086,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+       if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
                return false;
        }
@@ -1033,7 +1144,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 
        /* Wa_16011303918:adl-p */
        if (crtc_state->vrr.enable &&
-           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+           IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR2 not enabled, not compatible with HW stepping + VRR\n");
                return false;
@@ -1201,13 +1312,15 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 
-       if (transcoder_has_psr2(dev_priv, cpu_transcoder))
-               drm_WARN_ON(&dev_priv->drm,
-                           intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
+       drm_WARN_ON(&dev_priv->drm,
+                   transcoder_has_psr2(dev_priv, cpu_transcoder) &&
+                   intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
 
        drm_WARN_ON(&dev_priv->drm,
-                   intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE);
+                   intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+
        drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+
        lockdep_assert_held(&intel_dp->psr.lock);
 
        /* psr1 and psr2 are mutually exclusive.*/
@@ -1271,6 +1384,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 mask;
 
+       /*
+        * Only HSW and BDW have PSR AUX registers that need to be set up.
+        * SKL+ use hardcoded values for PSR AUX transactions.
+        */
+       if (DISPLAY_VER(dev_priv) < 9)
+               hsw_psr_setup_aux(intel_dp);
+
        /*
         * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD; also
         * mask LPSP to avoid dependency on other drivers that might block
@@ -1282,11 +1402,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
               EDP_PSR_DEBUG_MASK_LPSP |
               EDP_PSR_DEBUG_MASK_MAX_SLEEP;
 
-       if (DISPLAY_VER(dev_priv) < 11)
+       /*
+        * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+        * registers in order to keep the CURSURFLIVE tricks working :(
+        */
+       if (IS_DISPLAY_VER(dev_priv, 9, 10))
                mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
 
-       intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
-                      mask);
+       /* allow PSR with sprite enabled */
+       if (IS_HASWELL(dev_priv))
+               mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+
+       intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
 
        psr_irq_control(intel_dp);
 
@@ -1352,10 +1479,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
         * first time that PSR HW tries to activate so lets keep PSR disabled
         * to avoid any rendering problems.
         */
-       if (DISPLAY_VER(dev_priv) >= 12)
-               val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder));
-       else
-               val = intel_de_read(dev_priv, EDP_PSR_IIR);
+       val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
        val &= psr_irq_psr_error_bit_get(intel_dp);
        if (val) {
                intel_dp->psr.sink_not_reliable = true;
@@ -1418,7 +1542,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
                        drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
                }
 
-               val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+               val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
                drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
 
                return;
@@ -1432,7 +1556,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
 
                drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
        } else {
-               val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+               val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
                                   EDP_PSR_ENABLE, 0);
 
                drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
@@ -1451,7 +1575,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
                psr_status = EDP_PSR2_STATUS(cpu_transcoder);
                psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               psr_status = EDP_PSR_STATUS(cpu_transcoder);
+               psr_status = psr_status_reg(dev_priv, cpu_transcoder);
                psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
@@ -2151,7 +2275,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
         * defensive enough to cover everything.
         */
        return intel_de_wait_for_clear(dev_priv,
-                                      EDP_PSR_STATUS(cpu_transcoder),
+                                      psr_status_reg(dev_priv, cpu_transcoder),
                                       EDP_PSR_STATUS_STATE_MASK, 50);
 }
 
@@ -2205,7 +2329,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
                reg = EDP_PSR2_STATUS(cpu_transcoder);
                mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               reg = EDP_PSR_STATUS(cpu_transcoder);
+               reg = psr_status_reg(dev_priv, cpu_transcoder);
                mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
@@ -2825,7 +2949,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
                        "SRDOFFACK",
                        "SRDENT_ON",
                };
-               val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder));
+               val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
                status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
                if (status_val < ARRAY_SIZE(live_status))
                        status = live_status[status_val];
@@ -2872,7 +2996,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
                val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
                enabled = val & EDP_PSR2_ENABLE;
        } else {
-               val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+               val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
                enabled = val & EDP_PSR_ENABLE;
        }
        seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2884,7 +3008,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
        /*
         * SKL+ Perf counter is reset to 0 every time DC state is entered
         */
-       val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder));
+       val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
        seq_printf(m, "Performance counter: %u\n",
                   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
 
index 8750cb0d8d9dd0e74011c870219f81d6af35cd1f..d39951383c929d5740853fd8fef4a074e7f4de46 100644 (file)
@@ -7,6 +7,7 @@
 #define __INTEL_PSR_REGS_H__
 
 #include "intel_display_reg_defs.h"
+#include "intel_dp_aux_regs.h"
 
 #define TRANS_EXITLINE(trans)  _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
 #define   EXITLINE_ENABLE      REG_BIT(31)
@@ -19,6 +20,7 @@
  * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one
  * instance of it
  */
+#define HSW_SRD_CTL                            _MMIO(0x64800)
 #define _SRD_CTL_A                             0x60800
 #define _SRD_CTL_EDP                           0x6f800
 #define EDP_PSR_CTL(tran)                      _MMIO_TRANS2(tran, _SRD_CTL_A)
 #define   EDP_PSR_PRE_ENTRY(trans)     (TGL_PSR_PRE_ENTRY <<           \
                                         _EDP_PSR_TRANS_SHIFT(trans))
 
+#define HSW_SRD_AUX_CTL                                _MMIO(0x64810)
+#define _SRD_AUX_CTL_A                         0x60810
+#define _SRD_AUX_CTL_EDP                       0x6f810
+#define EDP_PSR_AUX_CTL(tran)                  _MMIO_TRANS2(tran, _SRD_AUX_CTL_A)
+#define   EDP_PSR_AUX_CTL_TIME_OUT_MASK                DP_AUX_CH_CTL_TIME_OUT_MASK
+#define   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK    DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
+#define   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK   DP_AUX_CH_CTL_PRECHARGE_2US_MASK
+#define   EDP_PSR_AUX_CTL_ERROR_INTERRUPT      REG_BIT(11)
+#define   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK    DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK
+
+#define HSW_SRD_AUX_DATA(i)                    _MMIO(0x64814 + (i) * 4) /* 5 registers */
 #define _SRD_AUX_DATA_A                                0x60814
 #define _SRD_AUX_DATA_EDP                      0x6f814
 #define EDP_PSR_AUX_DATA(tran, i)              _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
 
+#define HSW_SRD_STATUS                         _MMIO(0x64840)
 #define _SRD_STATUS_A                          0x60840
 #define _SRD_STATUS_EDP                                0x6f840
 #define EDP_PSR_STATUS(tran)                   _MMIO_TRANS2(tran, _SRD_STATUS_A)
 #define   EDP_PSR_STATUS_SENDING_TP1           REG_BIT(4)
 #define   EDP_PSR_STATUS_IDLE_MASK             REG_GENMASK(3, 0)
 
+#define HSW_SRD_PERF_CNT               _MMIO(0x64844)
 #define _SRD_PERF_CNT_A                        0x60844
 #define _SRD_PERF_CNT_EDP              0x6f844
 #define EDP_PSR_PERF_CNT(tran)         _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
 #define   EDP_PSR_PERF_CNT_MASK                REG_GENMASK(23, 0)
 
 /* PSR_MASK on SKL+ */
+#define HSW_SRD_DEBUG                          _MMIO(0x64860)
 #define _SRD_DEBUG_A                           0x60860
 #define _SRD_DEBUG_EDP                         0x6f860
 #define EDP_PSR_DEBUG(tran)                    _MMIO_TRANS2(tran, _SRD_DEBUG_A)
index 6e86c0971d2413eaf80d1ebec2b0bac66acf52c9..543cdc46aa1dfcf4288d3f4a681d263fbc2631a8 100644 (file)
 /* from BPP 6 to 36 in steps of 0.5 */
 #define RC_RANGE_QP444_12BPC_MAX_NUM_BPP       61
 
-/* from BPP 6 to 24 in steps of 0.5 */
+/*
+ * For YCbCr420 the bits_per_pixel sent in the PPS params
+ * is double the target bpp; the values below represent
+ * the target bpp.
+ */
+/* from BPP 4 to 12 in steps of 0.5 */
 #define RC_RANGE_QP420_8BPC_MAX_NUM_BPP                17
 
-/* from BPP 6 to 30 in steps of 0.5 */
+/* from BPP 4 to 15 in steps of 0.5 */
 #define RC_RANGE_QP420_10BPC_MAX_NUM_BPP       23
 
-/* from BPP 6 to 36 in steps of 0.5 */
+/* from BPP 4 to 18 in steps of 0.5 */
 #define RC_RANGE_QP420_12BPC_MAX_NUM_BPP       29
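
The entry counts follow from the half-bpp step size: a table spanning target bpp min..max holds (max - min) * 2 + 1 entries, e.g. (12 - 4) * 2 + 1 = 17 for 8bpc. A quick check of all three (macro name is illustrative, not part of the patch):

/* Entries in a half-bpp-step RC table. */
#define RC_TABLE_NUM_BPP(min_bpp, max_bpp) (((max_bpp) - (min_bpp)) * 2 + 1)

/* 8bpc:  RC_TABLE_NUM_BPP(4, 12) == 17
 * 10bpc: RC_TABLE_NUM_BPP(4, 15) == 23
 * 12bpc: RC_TABLE_NUM_BPP(4, 18) == 29 */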
 
 /*
index 21f92123c844645069311deb630650cccc04b6e0..7d25a64698e2f503797a700a001ed2af096ec325 100644 (file)
@@ -2097,7 +2097,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
                const struct edid *edid = drm_edid_raw(drm_edid);
 
                /* DDC bus is shared, match EDID to connector type */
-               if (edid->input & DRM_EDID_INPUT_DIGITAL)
+               if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
                        status = connector_status_connected;
                else
                        status = connector_status_disconnected;
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
        __drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
                                            &conn_state->base.base);
 
-       INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+       intel_panel_init_alloc(&sdvo_connector->base);
 
        return sdvo_connector;
 }
@@ -3313,13 +3313,19 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
        return i2c_add_adapter(&sdvo->ddc) == 0;
 }
 
-static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
-                                  enum port port)
+static bool is_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port)
 {
        if (HAS_PCH_SPLIT(dev_priv))
-               drm_WARN_ON(&dev_priv->drm, port != PORT_B);
+               return port == PORT_B;
        else
-               drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
+               return port == PORT_B || port == PORT_C;
+}
+
+static bool assert_sdvo_port_valid(struct drm_i915_private *dev_priv,
+                                  enum port port)
+{
+       return !drm_WARN(&dev_priv->drm, !is_sdvo_port_valid(dev_priv, port),
+                        "Platform does not support SDVO %c\n", port_name(port));
 }
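
The rewrite leans on drm_WARN() returning the value of the condition it evaluated, so the helper doubles as a validity check: the caller gets a warning in the log plus a boolean it can act on, rather than a warning followed by init limping on. The intended call pattern, as wired up just below:

/* Warn about an unsupported port and abort initialization cleanly. */
if (!assert_sdvo_port_valid(dev_priv, port))
	return false;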
 
 bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3329,7 +3335,11 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
        struct intel_sdvo *intel_sdvo;
        int i;
 
-       assert_sdvo_port_valid(dev_priv, port);
+       if (!assert_port_valid(dev_priv, port))
+               return false;
+
+       if (!assert_sdvo_port_valid(dev_priv, port))
+               return false;
 
        intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
        if (!intel_sdvo)
index bd9116d2cd76cf2f6b2b35b6dd2a21834cc59f69..9d76c27567845b4fc54a9dcf5c354fea530f9aed 100644 (file)
@@ -52,23 +52,33 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
        return true;
 }
 
+static void
+intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
+                         int bpp)
+{
+       int bpc = vdsc_cfg->bits_per_component;
+
+       /* Read range_minqp and range_max_qp from qp tables */
+       vdsc_cfg->rc_range_params[buf].range_min_qp =
+               intel_lookup_range_min_qp(bpc, buf, bpp, vdsc_cfg->native_420);
+       vdsc_cfg->rc_range_params[buf].range_max_qp =
+               intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420);
+}
+
+/*
+ * We use the method provided in the DSC 1.2a C-Model in codec_main.c.
+ * This method uses a common formula to derive values for any combination of DSC
+ * variables. The formula approach may yield slight differences in the derived PPS
+ * parameters from the original parameter sets. These differences are not consequential
+ * to the coding performance because all parameter sets have been shown to produce
+ * visually lossless quality (it provides the same PPS values as the
+ * DSCParameterValuesVESA V1-2 spreadsheet).
+ */
 static void
 calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
 {
        int bpc = vdsc_cfg->bits_per_component;
        int bpp = vdsc_cfg->bits_per_pixel >> 4;
-       static const s8 ofs_und6[] = {
-               0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
-       };
-       static const s8 ofs_und8[] = {
-               2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
-       };
-       static const s8 ofs_und12[] = {
-               2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
-       };
-       static const s8 ofs_und15[] = {
-               10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
-       };
        int qp_bpc_modifier = (bpc - 8) * 2;
        u32 res, buf_i, bpp_i;
 
@@ -78,6 +88,28 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
        else
                vdsc_cfg->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1);
 
+       /*
+        * According to DSC 1.2 spec in Section 4.1 if native_420 is set:
+        * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
+        * height < 8.
+        * -second_line_offset_adj is 512, as shown by empirical values to yield the best
+        * chroma preservation in the second line.
+        * -nsl_bpg_offset is calculated as second_line_bpg_offset / (slice_height - 1),
+        * rounded up; we left shift second_line_bpg_offset by 11 to preserve 11
+        * fractional bits.
+        */
+       if (vdsc_cfg->native_420) {
+               if (vdsc_cfg->slice_height >= 8)
+                       vdsc_cfg->second_line_bpg_offset = 12;
+               else
+                       vdsc_cfg->second_line_bpg_offset =
+                               2 * (vdsc_cfg->slice_height - 1);
+
+               vdsc_cfg->second_line_offset_adj = 512;
+               vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11,
+                                                       vdsc_cfg->slice_height - 1);
+       }
+
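
A worked example of that fixed-point step, for a hypothetical slice_height of 108:

/* nsl_bpg_offset = DIV_ROUND_UP(12 << 11, 108 - 1)
 *                = DIV_ROUND_UP(24576, 107) = 230,
 * i.e. roughly 0.112 bits per group in 11-fractional-bit fixed point. */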
        /* Our hw supports only 444 modes as of today */
        if (bpp >= 12)
                vdsc_cfg->initial_offset = 2048;
@@ -97,33 +129,88 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
        vdsc_cfg->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
        vdsc_cfg->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
 
-       bpp_i  = (2 * (bpp - 6));
-       for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
-               u8 range_bpg_offset;
-
-               /* Read range_minqp and range_max_qp from qp tables */
-               vdsc_cfg->rc_range_params[buf_i].range_min_qp =
-                       intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
-               vdsc_cfg->rc_range_params[buf_i].range_max_qp =
-                       intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
-
-               /* Calculate range_bpg_offset */
-               if (bpp <= 6) {
-                       range_bpg_offset = ofs_und6[buf_i];
-               } else if (bpp <= 8) {
-                       res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
-                       range_bpg_offset = ofs_und6[buf_i] + res;
-               } else if (bpp <= 12) {
-                       range_bpg_offset = ofs_und8[buf_i];
-               } else if (bpp <= 15) {
-                       res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
-                       range_bpg_offset = ofs_und12[buf_i] + res;
-               } else {
-                       range_bpg_offset = ofs_und15[buf_i];
+       if (vdsc_cfg->native_420) {
+               static const s8 ofs_und4[] = {
+                       2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+               };
+               static const s8 ofs_und5[] = {
+                       2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+               };
+               static const s8 ofs_und6[] = {
+                       2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+               };
+               static const s8 ofs_und8[] = {
+                       10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+               };
+
+               bpp_i  = bpp - 8;
+               for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+                       u8 range_bpg_offset;
+
+                       intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
+
+                       /* Calculate range_bpg_offset */
+                       if (bpp <= 8) {
+                               range_bpg_offset = ofs_und4[buf_i];
+                       } else if (bpp <= 10) {
+                               res = DIV_ROUND_UP(((bpp - 8) *
+                                                   (ofs_und5[buf_i] - ofs_und4[buf_i])), 2);
+                               range_bpg_offset = ofs_und4[buf_i] + res;
+                       } else if (bpp <= 12) {
+                               res = DIV_ROUND_UP(((bpp - 10) *
+                                                   (ofs_und6[buf_i] - ofs_und5[buf_i])), 2);
+                               range_bpg_offset = ofs_und5[buf_i] + res;
+                       } else if (bpp <= 16) {
+                               res = DIV_ROUND_UP(((bpp - 12) *
+                                                   (ofs_und8[buf_i] - ofs_und6[buf_i])), 4);
+                               range_bpg_offset = ofs_und6[buf_i] + res;
+                       } else {
+                               range_bpg_offset = ofs_und8[buf_i];
+                       }
+
+                       vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
+                               range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
+               }
+       } else {
+               static const s8 ofs_und6[] = {
+                       0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+               };
+               static const s8 ofs_und8[] = {
+                       2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+               };
+               static const s8 ofs_und12[] = {
+                       2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+               };
+               static const s8 ofs_und15[] = {
+                       10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+               };
+
+               bpp_i  = (2 * (bpp - 6));
+               for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+                       u8 range_bpg_offset;
+
+                       intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
+
+                       /* Calculate range_bpg_offset */
+                       if (bpp <= 6) {
+                               range_bpg_offset = ofs_und6[buf_i];
+                       } else if (bpp <= 8) {
+                               res = DIV_ROUND_UP(((bpp - 6) *
+                                                   (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
+                               range_bpg_offset = ofs_und6[buf_i] + res;
+                       } else if (bpp <= 12) {
+                               range_bpg_offset = ofs_und8[buf_i];
+                       } else if (bpp <= 15) {
+                               res = DIV_ROUND_UP(((bpp - 12) *
+                                                   (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
+                               range_bpg_offset = ofs_und12[buf_i] + res;
+                       } else {
+                               range_bpg_offset = ofs_und15[buf_i];
+                       }
+
+                       vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
+                               range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
                }
-
-               vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
-                       range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
        }
 }
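
To make the 4:2:0 interpolation concrete, take a hypothetical target bpp of 9 (halfway between the und4 and und5 tables) and buf_i = 11, one of the indices where those tables differ:

/* res              = DIV_ROUND_UP((9 - 8) * (ofs_und5[11] - ofs_und4[11]), 2)
 *                  = DIV_ROUND_UP(1 * (-10 - (-12)), 2) = 1
 * range_bpg_offset = ofs_und4[11] + res = -12 + 1 = -11 */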
 
@@ -190,30 +277,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
        vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
 
        /*
-        * According to DSC 1.2 specs in Section 4.1 if native_420 is set:
-        * -We need to double the current bpp.
-        * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
-        * height < 8.
-        * -second_line_offset_adj is 512 as shown by emperical values to yeild best chroma
-        * preservation in second line.
-        * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded
-        * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11
-        * fractional bits.
+        * According to DSC 1.2 specs in Section 4.1 if native_420 is set
+        * we need to double the current bpp.
         */
-       if (vdsc_cfg->native_420) {
+       if (vdsc_cfg->native_420)
                vdsc_cfg->bits_per_pixel <<= 1;
 
-               if (vdsc_cfg->slice_height >= 8)
-                       vdsc_cfg->second_line_bpg_offset = 12;
-               else
-                       vdsc_cfg->second_line_bpg_offset =
-                               2 * (vdsc_cfg->slice_height - 1);
-
-               vdsc_cfg->second_line_offset_adj = 512;
-               vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11,
-                                                       vdsc_cfg->slice_height - 1);
-       }
-
        vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
 
        drm_dsc_set_rc_buf_thresh(vdsc_cfg);
@@ -237,18 +306,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
 
                if (ret)
                        return ret;
-
-               /*
-                * FIXME: verify that the hardware actually needs these
-                * modifications rather than them being simple typos.
-                */
-               if (compressed_bpp == 6 &&
-                   vdsc_cfg->bits_per_component == 8)
-                       vdsc_cfg->rc_quant_incr_limit1 = 23;
-
-               if (compressed_bpp == 8 &&
-                   vdsc_cfg->bits_per_component == 14)
-                       vdsc_cfg->rc_range_params[0].range_bpg_offset = 0;
        }
 
        /*
@@ -293,6 +350,16 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
                return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
 }
 
+int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
+{
+       int num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
+
+       if (crtc_state->bigjoiner_pipes)
+               num_vdsc_instances *= 2;
+
+       return num_vdsc_instances;
+}
+
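
The helper simply composes the two doublings, so the possible counts are:

/* (dsc_split ? 2 : 1) * (bigjoiner_pipes ? 2 : 1):
 *   no split, no bigjoiner       -> 1
 *   split only or bigjoiner only -> 2
 *   split + bigjoiner            -> 4 VDSC engines */
int n = intel_dsc_get_num_vdsc_instances(crtc_state);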
 static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -303,11 +370,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
        u32 pps_val = 0;
        u32 rc_buf_thresh_dword[4];
        u32 rc_range_params_dword[8];
-       u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
        int i = 0;
-
-       if (crtc_state->bigjoiner_pipes)
-               num_vdsc_instances *= 2;
+       int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
 
        /* Populate PICTURE_PARAMETER_SET_0 registers */
        pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
index 8763f00fa7e252330fda152b815f3f6c82939624..2cc41ff0890942586d143ddc7db27fbf2beb5a53 100644 (file)
@@ -22,6 +22,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain
 intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
 struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
+int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state);
 void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state);
 void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
index 6b01a0b68b97fe8f5011c5e6f3c689eb508f6f91..ffc15d278a39d3cfea480f8406fc36b423d7331f 100644 (file)
@@ -2174,7 +2174,7 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
                return false;
 
        /* Wa_22011186057 */
-       if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+       if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
                return false;
 
        if (DISPLAY_VER(i915) >= 11)
@@ -2196,11 +2196,11 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
 
        /* Wa_14010477008 */
        if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
-           IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+               (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
                return false;
 
        /* Wa_22011186057 */
-       if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+       if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
                return false;
 
        /* Wa_14013215631 */
index ae2f3ab3e73de82dabcb3c1d728ac1b37d13f58f..a96e7d028c5c61fb24a1be9a0b120c5e70628d59 100644 (file)
@@ -671,20 +671,6 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
                intel_de_posting_read(dev_priv, port_ctrl);
        }
 }
-
-static void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
-{
-       ktime_t panel_power_on_time;
-       s64 panel_power_off_duration;
-
-       panel_power_on_time = ktime_get_boottime();
-       panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
-                                                 intel_dsi->panel_power_off_time);
-
-       if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
-               msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
-}
-
 static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
                              const struct intel_crtc_state *pipe_config);
 static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -831,8 +817,6 @@ static void bxt_dsi_enable(struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state)
 {
-       drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
-
        intel_crtc_vblank_on(crtc_state);
 }
 
@@ -943,13 +927,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
        intel_dsi->panel_power_off_time = ktime_get_boottime();
 }
 
-static void intel_dsi_shutdown(struct intel_encoder *encoder)
-{
-       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
-       intel_dsi_wait_panel_power_cycle(intel_dsi);
-}
-
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
index fd556a076d05167d3160bbe9cb318a7d1bd6e5f4..1df74f7aa3dcbdda1b5dbff0df8dc07e3fa58a5b 100644 (file)
@@ -97,8 +97,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int ret;
 
-       dma_resv_assert_held(dma_buf->resv);
-
        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;
 
index dfaaa8b66ac30c0c17698005b4be703617828b54..ffddec1d2a764c82cde9bad6e596025b0d5a556c 100644 (file)
@@ -68,10 +68,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
        switch (obj->write_domain) {
        case I915_GEM_DOMAIN_GTT:
                spin_lock(&obj->vma.lock);
-               for_each_ggtt_vma(vma, obj) {
-                       if (i915_vma_unset_ggtt_write(vma))
-                               intel_gt_flush_ggtt_writes(vma->vm->gt);
-               }
+               for_each_ggtt_vma(vma, obj)
+                       i915_vma_flush_writes(vma);
                spin_unlock(&obj->vma.lock);
 
                i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
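
The new i915_vma_flush_writes() helper is not part of this hunk; going by the open-coded loop body it replaces, it presumably wraps exactly that pair of calls (sketch, assumed shape):

void i915_vma_flush_writes(struct i915_vma *vma)
{
	/* Flush the GGTT write domain if this VMA still holds it. */
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}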
index cfd7929587d8fa7709806c13c2291bc3071e0e87..5a687a3686bd5390333c26ce5ce329ccb1f9c368 100644 (file)
@@ -2229,8 +2229,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
        u32 *cs;
        int i;
 
-       if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
-               drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
+       if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) {
+               drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
 
@@ -2691,6 +2691,7 @@ static int
 eb_select_engine(struct i915_execbuffer *eb)
 {
        struct intel_context *ce, *child;
+       struct intel_gt *gt;
        unsigned int idx;
        int err;
 
@@ -2714,10 +2715,17 @@ eb_select_engine(struct i915_execbuffer *eb)
                }
        }
        eb->num_batches = ce->parallel.number_children + 1;
+       gt = ce->engine->gt;
 
        for_each_child(ce, child)
                intel_context_get(child);
-       intel_gt_pm_get(ce->engine->gt);
+       intel_gt_pm_get(gt);
+       /*
+        * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+        * free VMAs while the execbuf ioctl is validating VMAs.
+        */
+       if (gt->info.id)
+               intel_gt_pm_get(to_gt(gt->i915));
 
        if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
                err = intel_context_alloc_state(ce);
@@ -2756,7 +2764,10 @@ eb_select_engine(struct i915_execbuffer *eb)
        return err;
 
 err:
-       intel_gt_pm_put(ce->engine->gt);
+       if (gt->info.id)
+               intel_gt_pm_put(to_gt(gt->i915));
+
+       intel_gt_pm_put(gt);
        for_each_child(ce, child)
                intel_context_put(child);
        intel_context_put(ce);
@@ -2769,6 +2780,12 @@ eb_put_engine(struct i915_execbuffer *eb)
        struct intel_context *child;
 
        i915_vm_put(eb->context->vm);
+       /*
+        * This works in conjunction with eb_select_engine() to prevent
+        * i915_vma_parked() from interfering while execbuf validates VMAs.
+        */
+       if (eb->gt->info.id)
+               intel_gt_pm_put(to_gt(eb->gt->i915));
        intel_gt_pm_put(eb->gt);
        for_each_child(eb->context, child)
                intel_context_put(child);
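
The extra root-GT wakeref is strictly paired: it is taken in eb_select_engine() only for a media GT (non-zero gt->info.id) and dropped both on that function's error path and here in eb_put_engine(), in reverse order of acquisition:

/* get: intel_gt_pm_get(gt);
 *      if (gt->info.id) intel_gt_pm_get(to_gt(gt->i915));
 * put: if (gt->info.id) intel_gt_pm_put(to_gt(gt->i915));
 *      intel_gt_pm_put(gt); */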
index 97ac6fb37958f4a44e8ee8b5c826219be908eda8..ef9346ed6d0fc2c351fcda2e61749c2a2e43be45 100644 (file)
@@ -226,7 +226,7 @@ bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
         * it, but since i915 takes the stance of always zeroing memory before
         * handing it to userspace, we need to prevent this.
         */
-       return IS_JSL_EHL(i915);
+       return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
 }
 
 static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
@@ -469,7 +469,7 @@ void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
 {
        struct intel_frontbuffer *front;
 
-       front = __intel_frontbuffer_get(obj);
+       front = i915_gem_object_get_frontbuffer(obj);
        if (front) {
                intel_frontbuffer_flush(front, origin);
                intel_frontbuffer_put(front);
@@ -481,7 +481,7 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 {
        struct intel_frontbuffer *front;
 
-       front = __intel_frontbuffer_get(obj);
+       front = i915_gem_object_get_frontbuffer(obj);
        if (front) {
                intel_frontbuffer_invalidate(front, origin);
                intel_frontbuffer_put(front);
index 884a17275b3a4ab42caa50173b54fb4040596660..f607b87890ddd6d97375c793cc937c7e03687218 100644 (file)
@@ -716,10 +716,6 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                                    enum i915_map_type type);
 
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
-                                         struct drm_i915_gem_object *obj,
-                                         bool always_coherent);
-
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
@@ -891,4 +887,71 @@ static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *o
 
 #endif
 
+/**
+ * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
+ * @obj: The object whose frontbuffer to get.
+ *
+ * Get a pointer to the object's frontbuffer if one exists. Note that the RCU
+ * mechanism is used to handle e.g. an ongoing removal of the frontbuffer
+ * pointer.
+ *
+ * Return: pointer to the object's frontbuffer if one exists, NULL otherwise
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
+{
+       struct intel_frontbuffer *front;
+
+       if (likely(!rcu_access_pointer(obj->frontbuffer)))
+               return NULL;
+
+       rcu_read_lock();
+       do {
+               front = rcu_dereference(obj->frontbuffer);
+               if (!front)
+                       break;
+
+               if (unlikely(!kref_get_unless_zero(&front->ref)))
+                       continue;
+
+               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+                       break;
+
+               intel_frontbuffer_put(front);
+       } while (1);
+       rcu_read_unlock();
+
+       return front;
+}
+
+/**
+ * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
+ * @obj: The object whose frontbuffer to set.
+ * @front: The frontbuffer to set
+ *
+ * Set the object's frontbuffer pointer. If a frontbuffer is already set for
+ * the object, keep it and return its pointer to the caller. Note that the RCU
+ * mechanism is used to handle e.g. an ongoing removal of the frontbuffer
+ * pointer. This function is protected by i915->display.fb_tracking.lock.
+ *
+ * Return: pointer to frontbuffer which was set.
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
+                               struct intel_frontbuffer *front)
+{
+       struct intel_frontbuffer *cur = front;
+
+       if (!front) {
+               RCU_INIT_POINTER(obj->frontbuffer, NULL);
+       } else if (rcu_access_pointer(obj->frontbuffer)) {
+               cur = rcu_dereference_protected(obj->frontbuffer, true);
+               kref_get(&cur->ref);
+       } else {
+               drm_gem_object_get(intel_bo_to_drm_bo(obj));
+               rcu_assign_pointer(obj->frontbuffer, front);
+       }
+
+       return cur;
+}
+
 #endif
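
The getter is the classic RCU plus kref_get_unless_zero() retry pattern: grab the pointer under rcu_read_lock(), take a reference only if the refcount has not already hit zero, and re-check the pointer to catch a concurrent swap. A minimal usage sketch, mirroring the flush path converted above:

struct intel_frontbuffer *front;

front = i915_gem_object_get_frontbuffer(obj);
if (front) {
	intel_frontbuffer_flush(front, ORIGIN_CPU);
	intel_frontbuffer_put(front); /* drop the reference taken above */
}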
index e72c57716beecbb356f26acd300a1f9868ee99d2..2292404007c8152ab99bc709d7b43ff0b88bd355 100644 (file)
@@ -17,6 +17,8 @@
 #include "i915_selftest.h"
 #include "i915_vma_resource.h"
 
+#include "gt/intel_gt_defines.h"
+
 struct drm_i915_gem_object;
 struct intel_frontbuffer;
 struct intel_memory_region;
@@ -675,7 +677,7 @@ struct drm_i915_gem_object {
                 */
                bool dirty:1;
 
-               u32 tlb;
+               u32 tlb[I915_MAX_GT];
        } mm;
 
        struct {
@@ -718,6 +720,9 @@ struct drm_i915_gem_object {
        };
 };
 
+#define intel_bo_to_drm_bo(bo) (&(bo)->base)
+#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
+
 static inline struct drm_i915_gem_object *
 to_intel_bo(struct drm_gem_object *gem)
 {
index 89fc8ea6bcfc43cf77df9387cb70e59f57a28bd1..6b6d22c194110c5c04797493db44154b8426d0d1 100644 (file)
@@ -7,7 +7,7 @@
 #include <drm/drm_cache.h>
 
 #include "gt/intel_gt.h"
-#include "gt/intel_gt_pm.h"
+#include "gt/intel_tlb.h"
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
@@ -193,13 +193,16 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct intel_gt *gt = to_gt(i915);
+       struct intel_gt *gt;
+       int id;
 
-       if (!obj->mm.tlb)
-               return;
+       for_each_gt(gt, i915, id) {
+               if (!obj->mm.tlb[id])
+                       return;
 
-       intel_gt_invalidate_tlb(gt, obj->mm.tlb);
-       obj->mm.tlb = 0;
+               intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
+               obj->mm.tlb[id] = 0;
+       }
 }
 
 struct sg_table *
@@ -465,21 +468,6 @@ void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
        return ret;
 }
 
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
-                                         struct drm_i915_gem_object *obj,
-                                         bool always_coherent)
-{
-       /*
-        * Wa_22016122933: always return I915_MAP_WC for MTL
-        */
-       if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
-               return I915_MAP_WC;
-       if (HAS_LLC(i915) || always_coherent)
-               return I915_MAP_WB;
-       else
-               return I915_MAP_WC;
-}
-
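
The removed i915_coherent_map_type() reappears GT-scoped as intel_gt_coherent_map_type(), which the selftest conversions further down call. Its body is not part of these hunks; the sketch below extrapolates from the removed code, and the name of the GT-scoped workaround check is an assumption:

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent)
{
	/* Wa_22016122933: helper name assumed, not taken from this patch */
	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
		return I915_MAP_WC;

	return (HAS_LLC(gt->i915) || always_coherent) ? I915_MAP_WB : I915_MAP_WC;
}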
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
index 3b094d36a0b04d21d6080bbb99d4f4218a6a30f5..1a766d8e7ccecd43c05bb5908d24f294ecf9ff2c 100644 (file)
@@ -892,7 +892,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
        } else {
                resource_size_t lmem_range;
 
-               lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+               lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
                lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
                lmem_size *= SZ_1G;
        }
@@ -974,3 +974,39 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
 {
        return obj->ops == &i915_gem_object_stolen_ops;
 }
+
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+{
+       return drm_mm_initialized(&i915->mm.stolen);
+}
+
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+{
+       return i915->dsm.stolen.start;
+}
+
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+{
+       return resource_size(&i915->dsm.stolen);
+}
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+                                const struct drm_mm_node *node)
+{
+       return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
+}
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+{
+       return drm_mm_node_allocated(node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+{
+       return node->start;
+}
+
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+{
+       return node->size;
+}
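
These accessors let callers (display code such as FBC) reason about stolen memory without reaching into i915 internals. A minimal usage sketch with a hypothetical caller:

/* Resolve a stolen node to its absolute address within the DSM. */
if (i915_gem_stolen_node_allocated(node)) {
	u64 addr = i915_gem_stolen_node_address(i915, node);
	u64 size = i915_gem_stolen_node_size(node);

	/* addr == dsm.stolen.start + i915_gem_stolen_node_offset(node) */
	setup_fb_range(addr, size); /* hypothetical consumer */
}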
index d5005a39d13017d65155491b59753460a8946a69..258381d1c054e2960cd04bb4579f07adbaee8178 100644 (file)
@@ -12,6 +12,8 @@ struct drm_i915_private;
 struct drm_mm_node;
 struct drm_i915_gem_object;
 
+#define i915_stolen_fb drm_mm_node
+
 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment);
@@ -36,4 +38,15 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
 
 #define I915_GEM_STOLEN_BIAS SZ_128K
 
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+                                const struct drm_mm_node *node);
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+
 #endif /* __I915_GEM_STOLEN_H__ */
index 4a33ad2d122bd01b57849c81a05873e8fb661cdb..d4b918fb11cebd73b1e5368b6fa1319bce3a653f 100644 (file)
@@ -186,7 +186,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 {
        /* nsecs_to_jiffies64() does not guard against overflow */
-       if (NSEC_PER_SEC % HZ &&
+       if ((NSEC_PER_SEC % HZ) != 0 &&
            div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
                return MAX_JIFFY_OFFSET;
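
The explicit != 0 spells out when the clamp matters: only on kernels where HZ does not divide NSEC_PER_SEC, since nsecs_to_jiffies64() then multiplies before dividing and a huge n would overflow. A concrete case:

/* With HZ = 300: NSEC_PER_SEC % HZ == 100, so any n of at least
 * MAX_JIFFY_OFFSET / HZ seconds is clamped to MAX_JIFFY_OFFSET
 * instead of being fed into the overflow-prone conversion. */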
 
index df6c9a84252cb8d700498c8dcd3c918029d22724..6b9f6cf50bf6b24b9bb5804b123db70adeb206d8 100644 (file)
@@ -1246,8 +1246,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
         * times in succession a possibility by enlarging the permutation array.
         */
        order = i915_random_order(count * count, &prng);
-       if (!order)
-               return -ENOMEM;
+       if (!order) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
        max = div_u64(max - size, max_page_size);
index a93a90b15907befc800243e6f4e684a9888840f0..d8f4a10d71deb664e722a785c2b70ec16a6aae5a 100644 (file)
 #include "selftests/igt_spinner.h"
 
 static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+                                struct intel_gt *gt,
                                 bool fill)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned int i, count = obj->base.size / sizeof(u32);
        enum i915_map_type map_type =
-               i915_coherent_map_type(i915, obj, false);
+               intel_gt_coherent_map_type(gt, obj, false);
        u32 *cur;
        int err = 0;
 
@@ -66,7 +66,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
                if (err)
                        continue;
 
-               err = igt_fill_check_buffer(obj, true);
+               err = igt_fill_check_buffer(obj, gt, true);
                if (err)
                        continue;
 
@@ -86,7 +86,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
                if (err)
                        continue;
 
-               err = igt_fill_check_buffer(obj, false);
+               err = igt_fill_check_buffer(obj, gt, false);
        }
        i915_gem_object_put(obj);
 
@@ -233,7 +233,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
                        continue;
 
                if (!vma) {
-                       err = igt_fill_check_buffer(obj, true);
+                       err = igt_fill_check_buffer(obj, gt, true);
                        if (err)
                                continue;
                }
@@ -276,7 +276,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
                if (err)
                        goto out_unlock;
        } else {
-               err = igt_fill_check_buffer(obj, false);
+               err = igt_fill_check_buffer(obj, gt, false);
        }
 
 out_unlock:
index 1c82caf525c34664cb9f27d65efe4e097c7a6a34..8fe0499308ffe533232efcf944f5c3c39016e921 100644 (file)
@@ -76,7 +76,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
        cmd = MI_FLUSH;
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
-               if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
+               if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
                        cmd |= MI_INVALIDATE_ISP;
        }
 
index 23857cc08eca1fbfec37a6ab0c96987798706a49..a4ff55aa5e55b91835e0bbde3617601d56d9a6ef 100644 (file)
@@ -39,11 +39,11 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (GRAPHICS_VER(rq->engine->i915) == 9)
+               if (GRAPHICS_VER(rq->i915) == 9)
                        vf_flush_wa = true;
 
                /* WaForGAMHang:kbl */
-               if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
+               if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
                        dc_flush_wa = true;
        }
 
@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
        return MI_ARB_CHECK | 1 << 8 | state;
 }
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
 {
-       u32 gsi_offset = gt->uncore->gsi_offset;
+       switch (engine->id) {
+       case RCS0:
+               return GEN12_CCS_AUX_INV;
+       case BCS0:
+               return GEN12_BCS0_AUX_INV;
+       case VCS0:
+               return GEN12_VD0_AUX_INV;
+       case VCS2:
+               return GEN12_VD2_AUX_INV;
+       case VECS0:
+               return GEN12_VE0_AUX_INV;
+       case CCS0:
+               return GEN12_CCS0_AUX_INV;
+       default:
+               return INVALID_MMIO_REG;
+       }
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+       i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+       if (IS_PONTEVECCHIO(engine->i915))
+               return false;
+
+       /*
+        * So far, platforms supported by i915 that have flat CCS do not
+        * require AUX invalidation. Also check whether the engine requires it.
+        */
+       return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+       i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+       u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+       if (!gen12_needs_ccs_aux_inv(engine))
+               return cs;
 
        *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
        *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
        *cs++ = AUX_INV;
-       *cs++ = MI_NOOP;
+
+       *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+               MI_SEMAPHORE_REGISTER_POLL |
+               MI_SEMAPHORE_POLL |
+               MI_SEMAPHORE_SAD_EQ_SDD;
+       *cs++ = 0;
+       *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+       *cs++ = 0;
+       *cs++ = 0;
 
        return cs;
 }
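
The MI_SEMAPHORE_WAIT appended above is a register poll rather than a memory semaphore: with REGISTER_POLL, POLL and SAD_EQ_SDD set and a semaphore data value of zero, the command streamer stalls until the AUX_INV bit it just wrote has self-cleared, i.e. until the hardware reports the AUX table invalidation complete. An annotation of the emitted dwords (a reading of the code above, not extra emission):

/* dword 0: WAIT_TOKEN | REGISTER_POLL | POLL | SAD_EQ_SDD
 * dword 1: semaphore data = 0    (value to compare against)
 * dword 2: address of inv_reg    (register to poll)
 * dwords 3-4: zero               (unused for register polls)
 * -> stall until *inv_reg == 0, i.e. AUX_INV has self-cleared. */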
@@ -180,8 +226,8 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv
 static int mtl_dummy_pipe_control(struct i915_request *rq)
 {
        /* Wa_14016712196 */
-       if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) ||
-           IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) {
+       if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
+           IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
                u32 *cs;
 
                /* dummy PIPE_CONTROL + depth flush */
@@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 {
        struct intel_engine_cs *engine = rq->engine;
 
-       if (mode & EMIT_FLUSH) {
-               u32 flags = 0;
+       /*
+        * On Aux CCS platforms, invalidating the Aux
+        * table requires quiescing memory traffic beforehand.
+        */
+       if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+               u32 bit_group_0 = 0;
+               u32 bit_group_1 = 0;
                int err;
                u32 *cs;
 
@@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
                if (err)
                        return err;
 
-               flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-               flags |= PIPE_CONTROL_FLUSH_L3;
-               flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-               flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+
+               /*
+                * When required, on MTL and beyond we need to set the
+                * CCS_FLUSH bit in the pipe control.
+                */
+               if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+                       bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+
+               bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+               bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+               bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+               bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /* Wa_1409600907:tgl,adl-p */
-               flags |= PIPE_CONTROL_DEPTH_STALL;
-               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-               flags |= PIPE_CONTROL_FLUSH_ENABLE;
+               bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
+               bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+               bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
 
-               flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-               flags |= PIPE_CONTROL_QW_WRITE;
+               bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
+               bit_group_1 |= PIPE_CONTROL_QW_WRITE;
 
-               flags |= PIPE_CONTROL_CS_STALL;
+               bit_group_1 |= PIPE_CONTROL_CS_STALL;
 
                if (!HAS_3D_PIPELINE(engine->i915))
-                       flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+                       bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
                else if (engine->class == COMPUTE_CLASS)
-                       flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+                       bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
 
-               cs = gen12_emit_pipe_control(cs,
-                                            PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-                                            flags, LRC_PPHWSP_SCRATCH_ADDR);
+               cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
+                                            LRC_PPHWSP_SCRATCH_ADDR);
                intel_ring_advance(rq, cs);
        }
 
@@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
                else if (engine->class == COMPUTE_CLASS)
                        flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
-               if (!HAS_FLAT_CCS(rq->engine->i915))
-                       count = 8 + 4;
-               else
-                       count = 8;
+               count = 8;
+               if (gen12_needs_ccs_aux_inv(rq->engine))
+                       count += 8;
 
                cs = intel_ring_begin(rq, count);
                if (IS_ERR(cs))
@@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
                cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
 
-               if (!HAS_FLAT_CCS(rq->engine->i915)) {
-                       /* hsdes: 1809175790 */
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_GFX_CCS_AUX_NV);
-               }
+               cs = gen12_emit_aux_table_inv(engine, cs);
 
                *cs++ = preparser_disable(false);
                intel_ring_advance(rq, cs);
@@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 {
-       intel_engine_mask_t aux_inv = 0;
-       u32 cmd, *cs;
+       u32 cmd = 4;
+       u32 *cs;
 
-       cmd = 4;
        if (mode & EMIT_INVALIDATE) {
                cmd += 2;
 
-               if (!HAS_FLAT_CCS(rq->engine->i915) &&
-                   (rq->engine->class == VIDEO_DECODE_CLASS ||
-                    rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
-                       aux_inv = rq->engine->mask &
-                               ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
-                       if (aux_inv)
-                               cmd += 4;
-               }
+               if (gen12_needs_ccs_aux_inv(rq->engine))
+                       cmd += 8;
        }
 
        cs = intel_ring_begin(rq, cmd);
@@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
                cmd |= MI_INVALIDATE_TLB;
                if (rq->engine->class == VIDEO_DECODE_CLASS)
                        cmd |= MI_INVALIDATE_BSD;
+
+               if (gen12_needs_ccs_aux_inv(rq->engine) &&
+                   rq->engine->class == COPY_ENGINE_CLASS)
+                       cmd |= MI_FLUSH_DW_CCS;
        }
 
        *cs++ = cmd;
@@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
        *cs++ = 0; /* upper addr */
        *cs++ = 0; /* value */
 
-       if (aux_inv) { /* hsdes: 1809175790 */
-               if (rq->engine->class == VIDEO_DECODE_CLASS)
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_VD0_AUX_NV);
-               else
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_VE0_AUX_NV);
-       }
+       cs = gen12_emit_aux_table_inv(rq->engine, cs);
 
        if (mode & EMIT_INVALIDATE)
                *cs++ = preparser_disable(false);
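A note on the ring-space arithmetic: cmd pulls double duty in this function. It is first the dword budget handed to intel_ring_begin() and is then reused for the MI_FLUSH_DW opcode (see the cmd |= MI_INVALIDATE_TLB line above). Below is a standalone sketch of just the budget, assuming only what this hunk shows; the contents of the 8-dword aux-invalidation sequence itself live in gen12_emit_aux_table_inv(), outside this excerpt.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirrors the accounting above: a plain MI_FLUSH_DW takes 4 dwords, an
 * invalidating flush adds 2 (the preparser disable/enable pair), and
 * engines needing a CCS aux invalidation reserve 8 more.
 */
static unsigned int flush_xcs_dwords(bool invalidate, bool needs_ccs_aux_inv)
{
	unsigned int cmd = 4;

	if (invalidate) {
		cmd += 2;
		if (needs_ccs_aux_inv)
			cmd += 8;
	}
	return cmd;
}

int main(void)
{
	printf("%u %u %u\n",
	       flush_xcs_dwords(false, false),	/* 4 */
	       flush_xcs_dwords(true, false),	/* 6 */
	       flush_xcs_dwords(true, true));	/* 14 */
	return 0;
}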
@@ -754,7 +798,7 @@ u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
 
 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
 {
-       struct drm_i915_private *i915 = rq->engine->i915;
+       struct drm_i915_private *i915 = rq->i915;
        u32 flags = (PIPE_CONTROL_CS_STALL |
                     PIPE_CONTROL_TLB_INVALIDATE |
                     PIPE_CONTROL_TILE_CACHE_FLUSH |
@@ -775,7 +819,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
                /* Wa_1409600907 */
                flags |= PIPE_CONTROL_DEPTH_STALL;
 
-       if (!HAS_3D_PIPELINE(rq->engine->i915))
+       if (!HAS_3D_PIPELINE(rq->i915))
                flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
        else if (rq->engine->class == COMPUTE_CLASS)
                flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
index 655e5c00ddc27751ef3f92d8020676ca6252d745..867ba697aceb83e5f8c24ca121bf70a7a85b1c62 100644 (file)
@@ -13,6 +13,7 @@
 #include "intel_gt_regs.h"
 #include "intel_gpu_commands.h"
 
+struct intel_engine_cs;
 struct intel_gt;
 struct i915_request;
 
@@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
 
 static inline u32 *
-__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
+                        u32 bit_group_1, u32 offset)
 {
        memset(batch, 0, 6 * sizeof(u32));
 
-       batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
-       batch[1] = flags1;
+       batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+       batch[1] = bit_group_1;
        batch[2] = offset;
 
        return batch + 6;
 }
 
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *gen8_emit_pipe_control(u32 *batch,
+                                         u32 bit_group_1, u32 offset)
 {
-       return __gen8_emit_pipe_control(batch, 0, flags, offset);
+       return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
 }
 
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
+                                          u32 bit_group_1, u32 offset)
 {
-       return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+       return __gen8_emit_pipe_control(batch, bit_group_0,
+                                       bit_group_1, offset);
 }
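The flags0/flags1 to bit_group_0/bit_group_1 rename makes the split visible at the call sites: group 0 travels in the opcode dword (PIPE_CONTROL0_HDC_PIPELINE_FLUSH in the rcs hunk above), group 1 is the classic flags dword. Here is a self-contained userspace sketch of the six-dword layout, not part of the diff; the opcode encoding follows intel_gpu_commands.h and PIPE_CONTROL_QW_WRITE appears in the register hunk further down, while the rest is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GFX_OP_PIPE_CONTROL(len) \
	((0x3 << 29) | (0x3 << 27) | (0x2 << 24) | ((len) - 2))
#define PIPE_CONTROL_CS_STALL	(1 << 20)
#define PIPE_CONTROL_QW_WRITE	(1 << 14)

/* Same shape as __gen8_emit_pipe_control(): six dwords, bit_group_0 folded
 * into the opcode dword, bit_group_1 in dword 1, post-sync address in dw 2. */
static uint32_t *emit_pipe_control(uint32_t *batch, uint32_t bit_group_0,
				   uint32_t bit_group_1, uint32_t offset)
{
	memset(batch, 0, 6 * sizeof(uint32_t));
	batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
	batch[1] = bit_group_1;
	batch[2] = offset;
	return batch + 6;
}

int main(void)
{
	uint32_t cs[6];

	emit_pipe_control(cs, 0,
			  PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE,
			  0x1000);
	printf("dw0=%08x dw1=%08x dw2=%08x\n", cs[0], cs[1], cs[2]);
	return 0;
}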
 
 static inline u32 *
index f948d33e5ec5aa81bb027d36b4bfdd749ca3c5aa..c8568e5d11479dcd09609cba6c91c748349a90c4 100644 (file)
@@ -37,9 +37,6 @@ static u64 gen8_pte_encode(dma_addr_t addr,
        if (unlikely(flags & PTE_READ_ONLY))
                pte &= ~GEN8_PAGE_RW;
 
-       if (flags & PTE_LM)
-               pte |= GEN12_PPGTT_PTE_LM;
-
        /*
         * For pre-gen12 platforms pat_index is the same as enum
         * i915_cache_level, so the switch-case here is still valid.
index 0aff5bb13c538ea28d0673220141e5b4cfb7a2f8..ee15486fed0daa57479b07da01e2b7c3ea78391b 100644 (file)
@@ -1333,6 +1333,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
        if (!frame)
                return -ENOMEM;
 
+       frame->rq.i915 = engine->i915;
        frame->rq.engine = engine;
        frame->rq.context = ce;
        rcu_assign_pointer(frame->rq.timeline, ce->timeline);
index 21af0ec52223421115011d30cad0c88ff020c126..b538b5c04948f63ee459411a10b246fe56c41c3b 100644 (file)
@@ -39,7 +39,7 @@ static void dbg_poison_ce(struct intel_context *ce)
 
        if (ce->state) {
                struct drm_i915_gem_object *obj = ce->state->obj;
-               int type = i915_coherent_map_type(ce->engine->i915, obj, true);
+               int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
                void *map;
 
                if (!i915_gem_object_trylock(obj, NULL))
index 2ebd937f3b4cb61907c234fc94746d127077143d..8a641bcf777cb4da2c211911a470272d759314a4 100644 (file)
@@ -2718,7 +2718,7 @@ static int emit_pdps(struct i915_request *rq)
        int err, i;
        u32 *cs;
 
-       GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+       GEM_BUG_ON(intel_vgpu_active(rq->i915));
 
        /*
         * Beware ye of the dragons, this sequence is magic!
@@ -3556,16 +3556,16 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
        lrc_init_wa_ctx(engine);
 
        if (HAS_LOGICAL_RING_ELSQ(i915)) {
-               execlists->submit_reg = uncore->regs +
+               execlists->submit_reg = intel_uncore_regs(uncore) +
                        i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
-               execlists->ctrl_reg = uncore->regs +
+               execlists->ctrl_reg = intel_uncore_regs(uncore) +
                        i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
 
                engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore,
                                    RING_EXECLIST_CONTROL(engine->mmio_base),
                                    FW_REG_WRITE);
        } else {
-               execlists->submit_reg = uncore->regs +
+               execlists->submit_reg = intel_uncore_regs(uncore) +
                        i915_mmio_reg_offset(RING_ELSP(base));
        }
 
index 37d0b0fe791df6746cf8236c5d00b79030a42306..40371b8a9bbbd70ccf0e38184478df73f666856d 100644 (file)
@@ -818,7 +818,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
        if (obj->bit_17 == NULL) {
                obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
                if (obj->bit_17 == NULL) {
-                       drm_err(&to_i915(obj->base.dev)->drm,
+                       drm_err(obj->base.dev,
                                "Failed to allocate memory for bit 17 record\n");
                        return;
                }
index 5d143e2a8db03ea60a509a2a7afb649638542720..2bd8d98d211023cb927aa12caa308243ee5fe3cb 100644 (file)
 #define   MI_SEMAPHORE_TARGET(engine)  ((engine)<<15)
 #define MI_SEMAPHORE_WAIT      MI_INSTR(0x1c, 2) /* GEN8+ */
 #define MI_SEMAPHORE_WAIT_TOKEN        MI_INSTR(0x1c, 3) /* GEN12+ */
+#define   MI_SEMAPHORE_REGISTER_POLL   (1 << 16)
 #define   MI_SEMAPHORE_POLL            (1 << 15)
 #define   MI_SEMAPHORE_SAD_GT_SDD      (0 << 12)
 #define   MI_SEMAPHORE_SAD_GTE_SDD     (1 << 12)
 #define   PIPE_CONTROL_QW_WRITE                                (1<<14)
 #define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
 #define   PIPE_CONTROL_DEPTH_STALL                     (1<<13)
+#define   PIPE_CONTROL_CCS_FLUSH                       (1<<13) /* MTL+ */
 #define   PIPE_CONTROL_WRITE_FLUSH                     (1<<12)
 #define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH       (1<<12) /* gen6+ */
 #define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE    (1<<11) /* MBZ on ILK */
index 7a008e829d4d099e9372be32b37d70ad60b67384..449f0b7fc8434354ec4869d9134bf5e43ba37600 100644 (file)
@@ -33,6 +33,7 @@
 #include "intel_rps.h"
 #include "intel_sa_media.h"
 #include "intel_gt_sysfs.h"
+#include "intel_tlb.h"
 #include "intel_uncore.h"
 #include "shmem_utils.h"
 
@@ -50,8 +51,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
        intel_gt_init_reset(gt);
        intel_gt_init_requests(gt);
        intel_gt_init_timelines(gt);
-       mutex_init(&gt->tlb.invalidate_lock);
-       seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+       intel_gt_init_tlb(gt);
        intel_gt_pm_init_early(gt);
 
        intel_wopcm_init_early(&gt->wopcm);
@@ -179,7 +179,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
        if (IS_HASWELL(i915))
                intel_uncore_write(uncore,
                                   HSW_MI_PREDICATE_RESULT_2,
-                                  IS_HSW_GT3(i915) ?
+                                  IS_HASWELL_GT3(i915) ?
                                   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
        /* Apply the GT workarounds... */
@@ -466,7 +466,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
        obj = i915_gem_object_create_lmem(i915, size,
                                          I915_BO_ALLOC_VOLATILE |
                                          I915_BO_ALLOC_GPU_ONLY);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
                obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_internal(i915, size);
@@ -846,7 +846,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
                intel_gt_fini_requests(gt);
                intel_gt_fini_reset(gt);
                intel_gt_fini_timelines(gt);
-               mutex_destroy(&gt->tlb.invalidate_lock);
+               intel_gt_fini_tlb(gt);
                intel_engines_free(gt);
        }
 }
@@ -887,7 +887,7 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 int intel_gt_probe_all(struct drm_i915_private *i915)
 {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-       struct intel_gt *gt = &i915->gt0;
+       struct intel_gt *gt = to_gt(i915);
        const struct intel_gt_definition *gtdef;
        phys_addr_t phys_addr;
        unsigned int mmio_bar;
@@ -904,7 +904,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
         */
        gt->i915 = i915;
        gt->name = "Primary GT";
-       gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+       gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 
        gt_dbg(gt, "Setting up %s\n", gt->name);
        ret = intel_gt_tile_setup(gt, phys_addr);
@@ -1004,136 +1004,18 @@ void intel_gt_info_print(const struct intel_gt_info *info,
        intel_sseu_dump(&info->sseu, p);
 }
 
-/*
- * HW architecture suggest typical invalidation time at 40us,
- * with pessimistic cases up to 100us and a recommendation to
- * cap at 1ms. We go a bit higher just in case.
- */
-#define TLB_INVAL_TIMEOUT_US 100
-#define TLB_INVAL_TIMEOUT_MS 4
-
-/*
- * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
- * but are now considered MCR registers.  Since they exist within a GAM range,
- * the primary instance of the register rolls up the status from each unit.
- */
-static int wait_for_invalidate(struct intel_engine_cs *engine)
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+                                             struct drm_i915_gem_object *obj,
+                                             bool always_coherent)
 {
-       if (engine->tlb_inv.mcr)
-               return intel_gt_mcr_wait_for_reg(engine->gt,
-                                                engine->tlb_inv.reg.mcr_reg,
-                                                engine->tlb_inv.done,
-                                                0,
-                                                TLB_INVAL_TIMEOUT_US,
-                                                TLB_INVAL_TIMEOUT_MS);
-       else
-               return __intel_wait_for_register_fw(engine->gt->uncore,
-                                                   engine->tlb_inv.reg.reg,
-                                                   engine->tlb_inv.done,
-                                                   0,
-                                                   TLB_INVAL_TIMEOUT_US,
-                                                   TLB_INVAL_TIMEOUT_MS,
-                                                   NULL);
-}
-
-static void mmio_invalidate_full(struct intel_gt *gt)
-{
-       struct drm_i915_private *i915 = gt->i915;
-       struct intel_uncore *uncore = gt->uncore;
-       struct intel_engine_cs *engine;
-       intel_engine_mask_t awake, tmp;
-       enum intel_engine_id id;
-       unsigned long flags;
-
-       if (GRAPHICS_VER(i915) < 8)
-               return;
-
-       intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-       intel_gt_mcr_lock(gt, &flags);
-       spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
-
-       awake = 0;
-       for_each_engine(engine, gt, id) {
-               if (!intel_engine_pm_is_awake(engine))
-                       continue;
-
-               if (engine->tlb_inv.mcr)
-                       intel_gt_mcr_multicast_write_fw(gt,
-                                                       engine->tlb_inv.reg.mcr_reg,
-                                                       engine->tlb_inv.request);
-               else
-                       intel_uncore_write_fw(uncore,
-                                             engine->tlb_inv.reg.reg,
-                                             engine->tlb_inv.request);
-
-               awake |= engine->mask;
-       }
-
-       GT_TRACE(gt, "invalidated engines %08x\n", awake);
-
-       /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
-       if (awake &&
-           (IS_TIGERLAKE(i915) ||
-            IS_DG1(i915) ||
-            IS_ROCKETLAKE(i915) ||
-            IS_ALDERLAKE_S(i915) ||
-            IS_ALDERLAKE_P(i915)))
-               intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
-
-       spin_unlock(&uncore->lock);
-       intel_gt_mcr_unlock(gt, flags);
-
-       for_each_engine_masked(engine, gt, awake, tmp) {
-               if (wait_for_invalidate(engine))
-                       gt_err_ratelimited(gt,
-                                          "%s TLB invalidation did not complete in %ums!\n",
-                                          engine->name, TLB_INVAL_TIMEOUT_MS);
-       }
-
        /*
-        * Use delayed put since a) we mostly expect a flurry of TLB
-        * invalidations so it is good to avoid paying the forcewake cost and
-        * b) it works around a bug in Icelake which cannot cope with too rapid
-        * transitions.
+        * Wa_22016122933: always return I915_MAP_WC for Media
+        * version 13.0 when the object is on the Media GT
         */
-       intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
-}
-
-static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
-{
-       u32 cur = intel_gt_tlb_seqno(gt);
-
-       /* Only skip if a *full* TLB invalidate barrier has passed */
-       return (s32)(cur - ALIGN(seqno, 2)) > 0;
-}
-
-void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
-{
-       intel_wakeref_t wakeref;
-
-       if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
-               return;
-
-       if (intel_gt_is_wedged(gt))
-               return;
-
-       if (tlb_seqno_passed(gt, seqno))
-               return;
-
-       with_intel_gt_pm_if_awake(gt, wakeref) {
-               mutex_lock(&gt->tlb.invalidate_lock);
-               if (tlb_seqno_passed(gt, seqno))
-                       goto unlock;
-
-               mmio_invalidate_full(gt);
-
-               write_seqcount_invalidate(&gt->tlb.seqno);
-unlock:
-               mutex_unlock(&gt->tlb.invalidate_lock);
-       }
+       if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+               return I915_MAP_WC;
+       if (HAS_LLC(gt->i915) || always_coherent)
+               return I915_MAP_WB;
+       else
+               return I915_MAP_WC;
 }
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_tlb.c"
-#endif
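intel_gt_coherent_map_type() replaces i915_coherent_map_type() (the callers are converted throughout this series), and taking a gt instead of an i915 is what lets it key on the media GT for Wa_22016122933. A quick desk check of the decision ladder, with the i915 predicates reduced to booleans; the helper names here are stand-ins, not i915 API.

#include <assert.h>
#include <stdbool.h>

enum map_type { MAP_WB, MAP_WC };

/* Same order as intel_gt_coherent_map_type() above: lmem objects and the
 * media-GT workaround force WC; LLC platforms or always_coherent callers
 * get WB; everything else falls back to WC. */
static enum map_type coherent_map_type(bool is_lmem, bool needs_wa_22016122933,
				       bool has_llc, bool always_coherent)
{
	if (is_lmem || needs_wa_22016122933)
		return MAP_WC;
	if (has_llc || always_coherent)
		return MAP_WB;
	return MAP_WC;
}

int main(void)
{
	assert(coherent_map_type(true,  false, true,  true)  == MAP_WC);
	assert(coherent_map_type(false, true,  true,  true)  == MAP_WC);
	assert(coherent_map_type(false, false, false, true)  == MAP_WB);
	assert(coherent_map_type(false, false, false, false) == MAP_WC);
	return 0;
}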
index d2f4fbde5f9f0de2021b5bdbb32e9cc09ae1385f..6c34547b58b59fd06a57b902e9f1f7b205744fd7 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef __INTEL_GT__
 #define __INTEL_GT__
 
+#include "i915_drv.h"
 #include "intel_engine_types.h"
 #include "intel_gt_types.h"
 #include "intel_reset.h"
@@ -24,6 +25,11 @@ static inline bool gt_is_root(struct intel_gt *gt)
        return !gt->info.id;
 }
 
+static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
+{
+       return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
+}
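For reference, MEDIA_VER_FULL() returns the packed IP_VER(ver, rel) value, so this helper matches media IP 13.0 exactly, and only on the standalone media GT. A one-liner to see the packing, assuming i915's usual ((ver) << 8 | (rel)) encoding:

#include <stdio.h>

#define IP_VER(ver, rel)	((ver) << 8 | (rel))

int main(void)
{
	/* Media 13.0 packs to 0xd00; 12.50, for contrast, packs to 0xc32. */
	printf("%#x %#x\n", IP_VER(13, 0), IP_VER(12, 50));
	return 0;
}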
+
 static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
 {
        return container_of(uc, struct intel_gt, uc);
@@ -107,16 +113,8 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
 void intel_gt_watchdog_work(struct work_struct *work);
 
-static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
-{
-       return seqprop_sequence(&gt->tlb.seqno);
-}
-
-static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
-{
-       return intel_gt_tlb_seqno(gt) | 1;
-}
-
-void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+                                             struct drm_i915_gem_object *obj,
+                                             bool always_coherent);
 
 #endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_defines.h b/drivers/gpu/drm/i915/gt/intel_gt_defines.h
new file mode 100644 (file)
index 0000000..5017788
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_DEFINES__
+#define __INTEL_GT_DEFINES__
+
+#define I915_MAX_GT 2
+
+#endif
index 62fd00c9e5192a0e8da971119eb9fe836384592d..77fb57223465149ba13958be05cef14e69b3f897 100644 (file)
@@ -31,7 +31,7 @@ static u32
 gen11_gt_engine_identity(struct intel_gt *gt,
                         const unsigned int bank, const unsigned int bit)
 {
-       void __iomem * const regs = gt->uncore->regs;
+       void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 timeout_ts;
        u32 ident;
 
@@ -148,7 +148,7 @@ gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
 static void
 gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
 {
-       void __iomem * const regs = gt->uncore->regs;
+       void __iomem * const regs = intel_uncore_regs(gt->uncore);
        unsigned long intr_dw;
        unsigned int bit;
 
@@ -183,7 +183,7 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
 bool gen11_gt_reset_one_iir(struct intel_gt *gt,
                            const unsigned int bank, const unsigned int bit)
 {
-       void __iomem * const regs = gt->uncore->regs;
+       void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 dw;
 
        lockdep_assert_held(gt->irq_lock);
@@ -404,7 +404,7 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 
 void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
 {
-       void __iomem * const regs = gt->uncore->regs;
+       void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 iir;
 
        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
index 718cb2c80f79ee2d4c61e2792deeda526d99d64b..2cdfb2f713d02631d4001553f2ac89b828d320ea 100644 (file)
 #define GEN8_PRIVATE_PAT_HI                    _MMIO(0x40e0 + 4)
 #define GEN10_PAT_INDEX(index)                 _MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7                       _MMIO(0x4180)
-#define GEN12_GFX_CCS_AUX_NV                   _MMIO(0x4208)
-#define GEN12_VD0_AUX_NV                       _MMIO(0x4218)
-#define GEN12_VD1_AUX_NV                       _MMIO(0x4228)
+
+#define GEN12_CCS_AUX_INV                      _MMIO(0x4208)
+#define GEN12_VD0_AUX_INV                      _MMIO(0x4218)
+#define GEN12_VE0_AUX_INV                      _MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV                     _MMIO(0x4248)
 
 #define GEN8_RTCR                              _MMIO(0x4260)
 #define GEN8_M1TCR                             _MMIO(0x4264)
 #define GEN8_BTCR                              _MMIO(0x426c)
 #define GEN8_VTCR                              _MMIO(0x4270)
 
-#define GEN12_VD2_AUX_NV                       _MMIO(0x4298)
-#define GEN12_VD3_AUX_NV                       _MMIO(0x42a8)
-#define GEN12_VE0_AUX_NV                       _MMIO(0x4238)
-
 #define BLT_HWS_PGA_GEN7                       _MMIO(0x4280)
 
-#define GEN12_VE1_AUX_NV                       _MMIO(0x42b8)
+#define GEN12_VD2_AUX_INV                      _MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV                     _MMIO(0x42c8)
 #define   AUX_INV                              REG_BIT(0)
+
 #define VEBOX_HWS_PGA_GEN7                     _MMIO(0x4380)
 
 #define GEN12_AUX_ERR_DBG                      _MMIO(0x43f4)
index ee2b44f896a27bee22e34ccd98c07d6922cded8c..f0dea54880af258ba607b8989b9a3d3fd712f32c 100644 (file)
@@ -700,6 +700,80 @@ static const struct attribute *media_perf_power_attrs[] = {
        NULL
 };
 
+static ssize_t
+rps_up_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
+                         char *buf)
+{
+       struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+       struct intel_rps *rps = &gt->rps;
+
+       return sysfs_emit(buf, "%u\n", intel_rps_get_up_threshold(rps));
+}
+
+static ssize_t
+rps_up_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+       struct intel_rps *rps = &gt->rps;
+       int ret;
+       u8 val;
+
+       ret = kstrtou8(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       ret = intel_rps_set_up_threshold(rps, val);
+
+       return ret == 0 ? count : ret;
+}
+
+static struct kobj_attribute rps_up_threshold_pct =
+       __ATTR(rps_up_threshold_pct,
+              0664,
+              rps_up_threshold_pct_show,
+              rps_up_threshold_pct_store);
+
+static ssize_t
+rps_down_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
+                           char *buf)
+{
+       struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+       struct intel_rps *rps = &gt->rps;
+
+       return sysfs_emit(buf, "%u\n", intel_rps_get_down_threshold(rps));
+}
+
+static ssize_t
+rps_down_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+       struct intel_rps *rps = &gt->rps;
+       int ret;
+       u8 val;
+
+       ret = kstrtou8(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       ret = intel_rps_set_down_threshold(rps, val);
+
+       return ret == 0 ? count : ret;
+}
+
+static struct kobj_attribute rps_down_threshold_pct =
+       __ATTR(rps_down_threshold_pct,
+              0664,
+              rps_down_threshold_pct_show,
+              rps_down_threshold_pct_store);
+
+static const struct attribute * const gen6_gt_rps_attrs[] = {
+       &rps_up_threshold_pct.attr,
+       &rps_down_threshold_pct.attr,
+       NULL
+};
+
 static ssize_t
 default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
@@ -722,9 +796,37 @@ default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, cha
 static struct kobj_attribute default_max_freq_mhz =
 __ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
 
+static ssize_t
+default_rps_up_threshold_pct_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+       return sysfs_emit(buf, "%u\n", gt->defaults.rps_up_threshold);
+}
+
+static struct kobj_attribute default_rps_up_threshold_pct =
+__ATTR(rps_up_threshold_pct, 0444, default_rps_up_threshold_pct_show, NULL);
+
+static ssize_t
+default_rps_down_threshold_pct_show(struct kobject *kobj,
+                                   struct kobj_attribute *attr,
+                                   char *buf)
+{
+       struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+       return sysfs_emit(buf, "%u\n", gt->defaults.rps_down_threshold);
+}
+
+static struct kobj_attribute default_rps_down_threshold_pct =
+__ATTR(rps_down_threshold_pct, 0444, default_rps_down_threshold_pct_show, NULL);
+
 static const struct attribute * const rps_defaults_attrs[] = {
        &default_min_freq_mhz.attr,
        &default_max_freq_mhz.attr,
+       &default_rps_up_threshold_pct.attr,
+       &default_rps_down_threshold_pct.attr,
        NULL
 };
 
@@ -752,6 +854,12 @@ static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
        if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
                ret = sysfs_create_file(kobj, vlv_attr);
 
+       if (is_object_gt(kobj) && !intel_uc_uses_guc_slpc(&gt->uc)) {
+               ret = sysfs_create_files(kobj, gen6_gt_rps_attrs);
+               if (ret)
+                       return ret;
+       }
+
        return ret;
 }
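The new nodes sit under the per-GT sysfs directory and, per the guard above, are only created as writable attributes when GuC SLPC is not managing frequencies. A hypothetical userspace helper for driving them; the card<N>/gt/gt<M> path layout is an assumption about where intel_gt_sysfs places these files, and the kernel side (rps_set_threshold() below) rejects values above 100 with -EINVAL.

#include <stdio.h>

/* Write a new up-threshold percentage (0..100) to the per-GT sysfs file
 * added above. Errors from the kernel surface at fclose() because stdio
 * buffers the write. Returns 0 on success. */
static int set_rps_up_threshold(int card, int gt, unsigned int pct)
{
	char path[128];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path),
		 "/sys/class/drm/card%d/gt/gt%d/rps_up_threshold_pct",
		 card, gt);
	f = fopen(path, "w");
	if (!f)
		return -1;
	ret = fprintf(f, "%u\n", pct) < 0 ? -1 : 0;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}

int main(void)
{
	return set_rps_up_threshold(0, 0, 90) ? 1 : 0;
}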
 
index f08c2556aa258e8a83eec0aafc9f1654710ea968..def7dd0eb6f196d45be6c3f3cd3f767a5fc1eb48 100644 (file)
@@ -83,6 +83,9 @@ enum intel_submission_method {
 struct gt_defaults {
        u32 min_freq;
        u32 max_freq;
+
+       u8 rps_up_threshold;
+       u8 rps_down_threshold;
 };
 
 enum intel_gt_type {
@@ -306,4 +309,6 @@ enum intel_gt_scratch_field {
        INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
 };
 
+#define intel_gt_support_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)
+
 #endif /* __INTEL_GT_TYPES_H__ */
index 2f6a9be0ffe61a8ecfdf4d0d0bb42cbbd9d5bb22..13944a14ea2d1b1d7a6082889c92c34addab3031 100644 (file)
@@ -89,7 +89,7 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
        enum i915_map_type type;
        void *vaddr;
 
-       type = i915_coherent_map_type(vm->i915, obj, true);
+       type = intel_gt_coherent_map_type(vm->gt, obj, true);
        vaddr = i915_gem_object_pin_map_unlocked(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
@@ -103,7 +103,7 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
        enum i915_map_type type;
        void *vaddr;
 
-       type = i915_coherent_map_type(vm->i915, obj, true);
+       type = intel_gt_coherent_map_type(vm->gt, obj, true);
        vaddr = i915_gem_object_pin_map(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
@@ -670,7 +670,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+       i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
index a4ec20aaafe2850ccf9ed2c7e0973b8bc7fd63c9..957d0aeb0c022f464720f1b456192b849ebff875 100644 (file)
@@ -1092,8 +1092,16 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
 
        obj = i915_gem_object_create_lmem(engine->i915, context_size,
                                          I915_BO_ALLOC_PM_VOLATILE);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj)) {
                obj = i915_gem_object_create_shmem(engine->i915, context_size);
+               /*
+                * Wa_22016122933: For Media version 13.0, all Media GT shared
+                * memory needs to be mapped as WC on CPU side and UC (PAT
+                * index 2) on GPU side.
+                */
+               if (intel_gt_needs_wa_22016122933(engine->gt))
+                       i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+       }
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
@@ -1184,9 +1192,9 @@ lrc_pre_pin(struct intel_context *ce,
        GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
        *vaddr = i915_gem_object_pin_map(ce->state->obj,
-                                        i915_coherent_map_type(ce->engine->i915,
-                                                               ce->state->obj,
-                                                               false) |
+                                        intel_gt_coherent_map_type(ce->engine->gt,
+                                                                   ce->state->obj,
+                                                                   false) |
                                         I915_MAP_OVERRIDE);
 
        return PTR_ERR_OR_ZERO(*vaddr);
@@ -1364,10 +1372,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
            IS_DG2_G11(ce->engine->i915))
                cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
 
-       /* hsdes: 1809175790 */
-       if (!HAS_FLAT_CCS(ce->engine->i915))
-               cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                             cs, GEN12_GFX_CCS_AUX_NV);
+       cs = gen12_emit_aux_table_inv(ce->engine, cs);
 
        /* Wa_16014892111 */
        if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@@ -1392,17 +1397,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
                                                    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
                                                    0);
 
-       /* hsdes: 1809175790 */
-       if (!HAS_FLAT_CCS(ce->engine->i915)) {
-               if (ce->engine->class == VIDEO_DECODE_CLASS)
-                       cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                                     cs, GEN12_VD0_AUX_NV);
-               else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
-                       cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                                     cs, GEN12_VE0_AUX_NV);
-       }
-
-       return cs;
+       return gen12_emit_aux_table_inv(ce->engine, cs);
 }
 
 static void
index 6023288b0e2dd56153ad37c1b7b0d44f81e433f6..576e5ef0289ba56613dd76c589f50846052f2729 100644 (file)
@@ -366,7 +366,7 @@ static int emit_pte(struct i915_request *rq,
                    u64 offset,
                    int length)
 {
-       bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
+       bool has_64K_pages = HAS_64K_PAGES(rq->i915);
        const u64 encode = rq->context->vm->pte_encode(0, pat_index,
                                                       is_lmem ? PTE_LM : 0);
        struct intel_ring *ring = rq->ring;
@@ -375,7 +375,7 @@ static int emit_pte(struct i915_request *rq,
        u32 page_size;
        u32 *hdr, *cs;
 
-       GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
+       GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
 
        page_size = I915_GTT_PAGE_SIZE;
        dword_length = 0x400;
@@ -531,7 +531,7 @@ static int emit_copy_ccs(struct i915_request *rq,
                         u32 dst_offset, u8 dst_access,
                         u32 src_offset, u8 src_access, int size)
 {
-       struct drm_i915_private *i915 = rq->engine->i915;
+       struct drm_i915_private *i915 = rq->i915;
        int mocs = rq->engine->gt->mocs.uc_index << 1;
        u32 num_ccs_blks;
        u32 *cs;
@@ -581,7 +581,7 @@ static int emit_copy_ccs(struct i915_request *rq,
 static int emit_copy(struct i915_request *rq,
                     u32 dst_offset, u32 src_offset, int size)
 {
-       const int ver = GRAPHICS_VER(rq->engine->i915);
+       const int ver = GRAPHICS_VER(rq->i915);
        u32 instance = rq->engine->instance;
        u32 *cs;
 
@@ -917,7 +917,7 @@ out_ce:
 static int emit_clear(struct i915_request *rq, u32 offset, int size,
                      u32 value, bool is_lmem)
 {
-       struct drm_i915_private *i915 = rq->engine->i915;
+       struct drm_i915_private *i915 = rq->i915;
        int mocs = rq->engine->gt->mocs.uc_index << 1;
        const int ver = GRAPHICS_VER(i915);
        int ring_sz;
index 436756bfbb1a461a28e1e1dbe3d3236dd8cffb19..d07a4f97b94342d7d58e6de8663be9ab085a7f27 100644 (file)
@@ -8,6 +8,7 @@
 #include "gem/i915_gem_lmem.h"
 
 #include "i915_trace.h"
+#include "intel_gt.h"
 #include "intel_gtt.h"
 #include "gen6_ppgtt.h"
 #include "gen8_ppgtt.h"
@@ -210,8 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm,
                return;
 
        vm->clear_range(vm, vma_res->start, vma_res->vma_size);
-       if (vma_res->tlb)
-               vma_invalidate_tlb(vm, vma_res->tlb);
+       vma_invalidate_tlb(vm, vma_res->tlb);
 }
 
 static unsigned long pd_count(u64 size, int shift)
index 2a3217e2890fc7f2bfa7eb4ecd83e64f218a640d..f8512aee58a830fb9f723ae3c9e24c9888b04ffe 100644 (file)
@@ -220,7 +220,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
                resource_size_t lmem_range;
                u64 tile_stolen, flat_ccs_base;
 
-               lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+               lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
                lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
                lmem_size *= SZ_1G;
 
index e2152f75ba2effd1a67c8664a86119a2eec482df..cc6bd21a3e51f184234af52b629f465457e98a77 100644 (file)
@@ -35,9 +35,6 @@
 
 #define RESET_MAX_RETRIES 3
 
-/* XXX How to handle concurrent GGTT updates using tiling registers? */
-#define RESET_UNDER_STOP_MACHINE 0
-
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
        struct drm_i915_file_private *file_priv = ctx->file_priv;
index fb99143be98e7327af823de10635563bccaca566..59da4b7bd262058855d18023bbec71e8e0618793 100644 (file)
@@ -13,6 +13,7 @@
 #include "intel_engine_regs.h"
 #include "intel_gpu_commands.h"
 #include "intel_ring.h"
+#include "intel_gt.h"
 #include "intel_timeline.h"
 
 unsigned int intel_ring_update_space(struct intel_ring *ring)
@@ -56,7 +57,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
        if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
                addr = (void __force *)i915_vma_pin_iomap(vma);
        } else {
-               int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+               int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
 
                addr = i915_gem_object_pin_map(vma->obj, type);
        }
index 3fd795c3263fd22d9f861dcba56a24d00e2ddeaf..92085ffd23de0e054ec18583a444eab98a59fc7d 100644 (file)
@@ -805,7 +805,7 @@ static int mi_set_context(struct i915_request *rq,
 static int remap_l3_slice(struct i915_request *rq, int slice)
 {
 #define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
-       u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
+       u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
        int i;
 
        if (!remap_info)
index e92e626d4994e8d023e7108a5a23546d72ee713f..092542f53aad9c59d0fbdd4c73170c07c866ea09 100644 (file)
@@ -16,7 +16,9 @@
 #include "intel_gt.h"
 #include "intel_gt_clock_utils.h"
 #include "intel_gt_irq.h"
+#include "intel_gt_pm.h"
 #include "intel_gt_pm_irq.h"
+#include "intel_gt_print.h"
 #include "intel_gt_regs.h"
 #include "intel_mchbar_regs.h"
 #include "intel_pcode.h"
@@ -672,7 +674,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 {
        struct intel_gt *gt = rps_to_gt(rps);
        struct intel_uncore *uncore = gt->uncore;
-       u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;
 
        lockdep_assert_held(&rps->power.mutex);
@@ -680,9 +681,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
        if (new_power == rps->power.mode)
                return;
 
-       threshold_up = 95;
-       threshold_down = 85;
-
        /* Note the units here are not exactly 1us, but 1280ns. */
        switch (new_power) {
        case LOW_POWER:
@@ -709,17 +707,22 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 
        GT_TRACE(gt,
                 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
-                new_power, threshold_up, ei_up, threshold_down, ei_down);
+                new_power,
+                rps->power.up_threshold, ei_up,
+                rps->power.down_threshold, ei_down);
 
        set(uncore, GEN6_RP_UP_EI,
            intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
        set(uncore, GEN6_RP_UP_THRESHOLD,
-           intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
+           intel_gt_ns_to_pm_interval(gt,
+                                      ei_up * rps->power.up_threshold * 10));
 
        set(uncore, GEN6_RP_DOWN_EI,
            intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
        set(uncore, GEN6_RP_DOWN_THRESHOLD,
-           intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
+           intel_gt_ns_to_pm_interval(gt,
+                                      ei_down *
+                                      rps->power.down_threshold * 10));
 
        set(uncore, GEN6_RP_CONTROL,
            (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
@@ -731,8 +734,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 
 skip_hw_write:
        rps->power.mode = new_power;
-       rps->power.up_threshold = threshold_up;
-       rps->power.down_threshold = threshold_down;
 }
 
 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
@@ -1559,10 +1560,12 @@ void intel_rps_enable(struct intel_rps *rps)
                return;
 
        GT_TRACE(rps_to_gt(rps),
-                "min:%x, max:%x, freq:[%d, %d]\n",
+                "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
                 rps->min_freq, rps->max_freq,
                 intel_gpu_freq(rps, rps->min_freq),
-                intel_gpu_freq(rps, rps->max_freq));
+                intel_gpu_freq(rps, rps->max_freq),
+                rps->power.up_threshold,
+                rps->power.down_threshold);
 
        GEM_BUG_ON(rps->max_freq < rps->min_freq);
        GEM_BUG_ON(rps->idle_freq > rps->max_freq);
@@ -2015,6 +2018,12 @@ void intel_rps_init(struct intel_rps *rps)
                }
        }
 
+       /* Set default thresholds in % */
+       rps->power.up_threshold = 95;
+       rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
+       rps->power.down_threshold = 85;
+       rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;
+
        /* Finally allow us to boost to max by default */
        rps->boost_freq = rps->max_freq;
        rps->idle_freq = rps->min_freq;
@@ -2569,6 +2578,58 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
                return set_min_freq(rps, val);
 }
 
+u8 intel_rps_get_up_threshold(struct intel_rps *rps)
+{
+       return rps->power.up_threshold;
+}
+
+static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
+{
+       int ret;
+
+       if (val > 100)
+               return -EINVAL;
+
+       ret = mutex_lock_interruptible(&rps->lock);
+       if (ret)
+               return ret;
+
+       if (*threshold == val)
+               goto out_unlock;
+
+       *threshold = val;
+
+       /* Force reset. */
+       rps->last_freq = -1;
+       mutex_lock(&rps->power.mutex);
+       rps->power.mode = -1;
+       mutex_unlock(&rps->power.mutex);
+
+       intel_rps_set(rps, clamp(rps->cur_freq,
+                                rps->min_freq_softlimit,
+                                rps->max_freq_softlimit));
+
+out_unlock:
+       mutex_unlock(&rps->lock);
+
+       return ret;
+}
+
+int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
+{
+       return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
+}
+
+u8 intel_rps_get_down_threshold(struct intel_rps *rps)
+{
+       return rps->power.down_threshold;
+}
+
+int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
+{
+       return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
+}
+
 static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
index a3fa987aa91f132e9aa6013683e09a6dfcf5710b..92fb01f5a45242f44452972f3f6fc0db1423507b 100644 (file)
@@ -37,6 +37,10 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
 
 int intel_gpu_freq(struct intel_rps *rps, int val);
 int intel_freq_opcode(struct intel_rps *rps, int val);
+u8 intel_rps_get_up_threshold(struct intel_rps *rps);
+int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold);
+u8 intel_rps_get_down_threshold(struct intel_rps *rps);
+int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold);
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
 u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
 u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
index e8f3d18c12b838d37a1e06be16e7ef2aa06cb0fd..8c1dbcbcbc4f5b904ce9094f1a7f2c0c1f9e1cab 100644 (file)
@@ -29,7 +29,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
         * Standalone media shares the general MMIO space with the primary
         * GT.  We'll re-use the primary GT's mapping.
         */
-       uncore->regs = i915->uncore.regs;
+       uncore->regs = intel_uncore_regs(&i915->uncore);
        if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
                return -EIO;
 
index 1141f875f5bd0742bbffc9748067d6246dc6ce53..f602895f6d0dfe19d061a6fdbed2f0a619f28590 100644 (file)
@@ -302,7 +302,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
        u8 eu_en;
        u8 s_en;
 
-       if (IS_JSL_EHL(gt->i915))
+       if (IS_JASPERLAKE(gt->i915) || IS_ELKHARTLAKE(gt->i915))
                intel_sseu_set_info(sseu, 1, 4, 8);
        else
                intel_sseu_set_info(sseu, 1, 8, 8);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
new file mode 100644 (file)
index 0000000..139608c
--- /dev/null
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
+#include "intel_gt_regs.h"
+#include "intel_tlb.h"
+
+/*
+ * HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+#define TLB_INVAL_TIMEOUT_US 100
+#define TLB_INVAL_TIMEOUT_MS 4
+
+/*
+ * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
+ * but are now considered MCR registers.  Since they exist within a GAM range,
+ * the primary instance of the register rolls up the status from each unit.
+ */
+static int wait_for_invalidate(struct intel_engine_cs *engine)
+{
+       if (engine->tlb_inv.mcr)
+               return intel_gt_mcr_wait_for_reg(engine->gt,
+                                                engine->tlb_inv.reg.mcr_reg,
+                                                engine->tlb_inv.done,
+                                                0,
+                                                TLB_INVAL_TIMEOUT_US,
+                                                TLB_INVAL_TIMEOUT_MS);
+       else
+               return __intel_wait_for_register_fw(engine->gt->uncore,
+                                                   engine->tlb_inv.reg.reg,
+                                                   engine->tlb_inv.done,
+                                                   0,
+                                                   TLB_INVAL_TIMEOUT_US,
+                                                   TLB_INVAL_TIMEOUT_MS,
+                                                   NULL);
+}
+
+static void mmio_invalidate_full(struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_uncore *uncore = gt->uncore;
+       struct intel_engine_cs *engine;
+       intel_engine_mask_t awake, tmp;
+       enum intel_engine_id id;
+       unsigned long flags;
+
+       if (GRAPHICS_VER(i915) < 8)
+               return;
+
+       intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+       intel_gt_mcr_lock(gt, &flags);
+       spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
+
+       awake = 0;
+       for_each_engine(engine, gt, id) {
+               if (!intel_engine_pm_is_awake(engine))
+                       continue;
+
+               if (engine->tlb_inv.mcr)
+                       intel_gt_mcr_multicast_write_fw(gt,
+                                                       engine->tlb_inv.reg.mcr_reg,
+                                                       engine->tlb_inv.request);
+               else
+                       intel_uncore_write_fw(uncore,
+                                             engine->tlb_inv.reg.reg,
+                                             engine->tlb_inv.request);
+
+               awake |= engine->mask;
+       }
+
+       GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+       /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+       if (awake &&
+           (IS_TIGERLAKE(i915) ||
+            IS_DG1(i915) ||
+            IS_ROCKETLAKE(i915) ||
+            IS_ALDERLAKE_S(i915) ||
+            IS_ALDERLAKE_P(i915)))
+               intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
+       spin_unlock(&uncore->lock);
+       intel_gt_mcr_unlock(gt, flags);
+
+       for_each_engine_masked(engine, gt, awake, tmp) {
+               if (wait_for_invalidate(engine))
+                       gt_err_ratelimited(gt,
+                                          "%s TLB invalidation did not complete in %ums!\n",
+                                          engine->name, TLB_INVAL_TIMEOUT_MS);
+       }
+
+       /*
+        * Use delayed put since a) we mostly expect a flurry of TLB
+        * invalidations so it is good to avoid paying the forcewake cost and
+        * b) it works around a bug in Icelake which cannot cope with too rapid
+        * transitions.
+        */
+       intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+       u32 cur = intel_gt_tlb_seqno(gt);
+
+       /* Only skip if a *full* TLB invalidate barrier has passed */
+       return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
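The seqno scheme rewards a close read: completed full invalidations keep the seqcount even, a pending barrier records an odd value (intel_gt_next_invalidate_tlb_full() in the new header below returns seqno | 1), and the signed subtraction keeps the test valid across u32 wraparound. A standalone check of the arithmetic, with ALIGN(seqno, 2) open-coded:

#include <assert.h>
#include <stdint.h>

#define ALIGN2(x)	(((x) + 1u) & ~1u)	/* ALIGN(x, 2): round up to even */

/* Same comparison as tlb_seqno_passed() above. */
static int seqno_passed(uint32_t cur, uint32_t seqno)
{
	return (int32_t)(cur - ALIGN2(seqno)) > 0;
}

int main(void)
{
	uint32_t barrier = 4 | 1;		/* odd value recorded while cur == 4 */

	assert(!seqno_passed(4, barrier));	/* nothing has passed yet */
	assert(!seqno_passed(6, barrier));	/* the boundary is excluded */
	assert(seqno_passed(8, barrier));	/* a full barrier has passed */
	assert(seqno_passed(2, 0xfffffffdu));	/* still correct across wraparound */
	return 0;
}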
+
+void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
+{
+       intel_wakeref_t wakeref;
+
+       if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+               return;
+
+       if (intel_gt_is_wedged(gt))
+               return;
+
+       if (tlb_seqno_passed(gt, seqno))
+               return;
+
+       with_intel_gt_pm_if_awake(gt, wakeref) {
+               mutex_lock(&gt->tlb.invalidate_lock);
+               if (tlb_seqno_passed(gt, seqno))
+                       goto unlock;
+
+               mmio_invalidate_full(gt);
+
+               write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+               mutex_unlock(&gt->tlb.invalidate_lock);
+       }
+}
+
+void intel_gt_init_tlb(struct intel_gt *gt)
+{
+       mutex_init(&gt->tlb.invalidate_lock);
+       seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+}
+
+void intel_gt_fini_tlb(struct intel_gt *gt)
+{
+       mutex_destroy(&gt->tlb.invalidate_lock);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h
new file mode 100644 (file)
index 0000000..337327a
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef INTEL_TLB_H
+#define INTEL_TLB_H
+
+#include <linux/seqlock.h>
+#include <linux/types.h>
+
+#include "intel_gt_types.h"
+
+void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
+
+void intel_gt_init_tlb(struct intel_gt *gt);
+void intel_gt_fini_tlb(struct intel_gt *gt);
+
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+       return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+       return intel_gt_tlb_seqno(gt) | 1;
+}
+
+#endif /* INTEL_TLB_H */
index 4d2dece96011513baa32819f3af802c25e861835..3ae0dbd39eaa3cfccd6f0dc617f34fe348b213b1 100644 (file)
@@ -123,6 +123,22 @@ static void wa_init_finish(struct i915_wa_list *wal)
                wal->wa_count, wal->name, wal->engine_name);
 }
 
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
+{
+       enum forcewake_domains fw = 0;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+               fw |= intel_uncore_forcewake_for_reg(uncore,
+                                                    wa->reg,
+                                                    FW_REG_READ |
+                                                    FW_REG_WRITE);
+
+       return fw;
+}
+
 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 {
        unsigned int addr = i915_mmio_reg_offset(wa->reg);
@@ -225,13 +241,13 @@ static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
 static void
 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
 {
-       wa_add(wal, reg, clear, set, clear, false);
+       wa_add(wal, reg, clear, set, clear | set, false);
 }
 
 static void
 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
 {
-       wa_mcr_add(wal, reg, clear, set, clear, false);
+       wa_mcr_add(wal, reg, clear, set, clear | set, false);
 }
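The fifth argument to wa_add()/wa_mcr_add() is the readback-verification mask, so widening it from clear to clear | set means set-bits that fall outside the cleared field are now actually checked. A minimal reproduction of the masked comparison (register and field values are made up):

#include <stdint.h>
#include <stdio.h>

/* wa_verify()-style check: the register is only compared under read_mask. */
static int wa_check(uint32_t cur, uint32_t set, uint32_t read_mask)
{
	return (cur & read_mask) == (set & read_mask);
}

int main(void)
{
	uint32_t clear = 0x000000ff;	/* hypothetical field being cleared */
	uint32_t set   = 0x00000100;	/* bit set outside that field */
	uint32_t cur   = 0x00000000;	/* pretend the HW dropped the set bit */

	/* Old mask (clear): the lost bit is invisible to verification. */
	printf("mask=clear:     %s\n",
	       wa_check(cur, set, clear) ? "pass" : "FAIL");
	/* New mask (clear | set): the regression is caught. */
	printf("mask=clear|set: %s\n",
	       wa_check(cur, set, clear | set) ? "pass" : "FAIL");
	return 0;
}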
 
 static void
@@ -404,7 +420,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
                     /* WaForceContextSaveRestoreNonCoherent:bdw */
                     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                    (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+                    (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -584,7 +600,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
        gen9_ctx_workarounds_init(engine, wal);
 
        /* WaToEnableHwFixForPushConstHWBug:kbl */
-       if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
+       if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
                wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                             GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
@@ -621,10 +637,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
 {
        /* Wa_1406697149 (WaDisableBankHangMode:icl) */
-       wa_write(wal,
-                GEN8_L3CNTLREG,
-                intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
-                GEN8_ERRDETBCTRL);
+       wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
 
        /* WaForceEnableNonCoherent:icl
         * This is not the same workaround as in early Gen9 platforms, where
@@ -653,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
        /* Wa_1604278689:icl,ehl */
        wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
        wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
-                        0, /* write-only register; skip validation */
+                        0,
                         0xFFFFFFFF);
 
        /* Wa_1406306137:icl,ehl */
@@ -670,38 +683,8 @@ static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
        wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
        wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                             REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
-       wa_mcr_add(wal,
-                  XEHP_FF_MODE2,
-                  FF_MODE2_TDS_TIMER_MASK,
-                  FF_MODE2_TDS_TIMER_128,
-                  0, false);
-}
-
-/*
- * These settings aren't actually workarounds, but general tuning settings that
- * need to be programmed on several platforms.
- */
-static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
-                                    struct i915_wa_list *wal)
-{
-       /*
-        * Although some platforms refer to it as Wa_1604555607, we need to
-        * program it even on those that don't explicitly list that
-        * workaround.
-        *
-        * Note that the programming of this register is further modified
-        * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
-        * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
-        * value when read. The default value for this register is zero for all
-        * fields and there are no bit masks. So instead of doing a RMW we
-        * should just write TDS timer value. For the same reason read
-        * verification is ignored.
-        */
-       wa_add(wal,
-              GEN12_FF_MODE2,
-              FF_MODE2_TDS_TIMER_MASK,
-              FF_MODE2_TDS_TIMER_128,
-              0, false);
+       wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+                            FF_MODE2_TDS_TIMER_128);
 }
 
 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -709,8 +692,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       gen12_ctx_gt_tuning_init(engine, wal);
-
        /*
         * Wa_1409142259:tgl,dg1,adl-p
         * Wa_1409347922:tgl,dg1,adl-p
@@ -732,15 +713,27 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 
        /*
-        * Wa_16011163337
+        * Wa_16011163337 - GS_TIMER
+        *
+        * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
+        * need to program it even on those that don't explicitly list that
+        * workaround.
+        *
+        * Note that the programming of GEN12_FF_MODE2 is further modified
+        * according to the FF_MODE2 guidance given by Wa_1608008084.
+        * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
+        * value when read from the CPU.
         *
-        * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
-        * to Wa_1608008084.
+        * The default value for this register is zero for all fields.
+        * So instead of doing a RMW we should just write the desired values
+        * for TDS and GS timers. Note that since the readback can't be trusted,
+        * the clear mask is just set to ~0 to make sure other bits are not
+        * inadvertently set. For the same reason read verification is ignored.
         */
        wa_add(wal,
               GEN12_FF_MODE2,
-              FF_MODE2_GS_TIMER_MASK,
-              FF_MODE2_GS_TIMER_224,
+              ~0,
+              FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
               0, false);
 
        if (!IS_DG1(i915)) {
@@ -987,6 +980,9 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
        struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+       struct intel_uncore *uncore = rq->engine->uncore;
+       enum forcewake_domains fw;
+       unsigned long flags;
        struct i915_wa *wa;
        unsigned int i;
        u32 *cs;
@@ -1003,13 +999,36 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
+       fw = wal_get_fw_for_rmw(uncore, wal);
+
+       intel_gt_mcr_lock(wal->gt, &flags);
+       spin_lock(&uncore->lock);
+       intel_uncore_forcewake_get__locked(uncore, fw);
+
        *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+               u32 val;
+
+               /* Skip reading the register if it's not really needed */
+               if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
+                       val = wa->set;
+               } else {
+                       val = wa->is_mcr ?
+                               intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
+                               intel_uncore_read_fw(uncore, wa->reg);
+                       val &= ~wa->clr;
+                       val |= wa->set;
+               }
+
                *cs++ = i915_mmio_reg_offset(wa->reg);
-               *cs++ = wa->set;
+               *cs++ = val;
        }
        *cs++ = MI_NOOP;
 
+       intel_uncore_forcewake_put__locked(uncore, fw);
+       spin_unlock(&uncore->lock);
+       intel_gt_mcr_unlock(wal->gt, flags);
+
        intel_ring_advance(rq, cs);
 
        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
@@ -1173,7 +1192,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:skl */
-       if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
+       if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
                wa_write_or(wal,
                            GEN9_GAMT_ECO_REG_RW_IA,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -1185,7 +1204,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen9_gt_workarounds_init(gt, wal);
 
        /* WaDisableDynamicCreditSharing:kbl */
-       if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
+       if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
                wa_write_or(wal,
                            GAMT_CHKN_BIT_REG,
                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1441,7 +1460,8 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 
        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
-           IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+               ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+               IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
                wa_write_or(wal,
                            GEN11_SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1485,6 +1505,18 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 
        /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
        wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
+
+       /*
+        * Wa_14015795083
+        *
+        * Firmware on some gen12 platforms locks the MISCCPCTL register,
+        * preventing i915 from modifying it for this workaround.  Skip the
+        * readback verification for this workaround on debug builds; if the
+        * workaround doesn't stick due to firmware behavior, it's not an error
+        * that we want CI to flag.
+        */
+       wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
+              0, 0, false);
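
For readability, the wa_add() arguments above map as follows (a sketch based
on the wa_add() signature used throughout this file):

        wa_add(wal,
               GEN7_MISCCPCTL,                     /* reg */
               GEN12_DOP_CLOCK_GATE_RENDER_ENABLE, /* clear */
               0,                                  /* set */
               0,                                  /* read_mask: verification skipped */
               false);                             /* not a masked register */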
 }
 
 static void
@@ -1710,7 +1742,6 @@ static void
 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 {
        /* Wa_14018778641 / Wa_18018781329 */
-       wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
        wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
 
        /* Wa_22016670082 */
@@ -1743,8 +1774,6 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
         * GT, the media GT's versions are regular singleton registers.
         */
        wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
-       wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
-       wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
 
        debug_dump_steering(gt);
 }
@@ -1850,22 +1879,6 @@ void intel_gt_init_workarounds(struct intel_gt *gt)
        wa_init_finish(wal);
 }
 
-static enum forcewake_domains
-wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
-{
-       enum forcewake_domains fw = 0;
-       struct i915_wa *wa;
-       unsigned int i;
-
-       for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-               fw |= intel_uncore_forcewake_for_reg(uncore,
-                                                    wa->reg,
-                                                    FW_REG_READ |
-                                                    FW_REG_WRITE);
-
-       return fw;
-}
-
 static bool
 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
          const char *name, const char *from)
@@ -2933,7 +2946,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;
 
        /* WaKBLVECSSemaphoreWaitPoll:kbl */
-       if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
+       if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
                wa_write(wal,
                         RING_SEMA_WAIT_POLL(engine->mmio_base),
                         1);
@@ -3237,7 +3250,7 @@ wa_list_srm(struct i915_request *rq,
            const struct i915_wa_list *wal,
            struct i915_vma *vma)
 {
-       struct drm_i915_private *i915 = rq->engine->i915;
+       struct drm_i915_private *i915 = rq->i915;
        unsigned int i, count = 0;
        const struct i915_wa *wa;
        u32 srm, *cs;
@@ -3336,7 +3349,7 @@ retry:
 
        err = 0;
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
-               if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
+               if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
                        continue;
 
                if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
index 76fbae358072df64a90547ab7abee27f7ad15abd..47070cba7eb145688a37d09e0dfc26cead7ffe54 100644 (file)
@@ -88,8 +88,9 @@ static int __live_context_size(struct intel_engine_cs *engine)
                goto err;
 
        vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
-                                                i915_coherent_map_type(engine->i915,
-                                                                       ce->state->obj, false));
+                                                intel_gt_coherent_map_type(engine->gt,
+                                                                           ce->state->obj,
+                                                                           false));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                intel_context_unpin(ce);
index 78cdfc6f315f2a9cb20105f911972d99794ca71e..86cecf7a11054012a21343ee3a8865f6654d4246 100644 (file)
@@ -62,7 +62,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
                return PTR_ERR(cs);
 
        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->i915) >= 8)
                cmd++;
        *cs++ = cmd;
        *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
index 8b0d84f2aad228a3e966eaf19edfd39d81b12c6c..0dd4d00ee894eb50d5d7d3717bc4d99f2cc520b9 100644 (file)
@@ -73,7 +73,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
        h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
        vaddr = i915_gem_object_pin_map_unlocked(h->obj,
-                                                i915_coherent_map_type(gt->i915, h->obj, false));
+                                                intel_gt_coherent_map_type(gt, h->obj, false));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
@@ -119,7 +119,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
                return ERR_CAST(obj);
        }
 
-       vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
+       vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
        if (IS_ERR(vaddr)) {
                i915_gem_object_put(obj);
                i915_vm_put(vm);
index a78a3d2c2e16e85644df98cc7e244627c4d75271..5f826b6dcf5d6fc5467e1c82cd91302972899adf 100644 (file)
@@ -1292,9 +1292,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
        }
 
        lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
-                                              i915_coherent_map_type(engine->i915,
-                                                                     ce->state->obj,
-                                                                     false));
+                                              intel_gt_coherent_map_type(engine->gt,
+                                                                         ce->state->obj,
+                                                                         false));
        if (IS_ERR(lrc)) {
                err = PTR_ERR(lrc);
                goto err_B1;
index a8446ab825012f385cdeee0c1b16e6a6a8b2b405..d73e438fb85fab75a2feab9e930d4171d985f9b3 100644 (file)
@@ -137,7 +137,7 @@ static int read_mocs_table(struct i915_request *rq,
        if (!table)
                return 0;
 
-       if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
+       if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
                addr = global_mocs_offset() + gt->uncore->gsi_offset;
        else
                addr = mocs_offset(rq->engine);
index 2ceeadecc639cc5b24eebede5bce05bccc467298..a7189c2d660cc557c309565e9bfc69f26798e840 100644 (file)
@@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
        }
 
        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->i915) >= 8)
                cmd++;
 
        *cs++ = cmd;
index 39c3ec12df1abb8218abd1d907ae81d12df7ca84..fa36cf920bdee9db8657d1a109a46edca3aaa36b 100644 (file)
@@ -459,12 +459,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+       if (GRAPHICS_VER(rq->i915) >= 8) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = addr;
                *cs++ = 0;
                *cs++ = value;
-       } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+       } else if (GRAPHICS_VER(rq->i915) >= 4) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
                *cs++ = addr;
index 3bd6b540257b46c89b65de46a992e5eb9c3a0d06..7e41f69fc818f904c5a7f78bc1801b3b194bce10 100644 (file)
@@ -6,6 +6,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 
 #include "gen8_engine_cs.h"
@@ -354,7 +355,7 @@ out_a:
 
 static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
 {
-       intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+       intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
 }
 
 static int invalidate_full(void *arg)
index 449c9ed44382200c1924446bb8fbff6c7107550c..bccc3a1200bc6e512a8b2cadbcfe9c79aad1fd3a 100644 (file)
@@ -33,7 +33,6 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
 
 struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        enum i915_map_type map_type;
        struct file *file;
        void *ptr;
@@ -44,7 +43,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
                return file;
        }
 
-       map_type = i915_coherent_map_type(i915, obj, true);
+       map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
        ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
        if (IS_ERR(ptr))
                return ERR_CAST(ptr);
index 714f0c256118c3923f2e34aa7f7d1328757466c5..6d009a905269d0d85a5b52ebbd82aba083d6e7f7 100644 (file)
@@ -8,6 +8,74 @@
 
 #include <linux/types.h>
 
+struct intel_gsc_version {
+       u16 major;
+       u16 minor;
+       u16 hotfix;
+       u16 build;
+} __packed;
+
+struct intel_gsc_partition {
+       u32 offset;
+       u32 size;
+} __packed;
+
+struct intel_gsc_layout_pointers {
+       u8 rom_bypass_vector[16];
+
+       /* size of pointers layout not including ROM bypass vector */
+       u16 size;
+
+       /*
+        * bit 0: backup copy of layout pointers exists
+        * bits 1-7: reserved (flags is a u8)
+        */
+       u8 flags;
+
+       u8 reserved;
+
+       u32 crc32;
+
+       struct intel_gsc_partition datap;
+       struct intel_gsc_partition boot1;
+       struct intel_gsc_partition boot2;
+       struct intel_gsc_partition boot3;
+       struct intel_gsc_partition boot4;
+       struct intel_gsc_partition boot5;
+       struct intel_gsc_partition temp_pages;
+} __packed;
+
+/* Boot partition structures */
+struct intel_gsc_bpdt_header {
+       u32 signature;
+#define INTEL_GSC_BPDT_HEADER_SIGNATURE 0x000055AA
+
+       u16 descriptor_count; /* number of entries after the header */
+
+       u8 version;
+       u8 configuration;
+
+       u32 crc32;
+
+       u32 build_version;
+       struct intel_gsc_version tool_version;
+} __packed;
+
+struct intel_gsc_bpdt_entry {
+       /*
+        * Bits 0-15: BPDT entry type
+        * Bits 16-17: reserved
+        * Bit 18: code sub-partition
+        * Bits 19-31: reserved
+        */
+       u32 type;
+#define INTEL_GSC_BPDT_ENTRY_TYPE_MASK GENMASK(15, 0)
+#define INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE 0x1
+
+       u32 sub_partition_offset; /* from the base of the BPDT header */
+       u32 sub_partition_size;
+} __packed;
+
 /* Code partition directory (CPD) structures */
 struct intel_gsc_cpd_header_v2 {
        u32 header_marker;
@@ -44,13 +112,6 @@ struct intel_gsc_cpd_entry {
        u8 reserved[4];
 } __packed;
 
-struct intel_gsc_version {
-       u16 major;
-       u16 minor;
-       u16 hotfix;
-       u16 build;
-} __packed;
-
 struct intel_gsc_manifest_header {
        u32 header_type; /* 0x4 for manifest type */
        u32 header_length; /* in dwords */
index 60e9c6c9e7756a22021a4e11eeb97630f75b00e6..e2e42b3e0d5d8bc52a466872a9a2523fbaae72ff 100644 (file)
  * Copyright © 2022 Intel Corporation
  */
 
+#include "gem/i915_gem_lmem.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_print.h"
 #include "gt/intel_ring.h"
+#include "intel_gsc_binary_headers.h"
 #include "intel_gsc_fw.h"
-
-#define GSC_FW_STATUS_REG                      _MMIO(0x116C40)
-#define GSC_FW_CURRENT_STATE                   REG_GENMASK(3, 0)
-#define   GSC_FW_CURRENT_STATE_RESET           0
-#define   GSC_FW_PROXY_STATE_NORMAL            5
-#define GSC_FW_INIT_COMPLETE_BIT               REG_BIT(9)
+#include "intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_reg.h"
 
 static bool gsc_is_in_reset(struct intel_uncore *uncore)
 {
-       u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+       u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
 
-       return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
-              GSC_FW_CURRENT_STATE_RESET;
+       return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
+                       HECI1_FWSTS1_CURRENT_STATE_RESET;
 }
 
-static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore)
+static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
 {
        intel_wakeref_t wakeref;
        u32 fw_status = 0;
 
-       with_intel_runtime_pm(uncore->rpm, wakeref)
-               fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+       if (needs_wakeref)
+               wakeref = intel_runtime_pm_get(uncore->rpm);
+
+       fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
 
+       if (needs_wakeref)
+               intel_runtime_pm_put(uncore->rpm, wakeref);
        return fw_status;
 }
 
-bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
+{
+       return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
+                            gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
+                                                 needs_wakeref)) ==
+              HECI1_FWSTS1_PROXY_STATE_NORMAL;
+}
+
+int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
 {
-       return REG_FIELD_GET(GSC_FW_CURRENT_STATE,
-                            gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore)) ==
-              GSC_FW_PROXY_STATE_NORMAL;
+       if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY))
+               return -ENODEV;
+       if (!intel_uc_fw_is_loadable(&gsc->fw))
+               return -ENODEV;
+       if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
+               return -ENOLINK;
+       if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
+               return -EAGAIN;
+
+       return 0;
 }
 
 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
 {
-       return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore) & GSC_FW_INIT_COMPLETE_BIT;
+       return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
+              HECI1_FWSTS1_INIT_COMPLETE;
+}
+
+static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
+{
+       return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
+}
+
+int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
+{
+       struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
+       struct intel_gt *gt = gsc_uc_to_gt(gsc);
+       const struct intel_gsc_layout_pointers *layout = data;
+       const struct intel_gsc_bpdt_header *bpdt_header = NULL;
+       const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
+       const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
+       const struct intel_gsc_cpd_entry *cpd_entry = NULL;
+       const struct intel_gsc_manifest_header *manifest;
+       size_t min_size = sizeof(*layout);
+       int i;
+
+       if (size < min_size) {
+               gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
+               return -ENODATA;
+       }
+
+       /*
+        * The GSC binary starts with the pointer layout, which contains the
+        * locations of the various partitions of the binary. The partition
+        * relevant for the version is boot1, where we can find a BPDT header
+        * followed by entries, one of which points to the RBE sub-section of
+        * the partition. From there we can parse the CPD header and the
+        * following entries to find the manifest location (the entry named
+        * "RBEP.man"), from which we can finally extract the version.
+        *
+        * --------------------------------------------------
+        * [  intel_gsc_layout_pointers                     ]
+        * [      ...                                       ]
+        * [      boot1.offset  >---------------------------]------o
+        * [      ...                                       ]      |
+        * --------------------------------------------------      |
+        *                                                         |
+        * --------------------------------------------------      |
+        * [  intel_gsc_bpdt_header                         ]<-----o
+        * --------------------------------------------------
+        * [  intel_gsc_bpdt_entry[]                        ]
+        * [      entry1                                    ]
+        * [      ...                                       ]
+        * [      entryX                                    ]
+        * [          type == GSC_RBE                       ]
+        * [          offset  >-----------------------------]------o
+        * [      ...                                       ]      |
+        * --------------------------------------------------      |
+        *                                                         |
+        * --------------------------------------------------      |
+        * [  intel_gsc_cpd_header_v2                       ]<-----o
+        * --------------------------------------------------
+        * [  intel_gsc_cpd_entry[]                         ]
+        * [      entry1                                    ]
+        * [      ...                                       ]
+        * [      entryX                                    ]
+        * [          "RBEP.man"                            ]
+        * [           ...                                  ]
+        * [           offset  >----------------------------]------o
+        * [      ...                                       ]      |
+        * --------------------------------------------------      |
+        *                                                         |
+        * --------------------------------------------------      |
+        * [ intel_gsc_manifest_header                      ]<-----o
+        * [  ...                                           ]
+        * [  intel_gsc_version     fw_version              ]
+        * [  ...                                           ]
+        * --------------------------------------------------
+        */
+
+       min_size = layout->boot1.offset + layout->boot1.size;
+       if (size < min_size) {
+               gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
+                      size, min_size);
+               return -ENODATA;
+       }
+
+       min_size = sizeof(*bpdt_header);
+       if (layout->boot1.size < min_size) {
+               gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
+                      layout->boot1.size, min_size);
+               return -ENODATA;
+       }
+
+       bpdt_header = data + layout->boot1.offset;
+       if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
+               gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
+                      bpdt_header->signature);
+               return -EINVAL;
+       }
+
+       min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
+       if (layout->boot1.size < min_size) {
+               gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
+                      layout->boot1.size, min_size);
+               return -ENODATA;
+       }
+
+       bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
+       for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
+               if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
+                   INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
+                       continue;
+
+               cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
+               min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
+               break;
+       }
+
+       if (!cpd_header) {
+               gt_err(gt, "couldn't find CPD header in GSC binary!\n");
+               return -ENODATA;
+       }
+
+       if (layout->boot1.size < min_size) {
+               gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
+                      layout->boot1.size, min_size);
+               return -ENODATA;
+       }
+
+       if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
+               gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
+                      cpd_header->header_marker);
+               return -EINVAL;
+       }
+
+       min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
+       if (layout->boot1.size < min_size) {
+               gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
+                      layout->boot1.size, min_size);
+               return -ENODATA;
+       }
+
+       cpd_entry = (void *)cpd_header + cpd_header->header_length;
+       for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
+               if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
+                       manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
+                       intel_uc_fw_version_from_gsc_manifest(&gsc->release,
+                                                             manifest);
+                       gsc->security_version = manifest->security_version;
+                       break;
+               }
+       }
+
+       return 0;
 }
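
The parser above never dereferences past boot1: min_size is grown before each
access and re-checked against layout->boot1.size. A hypothetical helper
capturing the same pattern (the u32 offsets above cannot wrap a size_t on
64-bit, which is what makes the open-coded additions safe):

        static bool range_fits(size_t container, size_t offset, size_t len)
        {
                return offset <= container && len <= container - offset;
        }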
 
 static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
@@ -114,48 +282,25 @@ out_rq:
 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 {
        struct intel_gt *gt = gsc_uc_to_gt(gsc);
-       struct drm_i915_private *i915 = gt->i915;
-       struct drm_i915_gem_object *obj;
-       void *src, *dst;
+       void *src;
 
        if (!gsc->local)
                return -ENODEV;
 
-       obj = gsc->local->obj;
-
-       if (obj->base.size < gsc->fw.size)
+       if (gsc->local->size < gsc->fw.size)
                return -ENOSPC;
 
-       /*
-        * Wa_22016122933: For MTL the shared memory needs to be mapped
-        * as WC on CPU side and UC (PAT index 2) on GPU side
-        */
-       if (IS_METEORLAKE(i915))
-               i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-
-       dst = i915_gem_object_pin_map_unlocked(obj,
-                                              i915_coherent_map_type(i915, obj, true));
-       if (IS_ERR(dst))
-               return PTR_ERR(dst);
-
        src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
-                                              i915_coherent_map_type(i915, gsc->fw.obj, true));
-       if (IS_ERR(src)) {
-               i915_gem_object_unpin_map(obj);
+                                              intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
+       if (IS_ERR(src))
                return PTR_ERR(src);
-       }
 
-       memset(dst, 0, obj->base.size);
-       memcpy(dst, src, gsc->fw.size);
+       memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
+       memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
 
-       /*
-        * Wa_22016122933: Making sure the data in dst is
-        * visible to GSC right away
-        */
        intel_guc_write_barrier(&gt->uc.guc);
 
        i915_gem_object_unpin_map(gsc->fw.obj);
-       i915_gem_object_unpin_map(obj);
 
        return 0;
 }
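
A note on the accessors above, as a sketch of the invariant: gsc->local_vaddr
comes from i915_vma_pin_iomap() and is __iomem, so the plain memcpy()/memset()
used with the old CPU mapping would now be wrong (and a sparse error):

        void __iomem *dst = gsc->local_vaddr;   /* iomap of stolen memory */
        size_t tail = gsc->local->size - gsc->fw.size;  /* unused remainder */

        memcpy_toio(dst, src, gsc->fw.size);    /* not memcpy() */
        memset_io(dst + gsc->fw.size, 0, tail); /* not memset() */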
@@ -163,12 +308,94 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 static int gsc_fw_wait(struct intel_gt *gt)
 {
        return intel_wait_for_register(gt->uncore,
-                                      GSC_FW_STATUS_REG,
-                                      GSC_FW_INIT_COMPLETE_BIT,
-                                      GSC_FW_INIT_COMPLETE_BIT,
+                                      HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
+                                      HECI1_FWSTS1_INIT_COMPLETE,
+                                      HECI1_FWSTS1_INIT_COMPLETE,
                                       500);
 }
 
+struct intel_gsc_mkhi_header {
+       u8  group_id;
+#define MKHI_GROUP_ID_GFX_SRV 0x30
+
+       u8  command;
+#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)
+
+       u8  reserved;
+       u8  result;
+} __packed;
+
+struct mtl_gsc_ver_msg_in {
+       struct intel_gsc_mtl_header header;
+       struct intel_gsc_mkhi_header mkhi;
+} __packed;
+
+struct mtl_gsc_ver_msg_out {
+       struct intel_gsc_mtl_header header;
+       struct intel_gsc_mkhi_header mkhi;
+       u16 proj_major;
+       u16 compat_major;
+       u16 compat_minor;
+       u16 reserved[5];
+} __packed;
+
+#define GSC_VER_PKT_SZ SZ_4K
+
+static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
+{
+       struct intel_gt *gt = gsc_uc_to_gt(gsc);
+       struct mtl_gsc_ver_msg_in *msg_in;
+       struct mtl_gsc_ver_msg_out *msg_out;
+       struct i915_vma *vma;
+       u64 offset;
+       void *vaddr;
+       int err;
+
+       err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+                                            &vma, &vaddr);
+       if (err) {
+               gt_err(gt, "failed to allocate vma for GSC version query\n");
+               return err;
+       }
+
+       offset = i915_ggtt_offset(vma);
+       msg_in = vaddr;
+       msg_out = vaddr + GSC_VER_PKT_SZ;
+
+       intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
+                                             HECI_MEADDRESS_MKHI,
+                                             sizeof(*msg_in), 0);
+       msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
+       msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;
+
+       err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
+                                                 offset,
+                                                 sizeof(*msg_in),
+                                                 offset + GSC_VER_PKT_SZ,
+                                                 GSC_VER_PKT_SZ);
+       if (err) {
+               gt_err(gt,
+                      "failed to submit GSC request for compatibility version: %d\n",
+                      err);
+               goto out_vma;
+       }
+
+       if (msg_out->header.message_size != sizeof(*msg_out)) {
+               gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
+                      msg_out->header.message_size, sizeof(*msg_out),
+                      msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
+               err = -EPROTO;
+               goto out_vma;
+       }
+
+       gsc->fw.file_selected.ver.major = msg_out->compat_major;
+       gsc->fw.file_selected.ver.minor = msg_out->compat_minor;
+
+out_vma:
+       i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+       return err;
+}
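
For reference, a sketch of the single-allocation layout the query uses
(nothing here beyond what the function already does):

        /*
         * One GGTT vma, two 4K slots:
         *   [offset, offset + GSC_VER_PKT_SZ)                      msg_in  (request)
         *   [offset + GSC_VER_PKT_SZ, offset + 2 * GSC_VER_PKT_SZ) msg_out (reply)
         */
        u64 in = i915_ggtt_offset(vma);
        u64 out = in + GSC_VER_PKT_SZ;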
+
 int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
 {
        struct intel_gt *gt = gsc_uc_to_gt(gsc);
@@ -226,10 +453,24 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
        if (err)
                goto fail;
 
+       err = gsc_fw_query_compatibility_version(gsc);
+       if (err)
+               goto fail;
+
+       /* we only support compatibility version 1.0 at the moment */
+       err = intel_uc_check_file_version(gsc_fw, NULL);
+       if (err)
+               goto fail;
+
        /* FW is not fully operational until we enable SW proxy */
        intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
 
-       gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
+       gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
+               gsc_fw->file_selected.path,
+               gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
+               gsc->release.major, gsc->release.minor,
+               gsc->release.patch, gsc->release.build,
+               gsc->security_version);
 
        return 0;
 
index fff8928218df8f3ed6614a524db11b4fa2c7dee7..bc9dd0de8aaf5ff8440f86470fd52c7620f6f58a 100644 (file)
@@ -9,10 +9,13 @@
 #include <linux/types.h>
 
 struct intel_gsc_uc;
+struct intel_uc_fw;
 struct intel_uncore;
 
+int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size);
 int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
-bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc);
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref);
+int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc);
 
 #endif
index c659cc01f32f643302d0284593795358b220ea3c..0d3b22a7436595eb4886024d8616697548ac5d3d 100644 (file)
@@ -7,10 +7,11 @@
 
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_print.h"
-#include "intel_gsc_uc.h"
 #include "intel_gsc_fw.h"
-#include "i915_drv.h"
 #include "intel_gsc_proxy.h"
+#include "intel_gsc_uc.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
 
 static void gsc_work(struct work_struct *work)
 {
@@ -61,8 +62,18 @@ static void gsc_work(struct work_struct *work)
                }
 
                ret = intel_gsc_proxy_request_handler(gsc);
-               if (ret)
+               if (ret) {
+                       if (actions & GSC_ACTION_FW_LOAD) {
+                               /*
+                                * A proxy failure right after firmware load means the proxy-init
+                                * step has failed so mark GSC as not usable after this
+                                */
+                               drm_err(&gt->i915->drm,
+                                       "GSC proxy handler failed to init\n");
+                               intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+                       }
                        goto out_put;
+               }
 
                /* mark the GSC FW init as done the first time we run this */
                if (actions & GSC_ACTION_FW_LOAD) {
@@ -71,12 +82,13 @@ static void gsc_work(struct work_struct *work)
                         * complete the request handling cleanly, so we need to check the
                         * status register to check if the proxy init was actually successful
                         */
-                       if (intel_gsc_uc_fw_proxy_init_done(gsc)) {
+                       if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
                                drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
                                intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
                        } else {
                                drm_err(&gt->i915->drm,
                                        "GSC status reports proxy init not complete\n");
+                               intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
                        }
                }
        }
@@ -98,7 +110,7 @@ static bool gsc_engine_supported(struct intel_gt *gt)
        GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
 
        if (gt_is_root(gt))
-               mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+               mask = INTEL_INFO(gt->i915)->platform_engine_mask;
        else
                mask = gt->info.engine_mask;
 
@@ -133,26 +145,85 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
        }
 }
 
+static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size)
+{
+       struct intel_gt *gt = gsc_uc_to_gt(gsc);
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       void __iomem *vaddr;
+       int ret = 0;
+
+       /*
+        * The GSC FW doesn't immediately suspend after becoming idle, so there
+        * is a chance that it could still be awake after we successfully
+        * return from the PCI suspend function, even if there are no pending
+        * operations.
+        * The FW might therefore try to access memory for its suspend operation
+        * after the kernel has completed the HW suspend flow; this can cause
+        * issues if the FW is mapped in normal RAM memory, as some of the
+        * involved HW units might've already lost power.
+        * The driver must therefore avoid this situation, and the recommended
+        * way to do so is to use stolen memory for the GSC memory allocation,
+        * because stolen memory takes a different path in HW and it is
+        * guaranteed to always work as long as the GPU itself is awake (which
+        * it must be if the GSC is awake).
+        */
+       obj = i915_gem_object_create_stolen(gt->i915, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err;
+       }
+
+       vaddr = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
+               goto err;
+       }
+
+       i915_vma_make_unshrinkable(vma);
+
+       gsc->local = vma;
+       gsc->local_vaddr = vaddr;
+
+       return 0;
+
+err:
+       i915_gem_object_put(obj);
+       return ret;
+}
+
+static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc)
+{
+       struct i915_vma *vma = fetch_and_zero(&gsc->local);
+
+       if (!vma)
+               return;
+
+       gsc->local_vaddr = NULL;
+       i915_vma_unpin_iomap(vma);
+       i915_gem_object_put(vma->obj);
+}
+
 int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
 {
        static struct lock_class_key gsc_lock;
        struct intel_gt *gt = gsc_uc_to_gt(gsc);
        struct intel_engine_cs *engine = gt->engine[GSC0];
        struct intel_context *ce;
-       struct i915_vma *vma;
        int err;
 
        err = intel_uc_fw_init(&gsc->fw);
        if (err)
                goto out;
 
-       vma = intel_guc_allocate_vma(&gt->uc.guc, SZ_8M);
-       if (IS_ERR(vma)) {
-               err = PTR_ERR(vma);
+       err = gsc_allocate_and_map_vma(gsc, SZ_4M);
+       if (err)
                goto out_fw;
-       }
-
-       gsc->local = vma;
 
        ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
                                                I915_GEM_HWS_GSC_ADDR,
@@ -173,7 +244,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
        return 0;
 
 out_vma:
-       i915_vma_unpin_and_release(&gsc->local, 0);
+       gsc_unmap_and_free_vma(gsc);
 out_fw:
        intel_uc_fw_fini(&gsc->fw);
 out:
@@ -197,7 +268,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
        if (gsc->ce)
                intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
 
-       i915_vma_unpin_and_release(&gsc->local, 0);
+       gsc_unmap_and_free_vma(gsc);
 
        intel_uc_fw_fini(&gsc->fw);
 }
@@ -245,3 +316,45 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
 
        queue_work(gsc->wq, &gsc->work);
 }
+
+void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p)
+{
+       struct intel_gt *gt = gsc_uc_to_gt(gsc);
+       struct intel_uncore *uncore = gt->uncore;
+       intel_wakeref_t wakeref;
+
+       if (!intel_gsc_uc_is_supported(gsc)) {
+               drm_printf(p, "GSC not supported\n");
+               return;
+       }
+
+       if (!intel_gsc_uc_is_wanted(gsc)) {
+               drm_printf(p, "GSC disabled\n");
+               return;
+       }
+
+       drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path);
+       if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path)
+               drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path);
+       drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status));
+
+       drm_printf(p, "Release: %u.%u.%u.%u\n",
+                  gsc->release.major, gsc->release.minor,
+                  gsc->release.patch, gsc->release.build);
+
+       drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n",
+                  gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor,
+                  gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor);
+
+       drm_printf(p, "SVN: %u\n", gsc->security_version);
+
+       with_intel_runtime_pm(uncore->rpm, wakeref) {
+               u32 i;
+
+               for (i = 1; i <= 6; i++) {
+                       u32 status = intel_uncore_read(uncore,
+                                                      HECI_FWSTS(MTL_GSC_HECI1_BASE, i));
+                       drm_printf(p, "HECI1 FWSTST%u = 0x%08x\n", i, status);
+               }
+       }
+}
index a2a0813b8a76d4b6568edc9662b97a09b399bae8..c8082cf200fc708bf0c3248953ddafe48d337975 100644 (file)
@@ -8,6 +8,7 @@
 
 #include "intel_uc_fw.h"
 
+struct drm_printer;
 struct i915_vma;
 struct intel_context;
 struct i915_gsc_proxy_component;
@@ -17,7 +18,26 @@ struct intel_gsc_uc {
        struct intel_uc_fw fw;
 
        /* GSC-specific additions */
+
+       /*
+        * The GSC has 3 version numbers:
+        * - Release version (incremented with each build)
+        * - Security version (incremented on security fix)
+        * - Compatibility version (incremented on interface change)
+        *
+        * The compatibility version is the one that matters for using the
+        * binary, so that's the one we save inside the intel_uc_fw structure.
+        * The other two are only used for debug/info purposes and are saved
+        * here.
+        *
+        * Note that the release and security versions are available in the
+        * binary header, while the compatibility version must be queried after
+        * loading the binary.
+        */
+       struct intel_uc_fw_ver release;
+       u32 security_version;
+
        struct i915_vma *local; /* private memory for GSC usage */
+       void __iomem *local_vaddr; /* pointer to access the private memory */
        struct intel_context *ce; /* for submission to GSC FW via GSC engine */
 
        /* for delayed load and proxy handling */
@@ -44,6 +64,7 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
 void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
 void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
 void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p);
 
 static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
 {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c
new file mode 100644 (file)
index 0000000..5baacd8
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_debugfs.h"
+#include "gt/intel_gt_print.h"
+#include "intel_gsc_uc.h"
+#include "intel_gsc_uc_debugfs.h"
+#include "i915_drv.h"
+
+static int gsc_info_show(struct seq_file *m, void *data)
+{
+       struct drm_printer p = drm_seq_file_printer(m);
+       struct intel_gsc_uc *gsc = m->private;
+
+       if (!intel_gsc_uc_is_supported(gsc))
+               return -ENODEV;
+
+       intel_gsc_uc_load_status(gsc, &p);
+
+       return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(gsc_info);
+
+void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc_uc, struct dentry *root)
+{
+       static const struct intel_gt_debugfs_file files[] = {
+               { "gsc_info", &gsc_info_fops, NULL },
+       };
+
+       if (!intel_gsc_uc_is_supported(gsc_uc))
+               return;
+
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gsc_uc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h
new file mode 100644 (file)
index 0000000..3415ad3
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GSC_UC_H
+#define DEBUGFS_GSC_UC_H
+
+struct intel_gsc_uc;
+struct dentry;
+
+void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc, struct dentry *root);
+
+#endif /* DEBUGFS_GSC_UC_H */
index ef70e304904a391b0b2ba7161edc985a94d75af0..09d3fbdad05a537e99410378e03052827d11c3cc 100644 (file)
@@ -17,6 +17,7 @@ struct intel_gsc_mtl_header {
 #define GSC_HECI_VALIDITY_MARKER 0xA578875A
 
        u8 heci_client_id;
+#define HECI_MEADDRESS_MKHI 7
 #define HECI_MEADDRESS_PROXY 10
 #define HECI_MEADDRESS_PXP 17
 #define HECI_MEADDRESS_HDCP 18
index 2eb891b270aecaca84ee84a917f97e09e26f8e41..569b5fe94c416f238ff072cd113f5d1225724bec 100644 (file)
@@ -745,10 +745,11 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
                return ERR_CAST(obj);
 
        /*
-        * Wa_22016122933: For MTL the shared memory needs to be mapped
-        * as WC on CPU side and UC (PAT index 2) on GPU side
+        * Wa_22016122933: For Media version 13.0, all Media GT shared
+        * memory needs to be mapped as WC on CPU side and UC (PAT
+        * index 2) on GPU side.
         */
-       if (IS_METEORLAKE(gt->i915))
+       if (intel_gt_needs_wa_22016122933(gt))
                i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 
        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
@@ -792,8 +793,8 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
                return PTR_ERR(vma);
 
        vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
-                                                i915_coherent_map_type(guc_to_gt(guc)->i915,
-                                                                       vma->obj, true));
+                                                intel_gt_coherent_map_type(guc_to_gt(guc),
+                                                                           vma->obj, true));
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
index f28a3a83742dc45073c5207003e7ead9629de5d3..97eadd08181d614a9f62d462c0c37ee2d5df411e 100644 (file)
@@ -960,10 +960,6 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
        /* now update descriptor */
        WRITE_ONCE(desc->head, head);
 
-       /*
-        * Wa_22016122933: Making sure the head update is
-        * visible to GuC right away
-        */
        intel_guc_write_barrier(ct_to_guc(ct));
 
        return available - len;
index 364d0d546ec82bd73f7671d0ca9f2c6598f102f2..0f79cb658518268efec6e0244307f0698dad4ed3 100644 (file)
@@ -251,9 +251,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
                if (ret == 0)
                        ret = -ENXIO;
        } else if (delta_ms > 200) {
-               guc_warn(guc, "excessive init time: %lldms! [freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d]\n",
-                        delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
-                        before_freq, status, count, ret);
+               guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
+                        delta_ms, status, count, ret);
+               guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+                        intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
+                        intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
        } else {
                guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
                        delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
index 852bea0208ce445ebf0f570cb36f8c5be757d033..cc9569af7f0cd71c809b4bbb1546d6e0bcabd797 100644 (file)
@@ -94,7 +94,7 @@ static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig
 
 static bool has_table(struct drm_i915_private *i915)
 {
-       if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
+       if (IS_ALDERLAKE_P(i915) && !IS_ALDERLAKE_P_N(i915))
                return true;
        if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
                return true;
index ee9f83af7cf68863fb1369367c853b5b832a43c5..477df260ae3ac07949b946c3de40aa6863ae29c3 100644 (file)
@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
        ret = slpc_set_param(slpc,
                             SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
                             val);
-       if (ret)
+       if (ret) {
                guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
                                val, ERR_PTR(ret));
-       else
+       } else {
                slpc->ignore_eff_freq = val;
 
+               /* Set min to RPn when we disable efficient freq */
+               if (val)
+                       ret = slpc_set_param(slpc,
+                                            SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+                                            slpc->min_freq);
+       }
+
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_unlock(&slpc->lock);
        return ret;
@@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
                return ret;
 
        if (!slpc->min_freq_softlimit) {
-               ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
-               if (unlikely(ret))
-                       return ret;
+               /* Min softlimit is initialized to RPn */
+               slpc->min_freq_softlimit = slpc->min_freq;
                slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
        } else {
                return intel_guc_slpc_set_min_freq(slpc,
@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
                return ret;
        }
 
+       /* Set cached value of ignore efficient freq */
+       intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
        /* Revert SLPC min/max to softlimits if necessary */
        ret = slpc_set_softlimits(slpc);
        if (unlikely(ret)) {
@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
        /* Set cached media freq ratio mode */
        intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
 
-       /* Set cached value of ignore efficient freq */
-       intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
-
        return 0;
 }
 
index ddd146265beb42b399b2c9b49f459fbebddc9ee8..ba9e07fc2b57706d658ddc46eb46dd706842e310 100644 (file)
@@ -26,6 +26,7 @@
  * The kernel driver is only responsible for loading the HuC firmware and
  * triggering its security authentication. This is done differently depending
  * on the platform:
+ *
  * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
  *   and the authentication via GuC
  * - DG2: load and authentication are both performed via GSC.
@@ -33,6 +34,7 @@
  *   not-DG2 older platforms), while the authentication is done in 2-steps,
  *   a first auth for clear-media workloads via GuC and a second one for all
  *   workloads via GSC.
+ *
  * On platforms where the GuC does the authentication, to correctly do so the
  * HuC binary must be loaded before the GuC one.
  * Loading the HuC is optional; however, not using the HuC might negatively
@@ -265,7 +267,7 @@ static bool vcs_supported(struct intel_gt *gt)
        GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
 
        if (gt_is_root(gt))
-               mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+               mask = INTEL_INFO(gt->i915)->platform_engine_mask;
        else
                mask = gt->info.engine_mask;
 
@@ -308,9 +310,9 @@ void intel_huc_init_early(struct intel_huc *huc)
                huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
                huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
        } else {
-               huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS5(MTL_GSC_HECI1_BASE);
-               huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI_FWSTS5_HUC_AUTH_DONE;
-               huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI_FWSTS5_HUC_AUTH_DONE;
+               huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
+               huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
+               huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
        }
 }
 
index e608152fecfcc55ae285c22344ec7d0557d1be3c..b648238cc6757e67a99722a030a10c9112d267af 100644 (file)
@@ -27,7 +27,6 @@ struct mtl_huc_auth_msg_out {
 int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
 {
        struct intel_gt *gt = huc_to_gt(huc);
-       struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct mtl_huc_auth_msg_in *msg_in;
        struct mtl_huc_auth_msg_out *msg_out;
@@ -43,7 +42,7 @@ int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
        pkt_offset = i915_ggtt_offset(huc->heci_pkt);
 
        pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
-                                                    i915_coherent_map_type(i915, obj, true));
+                                                    intel_gt_coherent_map_type(gt, obj, true));
        if (IS_ERR(pkt_vaddr))
                return PTR_ERR(pkt_vaddr);
 
@@ -107,15 +106,6 @@ out_unpin:
        return err;
 }
 
-static void get_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, const void *data)
-{
-       const struct intel_gsc_manifest_header *manifest = data;
-
-       ver->major = manifest->fw_version.major;
-       ver->minor = manifest->fw_version.minor;
-       ver->patch = manifest->fw_version.hotfix;
-}
-
 static bool css_valid(const void *data, size_t size)
 {
        const struct uc_css_header *css = data;
@@ -227,8 +217,8 @@ int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, s
 
        for (i = 0; i < header->num_of_entries; i++, entry++) {
                if (strcmp(entry->name, "HUCP.man") == 0)
-                       get_version_from_gsc_manifest(&huc_fw->file_selected.ver,
-                                                     data + entry_offset(entry));
+                       intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver,
+                                                             data + entry_offset(entry));
 
                if (strcmp(entry->name, "huc_fw") == 0) {
                        u32 offset = entry_offset(entry);
index 18250fb64bd881f09f0c1db0ff5a49cc9344f7c0..98b103375b7ab0bdccd6fe85e6d2a3a709121a7c 100644 (file)
@@ -43,7 +43,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
        }
 
        /* Intermediate platforms are HuC authentication only */
-       if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
+       if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
                i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
                return;
        }
index 2f93cc4e408a87a1807324cf435c779a5ffc0ca9..6d541c866edb9ee72f462cb7072268edbf718882 100644 (file)
@@ -10,6 +10,7 @@
 
 #include "gt/intel_gt_debugfs.h"
 #include "intel_guc_debugfs.h"
+#include "intel_gsc_uc_debugfs.h"
 #include "intel_huc_debugfs.h"
 #include "intel_uc.h"
 #include "intel_uc_debugfs.h"
@@ -58,6 +59,7 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
 
        intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
 
+       intel_gsc_uc_debugfs_register(&uc->gsc, root);
        intel_guc_debugfs_register(&uc->guc, root);
        intel_huc_debugfs_register(&uc->huc, root);
 }
index 944725e624149695569e33dd97a3312e698573a7..8be005de1d28e020e8ed50c84c9ef4e476ac3213 100644 (file)
 #include <drm/drm_print.h>
 
 #include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_print.h"
+#include "intel_gsc_binary_headers.h"
+#include "intel_gsc_fw.h"
 #include "intel_uc_fw.h"
 #include "intel_uc_fw_abi.h"
 #include "i915_drv.h"
@@ -277,7 +280,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
         * ADL-S, otherwise the GuC might attempt to fetch a config table that
         * does not exist.
         */
-       if (IS_ADLP_N(i915))
+       if (IS_ALDERLAKE_P_N(i915))
                p = INTEL_ALDERLAKE_S;
 
        GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
@@ -468,6 +471,17 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
        }
 }
 
+void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
+                                          const void *data)
+{
+       const struct intel_gsc_manifest_header *manifest = data;
+
+       ver->major = manifest->fw_version.major;
+       ver->minor = manifest->fw_version.minor;
+       ver->patch = manifest->fw_version.hotfix;
+       ver->build = manifest->fw_version.build;
+}
+
 /**
  * intel_uc_fw_init_early - initialize the uC object and select the firmware
  * @uc_fw: uC firmware
@@ -668,13 +682,18 @@ static int check_gsc_manifest(struct intel_gt *gt,
                              const struct firmware *fw,
                              struct intel_uc_fw *uc_fw)
 {
-       if (uc_fw->type != INTEL_UC_FW_TYPE_HUC) {
-               gt_err(gt, "trying to GSC-parse a non-HuC binary");
+       switch (uc_fw->type) {
+       case INTEL_UC_FW_TYPE_HUC:
+               intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+               break;
+       case INTEL_UC_FW_TYPE_GSC:
+               intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+               break;
+       default:
+               MISSING_CASE(uc_fw->type);
                return -EINVAL;
        }
 
-       intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
-
        if (uc_fw->dma_start_offset) {
                u32 delta = uc_fw->dma_start_offset;
 
@@ -734,10 +753,6 @@ static int check_fw_header(struct intel_gt *gt,
 {
        int err = 0;
 
-       /* GSC FW version is queried after the FW is loaded */
-       if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
-               return 0;
-
        if (uc_fw->has_gsc_headers)
                err = check_gsc_manifest(gt, fw, uc_fw);
        else
@@ -773,6 +788,80 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
        return 0;
 }
 
+static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
+                                          struct intel_uc_fw_file *huc_selected)
+{
+       struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+       struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
+       struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
+       bool new_huc, new_guc;
+
+       /* we can only do this check after having fetched both GuC and HuC */
+       GEM_BUG_ON(!huc_selected->path || !guc_selected->path);
+
+       /*
+        * Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
+        * requires GuC 70.7.0 or newer. Older HuC binaries will instead require
+        * GuC < 70.7.0.
+        */
+       new_huc = huc_ver->major > 8 ||
+                 (huc_ver->major == 8 && huc_ver->minor > 5) ||
+                 (huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);
+
+       new_guc = guc_ver->major > 70 ||
+                 (guc_ver->major == 70 && guc_ver->minor >= 7);
+
+       if (new_huc != new_guc) {
+               UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
+                          huc_ver->major, huc_ver->minor, huc_ver->patch,
+                          guc_ver->major, guc_ver->minor, guc_ver->patch);
+               gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
+               return -ENOEXEC;
+       }
+
+       return 0;
+}
+
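
The pairing rule encoded above can be read off directly: HuC 8.5.1 or newer counts as "new", GuC 70.7.0 or newer counts as "new", and a mismatch between the two classifications is rejected. As a minimal userspace sketch of that predicate (the struct, helper names, and sample versions are illustrative, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    struct ver { unsigned int major, minor, patch; };

    /* HuC 8.5.1+ counts as "new" and must pair with GuC 70.7.0+. */
    static bool huc_is_new(struct ver v)
    {
        return v.major > 8 ||
               (v.major == 8 && v.minor > 5) ||
               (v.major == 8 && v.minor == 5 && v.patch >= 1);
    }

    static bool guc_is_new(struct ver v)
    {
        return v.major > 70 || (v.major == 70 && v.minor >= 7);
    }

    int main(void)
    {
        struct ver huc = { 8, 5, 1 }, guc = { 70, 6, 4 };

        /* HuC 8.5.1 is "new", GuC 70.6.4 is "old": the mix is rejected. */
        printf("compatible: %s\n",
               huc_is_new(huc) == guc_is_new(guc) ? "yes" : "no");
        return 0;
    }
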
+int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
+{
+       struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+       struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
+       struct intel_uc_fw_file *selected = &uc_fw->file_selected;
+       int ret;
+
+       /*
+        * MTL has a compatibility issue: early GuC/HuC binaries do not
+        * work with newer ones (and vice versa). This is specific to MTL
+        * and we don't expect it to extend to other platforms.
+        */
+       if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
+               ret = check_mtl_huc_guc_compatibility(gt, selected);
+               if (ret)
+                       return ret;
+       }
+
+       if (!wanted->ver.major || !selected->ver.major)
+               return 0;
+
+       /* Check that the file's major version is what it claimed */
+       if (selected->ver.major != wanted->ver.major) {
+               UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+                          intel_uc_fw_type_repr(uc_fw->type), selected->path,
+                          selected->ver.major, selected->ver.minor,
+                          wanted->ver.major, wanted->ver.minor);
+               if (!intel_uc_fw_is_overridden(uc_fw))
+                       return -ENOEXEC;
+       } else if (old_ver) {
+               if (selected->ver.minor < wanted->ver.minor)
+                       *old_ver = true;
+               else if ((selected->ver.minor == wanted->ver.minor) &&
+                        (selected->ver.patch < wanted->ver.patch))
+                       *old_ver = true;
+       }
+
+       return 0;
+}
+
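
The wanted-versus-selected comparison factored out above follows a simple rule: the major version must match exactly (fatal unless the firmware path was overridden), while an older minor, or an equal minor with an older patch, merely flags the blob as outdated. A standalone sketch with illustrative types and values:

    #include <stdbool.h>
    #include <stdio.h>

    struct fw_ver { unsigned int major, minor, patch; };

    /* Returns false on a hard major mismatch; sets *old_ver if merely older. */
    static bool check_file_version(struct fw_ver sel, struct fw_ver want,
                                   bool *old_ver)
    {
        if (sel.major != want.major)
            return false;
        if (sel.minor < want.minor ||
            (sel.minor == want.minor && sel.patch < want.patch))
            *old_ver = true;
        return true;
    }

    int main(void)
    {
        struct fw_ver want = { 70, 5, 1 }, sel = { 70, 4, 9 };
        bool old_ver = false;

        /* Same major, older minor: accepted, but flagged as outdated. */
        printf("ok=%d old=%d\n",
               check_file_version(sel, want, &old_ver), old_ver);
        return 0;
    }
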
 /**
  * intel_uc_fw_fetch - fetch uC firmware
  * @uc_fw: uC firmware
@@ -840,25 +929,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
                        goto fail;
        }
 
-       if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
-               /* Check the file's major version was as it claimed */
-               if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
-                       UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
-                                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
-                                  uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
-                                  uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
-                       if (!intel_uc_fw_is_overridden(uc_fw)) {
-                               err = -ENOEXEC;
-                               goto fail;
-                       }
-               } else {
-                       if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor)
-                               old_ver = true;
-                       else if ((uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
-                                (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
-                               old_ver = true;
-               }
-       }
+       err = intel_uc_check_file_version(uc_fw, &old_ver);
+       if (err)
+               goto fail;
 
        if (old_ver && uc_fw->file_selected.ver.major) {
                /* Preserve the version that was really wanted */
@@ -1125,7 +1198,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
                return PTR_ERR(vma);
 
        vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
-                                                i915_coherent_map_type(gt->i915, vma->obj, true));
+                                                intel_gt_coherent_map_type(gt, vma->obj, true));
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                err = PTR_ERR(vaddr);
index 054f0281197185049f0f3901fed203d8ac298887..9a431726c8d5b1abb5c00b58cbbde2a3764c866a 100644 (file)
@@ -70,6 +70,7 @@ struct intel_uc_fw_ver {
        u32 major;
        u32 minor;
        u32 patch;
+       u32 build;
 };
 
 /*
@@ -289,6 +290,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
        return __intel_uc_fw_get_upload_size(uc_fw);
 }
 
+void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
+                                          const void *data);
+int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver);
 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
                            enum intel_uc_fw_type type,
                            bool needs_ggtt_mapping);
index 1fd760539f77bc67835288c484eaf62412524056..bfb72143566f6e1b7591e8ad5aff8a18a99dc73b 100644 (file)
@@ -204,9 +204,9 @@ static int intel_guc_steal_guc_ids(void *arg)
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        rq = NULL;
-                       if (ret != -EAGAIN) {
-                               guc_err(guc, "Failed to create request %d: %pe\n",
-                                       context_index, ERR_PTR(ret));
+                       if ((ret != -EAGAIN) || !last) {
+                               guc_err(guc, "Failed to create %srequest %d: %pe\n",
+                                       last ? "" : "first ", context_index, ERR_PTR(ret));
                                goto err_spin_rq;
                        }
                } else {
index f4055804aad1feb470422a5f808454359390e771..a5c8005ec484c32b3225a7ae6a18d94f0524fb14 100644 (file)
@@ -974,7 +974,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
        context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
 
-       if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
+       if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
                context_page_num = 19;
 
        context_base = (void *) ctx->lrc_reg_state -
index 8ef93889061a6367e3558137a816efa0e32599da..5ec293011d99029f211301d2789272603f7f3dde 100644 (file)
@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
                }
        } while (unlikely(is_barrier(active)));
 
-       if (!__i915_active_fence_set(active, fence))
+       fence = __i915_active_fence_set(active, fence);
+       if (!fence)
                __i915_active_acquire(ref);
+       else
+               dma_fence_put(fence);
 
 out:
        i915_active_release(ref);
@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
                return NULL;
        }
 
-       rcu_read_lock();
        prev = __i915_active_fence_set(active, fence);
-       if (prev)
-               prev = dma_fence_get_rcu(prev);
-       else
+       if (!prev)
                __i915_active_acquire(ref);
-       rcu_read_unlock();
 
        return prev;
 }
@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  *
  * Records the new @fence as the last active fence along its timeline in
  * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
        struct dma_fence *prev;
        unsigned long flags;
 
-       if (fence == rcu_access_pointer(active->fence))
+       /*
+        * In the case of fences embedded in i915_requests, their memory is
+        * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
+        * by new requests.  Then, there is a risk of passing back a pointer
+        * to a new, completely unrelated fence that reuses the same memory
+        * while tracked under a different active tracker.  Combined with i915
+        * perf open/close operations that build await dependencies between
+        * engine kernel context requests and user requests from different
+        * timelines, this can lead to dependency loops and infinite waits.
+        *
+        * As a countermeasure, we try to get a reference to the active->fence
+        * first, so if we succeed and pass it back to our user then it is not
+        * released and potentially reused by an unrelated request before the
+        * user has a chance to set up an await dependency on it.
+        */
+       prev = i915_active_fence_get(active);
+       if (fence == prev)
                return fence;
 
        GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
         * Consider that we have two threads arriving (A and B), with
         * C already resident as the active->fence.
         *
-        * A does the xchg first, and so it sees C or NULL depending
-        * on the timing of the interrupt handler. If it is NULL, the
-        * previous fence must have been signaled and we know that
-        * we are first on the timeline. If it is still present,
-        * we acquire the lock on that fence and serialise with the interrupt
-        * handler, in the process removing it from any future interrupt
-        * callback. A will then wait on C before executing (if present).
-        *
-        * As B is second, it sees A as the previous fence and so waits for
-        * it to complete its transition and takes over the occupancy for
-        * itself -- remembering that it needs to wait on A before executing.
+        * Both A and B have got a reference to C or NULL, depending on the
+        * timing of the interrupt handler.  Let's assume that if A has got C
+        * then it has locked C first (before B).
         *
         * Note the strong ordering of the timeline also provides consistent
         * nesting rules for the fence->lock; the inner lock is always the
         * older lock.
         */
        spin_lock_irqsave(fence->lock, flags);
-       prev = xchg(__active_fence_slot(active), fence);
-       if (prev) {
-               GEM_BUG_ON(prev == fence);
+       if (prev)
                spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+       /*
+        * A does the cmpxchg first, and so it sees C or NULL, as before, or
+        * something else, depending on the timing of other threads and/or
+        * the interrupt handler.  If not the same as before, A unlocks C if
+        * applicable and retries, starting from an attempt to get a new
+        * active->fence.  Meanwhile, B follows the same path as A.
+        * Once A succeeds with cmpxchg, B fails again, retries, gets A from
+        * active->fence, locks it as soon as A completes, and possibly
+        * succeeds with cmpxchg.
+        */
+       while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+               if (prev) {
+                       spin_unlock(prev->lock);
+                       dma_fence_put(prev);
+               }
+               spin_unlock_irqrestore(fence->lock, flags);
+
+               prev = i915_active_fence_get(active);
+               GEM_BUG_ON(prev == fence);
+
+               spin_lock_irqsave(fence->lock, flags);
+               if (prev)
+                       spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+       }
+
+       /*
+        * If prev is NULL then the previous fence must have been signaled
+        * and we know that we are first on the timeline.  If it is still
+        * present then, having the lock on that fence already acquired, we
+        * serialise with the interrupt handler, in the process of removing it
+        * from any future interrupt callback.  A will then wait on C before
+        * executing (if present).
+        *
+        * As B is second, it sees A as the previous fence and so waits for
+        * it to complete its transition and takes over the occupancy for
+        * itself -- remembering that it needs to wait on A before executing.
+        */
+       if (prev) {
                __list_del_entry(&active->cb.node);
                spin_unlock(prev->lock); /* serialise with prev->cb_list */
        }
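
The comment blocks above describe a pin-then-publish protocol: take a reference to the current fence, lock it, and only then try to swap it out of the slot, retrying whenever the slot changed in between. A rough userspace analogue using C11 atomics — reference counting stands in for dma_fence get/put, and the per-fence spinlock handling of the real code is omitted — looks like this:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fence { atomic_int refcount; };

    static struct fence *fence_get(struct fence *f)
    {
        atomic_fetch_add(&f->refcount, 1);
        return f;
    }

    static void fence_put(struct fence *f)
    {
        atomic_fetch_sub(&f->refcount, 1);
    }

    /* Swap in newf; return the pinned previous fence (caller must put it). */
    static struct fence *slot_set(_Atomic(struct fence *) *slot,
                                  struct fence *newf)
    {
        struct fence *prev;

        for (;;) {
            struct fence *expected;

            prev = atomic_load(slot);
            if (prev)
                prev = fence_get(prev);    /* pin before publishing newf */

            expected = prev;
            if (atomic_compare_exchange_strong(slot, &expected, newf))
                break;                     /* slot unchanged since the pin */

            if (prev)
                fence_put(prev);           /* raced with someone: retry */
        }

        return prev;
    }

    int main(void)
    {
        struct fence a = { 1 }, b = { 1 };
        _Atomic(struct fence *) slot = &a;
        struct fence *prev = slot_set(&slot, &b);

        printf("prev pinned: %d\n", prev && atomic_load(&prev->refcount) == 2);
        if (prev)
            fence_put(prev);
        return 0;
    }
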
@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
        int err = 0;
 
        /* Must maintain timeline ordering wrt previous active requests */
-       rcu_read_lock();
        fence = __i915_active_fence_set(active, &rq->fence);
-       if (fence) /* but the previous fence may not belong to that timeline! */
-               fence = dma_fence_get_rcu(fence);
-       rcu_read_unlock();
        if (fence) {
                err = i915_request_await_dma_fence(rq, fence);
                dma_fence_put(fence);
index 76ccd4e03e3128f7ae968ba436f8aaec5e421e8c..4de44cf1026dcee0528771c71f6efd98e2d97345 100644 (file)
@@ -67,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
 
        intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
+       intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
        i915_print_iommu_status(i915, &p);
        intel_gt_info_print(&to_gt(i915)->info, &p);
        intel_driver_caps_print(&i915->caps, &p);
index 0ad0c5885ec27664373e120737696373e338cc0d..b870c0df081a0e1caca7833d6e21f671c8f20333 100644 (file)
@@ -175,7 +175,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 {
        bool pre = false;
 
-       pre |= IS_HSW_EARLY_SDV(dev_priv);
+       pre |= IS_HASWELL_EARLY_SDV(dev_priv);
        pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
        pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
        pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
@@ -711,6 +711,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 
                intel_device_info_print(INTEL_INFO(dev_priv),
                                        RUNTIME_INFO(dev_priv), &p);
+               intel_display_device_info_print(DISPLAY_INFO(dev_priv),
+                                               DISPLAY_RUNTIME_INFO(dev_priv), &p);
                i915_print_iommu_status(dev_priv, &p);
                for_each_gt(gt, dev_priv, i)
                        intel_gt_info_print(&gt->info, &p);
@@ -1818,8 +1820,6 @@ static const struct drm_driver i915_drm_driver = {
        .postclose = i915_driver_postclose,
        .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = i915_gem_prime_import,
 
        .dumb_create = i915_gem_dumb_create,
index b4cf6f0f636d6e5172b491cd07510766dd8f6919..7a8ce7239bc9e3b04074d991ec6678764acd20e2 100644 (file)
@@ -203,9 +203,8 @@ struct drm_i915_private {
        /* i915 device parameters */
        struct i915_params params;
 
-       const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+       const struct intel_device_info *__info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
-       struct intel_display_runtime_info __display_runtime; /* Access with DISPLAY_RUNTIME_INFO() */
        struct intel_driver_caps caps;
 
        struct i915_dsm dsm;
@@ -324,7 +323,6 @@ struct drm_i915_private {
        /*
         * i915->gt[0] == &i915->gt0
         */
-#define I915_MAX_GT 2
        struct intel_gt *gt[I915_MAX_GT];
 
        struct kobject *sysfs_gt;
@@ -416,10 +414,10 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
             (engine__) && (engine__)->uabi_class == (class__); \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
 
-#define INTEL_INFO(i915)       (&(i915)->__info)
-#define DISPLAY_INFO(i915)     (INTEL_INFO(i915)->display)
+#define INTEL_INFO(i915)       ((i915)->__info)
 #define RUNTIME_INFO(i915)     (&(i915)->__runtime)
-#define DISPLAY_RUNTIME_INFO(i915)     (&(i915)->__display_runtime)
+#define DISPLAY_INFO(i915)     ((i915)->display.info.__device_info)
+#define DISPLAY_RUNTIME_INFO(i915)     (&(i915)->display.info.__runtime_info)
 #define DRIVER_CAPS(i915)      (&(i915)->caps)
 
 #define INTEL_DEVID(i915)      (RUNTIME_INFO(i915)->device_id)
@@ -563,8 +561,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_COFFEELAKE(i915)    IS_PLATFORM(i915, INTEL_COFFEELAKE)
 #define IS_COMETLAKE(i915)     IS_PLATFORM(i915, INTEL_COMETLAKE)
 #define IS_ICELAKE(i915)       IS_PLATFORM(i915, INTEL_ICELAKE)
-#define IS_JSL_EHL(i915)       (IS_PLATFORM(i915, INTEL_JASPERLAKE) || \
-                               IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+#define IS_JASPERLAKE(i915)    IS_PLATFORM(i915, INTEL_JASPERLAKE)
+#define IS_ELKHARTLAKE(i915)   IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
 #define IS_TIGERLAKE(i915)     IS_PLATFORM(i915, INTEL_TIGERLAKE)
 #define IS_ROCKETLAKE(i915)    IS_PLATFORM(i915, INTEL_ROCKETLAKE)
 #define IS_DG1(i915)        IS_PLATFORM(i915, INTEL_DG1)
@@ -585,105 +583,77 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
        IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
 #define IS_DG2_G12(i915) \
        IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
-#define IS_ADLS_RPLS(i915) \
+#define IS_RAPTORLAKE_S(i915) \
        IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_N(i915) \
+#define IS_ALDERLAKE_P_N(i915) \
        IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
-#define IS_ADLP_RPLP(i915) \
+#define IS_RAPTORLAKE_P(i915) \
        IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_RPLU(i915) \
+#define IS_RAPTORLAKE_U(i915) \
        IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
-#define IS_HSW_EARLY_SDV(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_EARLY_SDV(i915) (IS_HASWELL(i915) && \
                                    (INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(i915) \
+#define IS_BROADWELL_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_BDW_ULX(i915) \
+#define IS_BROADWELL_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BDW_GT3(i915)       (IS_BROADWELL(i915) && \
+#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \
                                 INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_ULT(i915) \
+#define IS_HASWELL_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HSW_GT3(i915)       (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT3(i915)   (IS_HASWELL(i915) && \
                                 INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_GT1(i915)       (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT1(i915)   (IS_HASWELL(i915) && \
                                 INTEL_INFO(i915)->gt == 1)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(i915) \
+#define IS_HASWELL_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_ULT(i915) \
+#define IS_SKYLAKE_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_SKL_ULX(i915) \
+#define IS_SKYLAKE_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_KBL_ULT(i915) \
+#define IS_KABYLAKE_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_KBL_ULX(i915) \
+#define IS_KABYLAKE_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT2(i915)       (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT2(i915)   (IS_SKYLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 2)
-#define IS_SKL_GT3(i915)       (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT3(i915)   (IS_SKYLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 3)
-#define IS_SKL_GT4(i915)       (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT4(i915)   (IS_SKYLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 4)
-#define IS_KBL_GT2(i915)       (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT2(i915)  (IS_KABYLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 2)
-#define IS_KBL_GT3(i915)       (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT3(i915)  (IS_KABYLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 3)
-#define IS_CFL_ULT(i915) \
+#define IS_COFFEELAKE_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CFL_ULX(i915) \
+#define IS_COFFEELAKE_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CFL_GT2(i915)       (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT2(i915)        (IS_COFFEELAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 2)
-#define IS_CFL_GT3(i915)       (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT3(i915)        (IS_COFFEELAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 3)
 
-#define IS_CML_ULT(i915) \
+#define IS_COMETLAKE_ULT(i915) \
        IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CML_ULX(i915) \
+#define IS_COMETLAKE_ULX(i915) \
        IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CML_GT2(i915)       (IS_COMETLAKE(i915) && \
+#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \
                                 INTEL_INFO(i915)->gt == 2)
 
 #define IS_ICL_WITH_PORT_F(i915) \
        IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
 
-#define IS_TGL_UY(i915) \
+#define IS_TIGERLAKE_UY(i915) \
        IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
 
-#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
 
-#define IS_KBL_GRAPHICS_STEP(i915, since, until) \
-       (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, since, until))
-#define IS_KBL_DISPLAY_STEP(i915, since, until) \
-       (IS_KABYLAKE(i915) && IS_DISPLAY_STEP(i915, since, until))
 
-#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
-       (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
-       (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
 
-#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
-       (IS_TIGERLAKE(__i915) && \
-        IS_DISPLAY_STEP(__i915, since, until))
-
-#define IS_RKL_DISPLAY_STEP(p, since, until) \
-       (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
 
-#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
-       (IS_ALDERLAKE_S(__i915) && \
-        IS_DISPLAY_STEP(__i915, since, until))
 
-#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
-       (IS_ALDERLAKE_S(__i915) && \
-        IS_GRAPHICS_STEP(__i915, since, until))
 
-#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
-       (IS_ALDERLAKE_P(__i915) && \
-        IS_DISPLAY_STEP(__i915, since, until))
-
-#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
-       (IS_ALDERLAKE_P(__i915) && \
-        IS_GRAPHICS_STEP(__i915, since, until))
 
 #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
        (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
@@ -801,7 +771,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 /* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(i915)                       \
-       (IS_SKL_GT3(i915) || IS_SKL_GT4(i915))
+       (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915))
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -839,7 +809,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  */
 #define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages)
 
-#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
 
 #define HAS_EXTRA_GT_LIST(i915)   (INTEL_INFO(i915)->extra_gt_list)
@@ -862,7 +832,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 /* DPF == dynamic parity feature */
 #define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
-#define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? \
+#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \
                                 2 : HAS_L3_DPF(i915))
 
 /* Only valid when HAS_DISPLAY() is true */
index 0c38bfb60c9a86e275f4c7eeccfe5ad6b1a18328..4008bb09fdb5d85ab53867ee5a0413efb236448e 100644 (file)
@@ -649,6 +649,8 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
        struct drm_printer p = i915_error_printer(m);
 
        intel_device_info_print(&error->device_info, &error->runtime_info, &p);
+       intel_display_device_info_print(&error->display_device_info,
+                                       &error->display_runtime_info, &p);
        intel_driver_caps_print(&error->driver_caps, &p);
 }
 
@@ -1173,9 +1175,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 
                        drm_clflush_pages(&page, 1);
 
-                       s = kmap(page);
+                       s = kmap_local_page(page);
                        ret = compress_page(compress, s, dst, false);
-                       kunmap(page);
+                       kunmap_local(s);
 
                        drm_clflush_pages(&page, 1);
 
@@ -1983,6 +1985,10 @@ static void capture_gen(struct i915_gpu_coredump *error)
        memcpy(&error->runtime_info,
               RUNTIME_INFO(i915),
               sizeof(error->runtime_info));
+       memcpy(&error->display_device_info, DISPLAY_INFO(i915),
+              sizeof(error->display_device_info));
+       memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
+              sizeof(error->display_runtime_info));
        error->driver_caps = i915->caps;
 }
 
index a78c061ce26fb0067b0c51c2658670d7eacb91c6..9f5971f5e980145d940bb5c59701471071d393cd 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <drm/drm_mm.h>
 
+#include "display/intel_display_device.h"
 #include "gt/intel_engine.h"
 #include "gt/intel_gt_types.h"
 #include "gt/uc/intel_uc_fw.h"
@@ -209,6 +210,8 @@ struct i915_gpu_coredump {
 
        struct intel_device_info device_info;
        struct intel_runtime_info runtime_info;
+       struct intel_display_device_info display_device_info;
+       struct intel_display_runtime_info display_runtime_info;
        struct intel_driver_caps driver_caps;
        struct i915_params params;
 
index 82fbabcdd7a594f97d4c8a3526ca5b67979610cf..1bfcfbe6e30b8edfde1d8022ddff2c9712bc33a6 100644 (file)
@@ -423,7 +423,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 static irqreturn_t ilk_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       void __iomem * const regs = i915->uncore.regs;
+       void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
@@ -511,7 +511,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private *dev_priv = arg;
-       void __iomem * const regs = dev_priv->uncore.regs;
+       void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
        u32 master_ctl;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -561,7 +561,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       void __iomem * const regs = i915->uncore.regs;
+       void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        struct intel_gt *gt = to_gt(i915);
        u32 master_ctl;
        u32 gu_misc_iir;
@@ -619,7 +619,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private * const i915 = arg;
        struct intel_gt *gt = to_gt(i915);
-       void __iomem * const regs = gt->uncore->regs;
+       void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 master_tile_ctl, master_ctl;
        u32 gu_misc_iir;
 
@@ -711,7 +711,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
 
-       gen8_master_intr_disable(uncore->regs);
+       gen8_master_intr_disable(intel_uncore_regs(uncore));
 
        gen8_gt_irq_reset(to_gt(dev_priv));
        gen8_display_irq_reset(dev_priv);
@@ -727,7 +727,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
        struct intel_gt *gt = to_gt(dev_priv);
        struct intel_uncore *uncore = gt->uncore;
 
-       gen11_master_intr_disable(dev_priv->uncore.regs);
+       gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
 
        gen11_gt_irq_reset(gt);
        gen11_display_irq_reset(dev_priv);
@@ -742,7 +742,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
        struct intel_gt *gt;
        unsigned int i;
 
-       dg1_master_intr_disable(dev_priv->uncore.regs);
+       dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
 
        for_each_gt(gt, dev_priv, i)
                gen11_gt_irq_reset(gt);
@@ -772,45 +772,9 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
 
 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       struct intel_uncore *uncore = &dev_priv->uncore;
-       u32 display_mask, extra_mask;
-
-       if (GRAPHICS_VER(dev_priv) >= 7) {
-               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
-                               DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
-               extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
-                             DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
-                             DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
-                             DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
-                             DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
-                             DE_DP_A_HOTPLUG_IVB);
-       } else {
-               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                               DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
-                               DE_PIPEA_CRC_DONE | DE_POISON);
-               extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
-                             DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
-                             DE_PLANE_FLIP_DONE(PLANE_A) |
-                             DE_PLANE_FLIP_DONE(PLANE_B) |
-                             DE_DP_A_HOTPLUG);
-       }
-
-       if (IS_HASWELL(dev_priv)) {
-               gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
-               display_mask |= DE_EDP_PSR_INT_HSW;
-       }
-
-       if (IS_IRONLAKE_M(dev_priv))
-               extra_mask |= DE_PCU_EVENT;
-
-       dev_priv->irq_mask = ~display_mask;
-
-       ibx_irq_postinstall(dev_priv);
-
        gen5_gt_irq_postinstall(to_gt(dev_priv));
 
-       GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
-                     display_mask | extra_mask);
+       ilk_de_irq_postinstall(dev_priv);
 }
 
 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -828,15 +792,10 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-               icp_irq_postinstall(dev_priv);
-       else if (HAS_PCH_SPLIT(dev_priv))
-               ibx_irq_postinstall(dev_priv);
-
        gen8_gt_irq_postinstall(to_gt(dev_priv));
        gen8_de_irq_postinstall(dev_priv);
 
-       gen8_master_intr_enable(dev_priv->uncore.regs);
+       gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
 }
 
 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -845,15 +804,12 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
        struct intel_uncore *uncore = gt->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;
 
-       if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-               icp_irq_postinstall(dev_priv);
-
        gen11_gt_irq_postinstall(gt);
        gen11_de_irq_postinstall(dev_priv);
 
        GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
 
-       gen11_master_intr_enable(uncore->regs);
+       gen11_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
 }
 
@@ -869,18 +825,9 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
 
        GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
 
-       if (HAS_DISPLAY(dev_priv)) {
-               if (DISPLAY_VER(dev_priv) >= 14)
-                       mtp_irq_postinstall(dev_priv);
-               else
-                       icp_irq_postinstall(dev_priv);
+       dg1_de_irq_postinstall(dev_priv);
 
-               gen8_de_irq_postinstall(dev_priv);
-               intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
-                                  GEN11_DISPLAY_IRQ_ENABLE);
-       }
-
-       dg1_master_intr_enable(uncore->regs);
+       dg1_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
 }
 
@@ -1343,23 +1290,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
        if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
                to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
-
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-
-       dev_priv->drm.vblank_disable_immediate = true;
-
-       /* Most platforms treat the display irq block as an always-on
-        * power domain. vlv/chv can disable it at runtime and need
-        * special care to avoid writing any of the display block registers
-        * outside of the power domain. We defer setting up the display irqs
-        * in this case to the runtime pm.
-        */
-       dev_priv->display_irqs_enabled = true;
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               dev_priv->display_irqs_enabled = false;
-
-       intel_hotplug_irq_init(dev_priv);
 }
 
 /**
index 3d7a5db9833b731e02e17d37dec97e07e0e11a99..fcacdc21643cfa469e3f9d599918f75f07d6ba54 100644 (file)
@@ -38,9 +38,6 @@
 #include "i915_reg.h"
 #include "intel_pci_config.h"
 
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
 #define PLATFORM(x) .platform = (x)
 #define GEN(x) \
        .__runtime.graphics.ip.ver = (x), \
@@ -84,7 +81,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
        .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
 
 #define GEN_DEFAULT_REGIONS \
-       .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
+       .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
 
 #define I830_FEATURES \
        GEN(2), \
@@ -93,7 +90,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
        .has_3d_pipeline = 1, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
-       .__runtime.platform_engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
@@ -108,7 +105,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
        .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
-       .__runtime.platform_engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
@@ -140,7 +137,7 @@ static const struct intel_device_info i865g_info = {
 #define GEN3_FEATURES \
        GEN(3), \
        .gpu_reset_clobbers_display = true, \
-       .__runtime.platform_engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -203,7 +200,7 @@ static const struct intel_device_info pnv_m_info = {
 #define GEN4_FEATURES \
        GEN(4), \
        .gpu_reset_clobbers_display = true, \
-       .__runtime.platform_engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -231,7 +228,7 @@ static const struct intel_device_info i965gm_info = {
 static const struct intel_device_info g45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_G45),
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
 };
 
@@ -239,13 +236,13 @@ static const struct intel_device_info gm45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_GM45),
        .is_mobile = 1,
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
 };
 
 #define GEN5_FEATURES \
        GEN(5), \
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@ -271,7 +268,7 @@ static const struct intel_device_info ilk_m_info = {
 
 #define GEN6_FEATURES \
        GEN(6), \
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_3d_pipeline = 1, \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
@@ -319,7 +316,7 @@ static const struct intel_device_info snb_m_gt2_info = {
 
 #define GEN7_FEATURES  \
        GEN(7), \
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_3d_pipeline = 1, \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
@@ -387,7 +384,7 @@ static const struct intel_device_info vlv_info = {
        .__runtime.ppgtt_size = 31,
        .has_snoop = true,
        .has_coherent_ggtt = false,
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
        GEN_DEFAULT_PAGE_SIZES,
        GEN_DEFAULT_REGIONS,
        LEGACY_CACHELEVEL,
@@ -395,7 +392,7 @@ static const struct intel_device_info vlv_info = {
 
 #define G75_FEATURES  \
        GEN7_FEATURES, \
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
        .has_rc6p = 0 /* RC6p removed-by HSW */, \
        .has_runtime_pm = 1
 
@@ -453,7 +450,7 @@ static const struct intel_device_info bdw_rsvd_info = {
 static const struct intel_device_info bdw_gt3_info = {
        BDW_PLATFORM,
        .gt = 3,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -461,7 +458,7 @@ static const struct intel_device_info chv_info = {
        PLATFORM(INTEL_CHERRYVIEW),
        GEN(8),
        .is_lp = 1,
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
        .has_64bit_reloc = 1,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
@@ -505,7 +502,7 @@ static const struct intel_device_info skl_gt2_info = {
 
 #define SKL_GT3_PLUS_PLATFORM \
        SKL_PLATFORM, \
-       .__runtime.platform_engine_mask = \
+       .platform_engine_mask = \
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
 
 
@@ -522,7 +519,7 @@ static const struct intel_device_info skl_gt4_info = {
 #define GEN9_LP_FEATURES \
        GEN(9), \
        .is_lp = 1, \
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
        .has_3d_pipeline = 1, \
        .has_64bit_reloc = 1, \
        .has_runtime_pm = 1, \
@@ -568,7 +565,7 @@ static const struct intel_device_info kbl_gt2_info = {
 static const struct intel_device_info kbl_gt3_info = {
        KBL_PLATFORM,
        .gt = 3,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -589,7 +586,7 @@ static const struct intel_device_info cfl_gt2_info = {
 static const struct intel_device_info cfl_gt3_info = {
        CFL_PLATFORM,
        .gt = 3,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -622,21 +619,21 @@ static const struct intel_device_info cml_gt2_info = {
 static const struct intel_device_info icl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ICELAKE),
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
 static const struct intel_device_info ehl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ELKHARTLAKE),
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
        .__runtime.ppgtt_size = 36,
 };
 
 static const struct intel_device_info jsl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_JASPERLAKE),
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
        .__runtime.ppgtt_size = 36,
 };
 
@@ -651,19 +648,19 @@ static const struct intel_device_info jsl_info = {
 static const struct intel_device_info tgl_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_TIGERLAKE),
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
 static const struct intel_device_info rkl_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_ROCKETLAKE),
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
 };
 
 #define DGFX_FEATURES \
-       .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
+       .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
        .has_llc = 0, \
        .has_pxp = 0, \
        .has_snoop = 1, \
@@ -676,7 +673,7 @@ static const struct intel_device_info dg1_info = {
        .__runtime.graphics.ip.rel = 10,
        PLATFORM(INTEL_DG1),
        .require_force_probe = 1,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
                BIT(VCS0) | BIT(VCS2),
        /* Wa_16011227922 */
@@ -686,7 +683,7 @@ static const struct intel_device_info dg1_info = {
 static const struct intel_device_info adl_s_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_ALDERLAKE_S),
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
        .dma_mask_size = 39,
 };
@@ -694,7 +691,7 @@ static const struct intel_device_info adl_s_info = {
 static const struct intel_device_info adl_p_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_ALDERLAKE_P),
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
        .__runtime.ppgtt_size = 48,
        .dma_mask_size = 39,
@@ -746,7 +743,7 @@ static const struct intel_device_info xehpsdv_info = {
        PLATFORM(INTEL_XEHPSDV),
        .has_64k_pages = 1,
        .has_media_ratio_mode = 1,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) |
                BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
                BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
@@ -766,7 +763,7 @@ static const struct intel_device_info xehpsdv_info = {
        .has_guc_deprivilege = 1, \
        .has_heci_pxp = 1, \
        .has_media_ratio_mode = 1, \
-       .__runtime.platform_engine_mask = \
+       .platform_engine_mask = \
                BIT(RCS0) | BIT(BCS0) | \
                BIT(VECS0) | BIT(VECS1) | \
                BIT(VCS0) | BIT(VCS2) | \
@@ -801,7 +798,7 @@ static const struct intel_device_info pvc_info = {
        PLATFORM(INTEL_PONTEVECCHIO),
        .has_flat_ccs = 0,
        .max_pat_index = 7,
-       .__runtime.platform_engine_mask =
+       .platform_engine_mask =
                BIT(BCS0) |
                BIT(VCS0) |
                BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
@@ -838,16 +835,14 @@ static const struct intel_device_info mtl_info = {
        .has_snoop = 1,
        .max_pat_index = 4,
        .has_pxp = 1,
-       .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
-       .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+       .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+       .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
        .require_force_probe = 1,
        MTL_CACHELEVEL,
 };
 
 #undef PLATFORM
 
-__diag_pop();
-
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
index 0a111b281578ba0fc95b7902923e44ed2ae894bf..04bc1f4a111504c7ced76345c288aecf17ba5356 100644 (file)
@@ -868,8 +868,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                        oa_report_id_clear(stream, report32);
                        oa_timestamp_clear(stream, report32);
                } else {
+                       u8 *oa_buf_end = stream->oa_buffer.vaddr +
+                                        OA_BUFFER_SIZE;
+                       u32 part = oa_buf_end - (u8 *)report32;
+
                        /* Zero out the entire report */
-                       memset(report32, 0, report_size);
+                       if (report_size <= part) {
+                               memset(report32, 0, report_size);
+                       } else {
+                               memset(report32, 0, part);
+                               memset(oa_buf_base, 0, report_size - part);
+                       }
                }
        }
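
The hunk above fixes report zeroing for the case where a report wraps past the end of the circular OA buffer: only the bytes up to the buffer end can be cleared in one memset, and the remainder starts again at the base. A self-contained model of that wrap-around zeroing (buffer and report sizes here are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 16u

    /* Zero 'len' bytes starting at 'off', wrapping at BUF_SIZE. */
    static void zero_wrapped(uint8_t *buf, unsigned int off, unsigned int len)
    {
        unsigned int part = BUF_SIZE - off;    /* bytes up to the end */

        if (len <= part) {
            memset(buf + off, 0, len);
        } else {
            memset(buf + off, 0, part);        /* tail of the buffer */
            memset(buf, 0, len - part);        /* wrapped remainder */
        }
    }

    int main(void)
    {
        uint8_t buf[BUF_SIZE];

        memset(buf, 0xaa, sizeof(buf));
        zero_wrapped(buf, 12, 8);    /* clears [12,16) and [0,4) */

        for (unsigned int i = 0; i < BUF_SIZE; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }
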
 
@@ -1310,7 +1319,7 @@ __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
        u32 *cs, cmd;
 
        cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->i915) >= 8)
                cmd++;
 
        cs = intel_ring_begin(rq, 4);
@@ -4422,6 +4431,7 @@ static const struct i915_range mtl_oam_b_counters[] = {
 static const struct i915_range xehp_oa_b_counters[] = {
        { .start = 0xdc48, .end = 0xdc48 },     /* OAA_ENABLE_REG */
        { .start = 0xdd00, .end = 0xdd48 },     /* OAG_LCE0_0 - OAA_LENABLE_REG */
+       {}
 };
 
 static const struct i915_range gen7_oa_mux_regs[] = {
index 7a4f462e8b70bc115cbf25cb2378b31e995965ab..aefad14ab27a42da906cfea782057e1452327c1d 100644 (file)
 #define HECI_H_GS1(base)       _MMIO((base) + 0xc4c)
 #define   HECI_H_GS1_ER_PREP   REG_BIT(0)
 
-#define HECI_FWSTS5(base)              _MMIO((base) + 0xc68)
-#define   HECI_FWSTS5_HUC_AUTH_DONE    (1 << 19)
+/*
+ * The FWSTS register values are FW defined and can be different between
+ * HECI1 and HECI2
+ */
+#define HECI_FWSTS1                            0xc40
+#define   HECI1_FWSTS1_CURRENT_STATE                   REG_GENMASK(3, 0)
+#define   HECI1_FWSTS1_CURRENT_STATE_RESET             0
+#define   HECI1_FWSTS1_PROXY_STATE_NORMAL              5
+#define   HECI1_FWSTS1_INIT_COMPLETE                   REG_BIT(9)
+#define HECI_FWSTS2                            0xc48
+#define HECI_FWSTS3                            0xc60
+#define HECI_FWSTS4                            0xc64
+#define HECI_FWSTS5                            0xc68
+#define   HECI1_FWSTS5_HUC_AUTH_DONE   (1 << 19)
+#define HECI_FWSTS6                            0xc6c
+
+/* the FWSTS regs are 1-based, so we use -base for index 0 to get an invalid reg */
+#define HECI_FWSTS(base, x) _MMIO((base) + _PICK(x, -(base), \
+                                                   HECI_FWSTS1, \
+                                                   HECI_FWSTS2, \
+                                                   HECI_FWSTS3, \
+                                                   HECI_FWSTS4, \
+                                                   HECI_FWSTS5, \
+                                                   HECI_FWSTS6))
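
For reference, i915's _PICK() indexes a compound-literal array built from its variadic arguments, which is why passing x = 0 above selects -(base) and collapses the whole expression to offset 0, an invalid register. A simplified userspace illustration (the PICK stand-in and the base address are hypothetical, for demonstration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for i915's _PICK(): select the idx-th argument. */
    #define PICK(idx, ...) (((const uint32_t []){ __VA_ARGS__ })[idx])

    #define FWSTS(base, x) ((base) + PICK(x, -(base), 0xc40, 0xc48, 0xc60, \
                                          0xc64, 0xc68, 0xc6c))

    int main(void)
    {
        uint32_t base = 0x116000;    /* hypothetical HECI base */

        printf("FWSTS1 = %#x\n", FWSTS(base, 1));    /* base + 0xc40 */
        printf("FWSTS0 = %#x\n", FWSTS(base, 0));    /* wraps to 0: invalid */
        return 0;
    }
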
 
 #define HSW_GTT_CACHE_EN       _MMIO(0x4024)
 #define   GTT_CACHE_EN_ALL     0xF0007FFF
 
 #define SHPD_FILTER_CNT                                _MMIO(0xc4038)
 #define   SHPD_FILTER_CNT_500_ADJ              0x001D9
+#define   SHPD_FILTER_CNT_250                  0x000F8
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
index 894068bb37b6f1b647c7b7bb55bba08f4a379e47..7c7da284990df7cbde55ea63119a5691399b4623 100644 (file)
@@ -1220,7 +1220,7 @@ emit_semaphore_wait(struct i915_request *to,
        /*
         * If this or its dependents are waiting on an external fence
         * that may fail catastrophically, then we want to avoid using
-        * sempahores as they bypass the fence signaling metadata, and we
+        * semaphores as they bypass the fence signaling metadata, and we
         * lose the fence->error propagation.
         */
        if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
@@ -1353,7 +1353,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 {
        mark_external(rq);
        return i915_sw_fence_await_dma_fence(&rq->submit, fence,
-                                            i915_fence_context_timeout(rq->engine->i915,
+                                            i915_fence_context_timeout(rq->i915,
                                                                        fence->context),
                                             I915_FENCE_GFP);
 }
@@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
 
        request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
 
+       /*
+        * Users have to put the reference to the returned request,
+        * potentially acquired by __i915_active_fence_set(), when it is
+        * no longer needed.
+        */
        return to_request(__i915_active_fence_set(&timeline->last_request,
                                                  &rq->fence));
 }
@@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
                                                         0);
        }
 
+       /*
+        * Users have to put the reference to prev, potentially acquired
+        * by __i915_active_fence_set(), when it is no longer needed.
+        */
        return prev;
 }
 
@@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                prev = __i915_request_ensure_ordering(rq, timeline);
        else
                prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+       if (prev)
+               i915_request_put(prev);
 
        /*
         * Make sure that no request gazumped us - if it was allocated after
index f6f9228a135185efaa1f0d5e6c42a340d73d3691..ce1cbee1b39dd04de2a5534532f7254a513e64c8 100644 (file)
@@ -277,7 +277,7 @@ TRACE_EVENT(i915_request_queue,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = rq->engine->i915->drm.primary->index;
+                          __entry->dev = rq->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
                           __entry->ctx = rq->fence.context;
@@ -304,7 +304,7 @@ DECLARE_EVENT_CLASS(i915_request,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = rq->engine->i915->drm.primary->index;
+                          __entry->dev = rq->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
                           __entry->ctx = rq->fence.context;
@@ -353,7 +353,7 @@ TRACE_EVENT(i915_request_in,
                            ),
 
            TP_fast_assign(
-                          __entry->dev = rq->engine->i915->drm.primary->index;
+                          __entry->dev = rq->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
                           __entry->ctx = rq->fence.context;
@@ -382,7 +382,7 @@ TRACE_EVENT(i915_request_out,
                            ),
 
            TP_fast_assign(
-                          __entry->dev = rq->engine->i915->drm.primary->index;
+                          __entry->dev = rq->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
                           __entry->ctx = rq->fence.context;
@@ -623,7 +623,7 @@ TRACE_EVENT(i915_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          __entry->dev = rq->engine->i915->drm.primary->index;
+                          __entry->dev = rq->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
                           __entry->ctx = rq->fence.context;
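
These tracepoint hunks replace rq->engine->i915 with the device backpointer
cached on the request itself, saving one pointer hop in hot paths; the cached
pointer is set at allocation and never changes, unlike rq->engine, which can
be rewritten for virtual engines. A sketch of the shape, with hypothetical
type names:

        struct example_request {
                struct example_device *i915;   /* cached once at allocation */
                struct example_engine *engine; /* may be rewritten at runtime */
        };
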
index ffb425ba591c7935649d80da8d6387090aab9e22..e52089564d7923612da3d1700182a19b70d23208 100644 (file)
@@ -34,6 +34,7 @@
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_tlb.h"
 
 #include "i915_drv.h"
 #include "i915_gem_evict.h"
@@ -74,14 +75,14 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
        char buf[512];
 
        if (!vma->node.stack) {
-               drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+               drm_dbg(vma->obj->base.dev,
                        "vma.node [%08llx + %08llx] %s: unknown owner\n",
                        vma->node.start, vma->node.size, reason);
                return;
        }
 
        stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
-       drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+       drm_dbg(vma->obj->base.dev,
                "vma.node [%08llx + %08llx] %s: inserted at %s\n",
                vma->node.start, vma->node.size, reason, buf);
 }
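
drm_dbg() takes a struct drm_device * directly, and vma->obj->base.dev already
is one, so the old &to_i915(...)->drm round-trip was redundant. A minimal
usage sketch (obj and node stand in for the locals above):

        drm_dbg(obj->base.dev, "vma.node [%08llx + %08llx]\n",
                node->start, node->size);
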
@@ -805,7 +806,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
         * attempt to find space.
         */
        if (size > end - 2 * guard) {
-               drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+               drm_dbg(vma->obj->base.dev,
                        "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                        size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
                return -ENOSPC;
@@ -1339,6 +1340,12 @@ err_unpin:
 
 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
 {
+       struct intel_gt *gt;
+       int id;
+
+       if (!tlb)
+               return;
+
        /*
         * Before we release the pages that were bound by this vma, we
         * must invalidate all the TLBs that may still have a reference
@@ -1347,7 +1354,9 @@ void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
         * the most recent TLB invalidation seqno, and if we have not yet
         * flushed the TLBs upon release, perform a full invalidation.
         */
-       WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+       for_each_gt(gt, vm->i915, id)
+               WRITE_ONCE(tlb[id],
+                          intel_gt_next_invalidate_tlb_full(vm->gt));
 }
 
 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
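
The vma_invalidate_tlb() hunk above widens TLB bookkeeping from the VM's own
GT to every GT on the device: the tlb argument is now an array of invalidation
seqnos indexed by GT id rather than a single slot. A sketch of the implied
layout (the I915_MAX_GT sizing is an assumption inferred from the tlb[id]
indexing):

        struct example_obj_mm {
                u32 tlb[I915_MAX_GT];   /* one invalidation seqno per GT */
        };

This is also why, later in this file, &vma->obj->mm.tlb (the address of a
single u32) becomes vma->obj->mm.tlb (an array decaying to a pointer).
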
@@ -1629,6 +1638,26 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        return err;
 }
 
+/**
+ * i915_ggtt_clear_scanout - Clear scanout flags on all of an object's ggtt vmas
+ * @obj: i915 GEM object
+ *
+ * The scanout flag is set when an object is pinned for display use. This
+ * function clears it on all of the object's ggtt vmas; it is intended to be
+ * called by the frontbuffer tracking code when the frontbuffer is about to
+ * be released.
+ */
+void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       spin_lock(&obj->vma.lock);
+       for_each_ggtt_vma(vma, obj) {
+               i915_vma_clear_scanout(vma);
+               vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+       }
+       spin_unlock(&obj->vma.lock);
+}
+
 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
 {
        /*
@@ -1908,7 +1937,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
        if (flags & EXEC_OBJECT_WRITE) {
                struct intel_frontbuffer *front;
 
-               front = __intel_frontbuffer_get(obj);
+               front = i915_gem_object_get_frontbuffer(obj);
                if (unlikely(front)) {
                        if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
                                i915_active_add_request(&front->write, rq);
@@ -1994,7 +2023,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 
        if (async)
                unbind_fence = i915_vma_resource_unbind(vma_res,
-                                                       &vma->obj->mm.tlb);
+                                                       vma->obj->mm.tlb);
        else
                unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
 
@@ -2011,7 +2040,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
                        dma_fence_put(unbind_fence);
                        unbind_fence = NULL;
                }
-               vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
+               vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
        }
 
        /*
index 9a9729205d5be6c8eeaf4a73a33c1cf1b3fffdab..e356dfb883d343820ed45f499bfb040a55da3357 100644 (file)
@@ -418,6 +418,11 @@ i915_vma_unpin_fence(struct i915_vma *vma)
                __i915_vma_unpin_fence(vma);
 }
 
+static inline int i915_vma_fence_id(const struct i915_vma *vma)
+{
+       return vma->fence ? vma->fence->id : -1;
+}
+
 void i915_vma_parked(struct intel_gt *gt);
 
 static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
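
The -1 sentinel lets callers log or compare a fence id without a separate
NULL check, e.g.:

        drm_printf(p, "fence: %d\n", i915_vma_fence_id(vma)); /* -1: none */
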
@@ -435,6 +440,8 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma)
        clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
 }
 
+void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj);
+
 #define for_each_until(cond) if (cond) break; else
 
 /**
index a27600bc597663b06ee3d549371de37c71896080..81a4d32734e946bb3c927f27ef7fa9f1b3688c6e 100644 (file)
@@ -456,12 +456,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
        intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
 
        /* WaDisableSDEUnitClockGating:kbl */
-       if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+       if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
                intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6,
                                 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
        /* WaDisableGamClockGating:kbl */
-       if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+       if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
                intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1,
                                 0, GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
 
@@ -559,9 +559,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
 
 static void hsw_init_clock_gating(struct drm_i915_private *i915)
 {
+       enum pipe pipe;
+
        /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
        intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS);
 
+       /* WaPsrDPAMaskVBlankInSRD:hsw */
+       intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
+
+       for_each_pipe(i915, pipe) {
+               /* WaPsrDPRSUnmaskVBlankInSRD:hsw */
+               intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
+                                0, HSW_UNMASK_VBL_TO_REGS_IN_SRD);
+       }
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                         0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
index 6e49caf241a5f54e1bdf7f46669d12c7e3274c6d..ea0ec6174ce50cfc845e8a5c09be9ce93733e974 100644 (file)
@@ -93,9 +93,6 @@ void intel_device_info_print(const struct intel_device_info *info,
                             const struct intel_runtime_info *runtime,
                             struct drm_printer *p)
 {
-       const struct intel_display_runtime_info *display_runtime =
-               &info->display->__runtime_defaults;
-
        if (runtime->graphics.ip.rel)
                drm_printf(p, "graphics version: %u.%02u\n",
                           runtime->graphics.ip.ver,
@@ -112,21 +109,13 @@ void intel_device_info_print(const struct intel_device_info *info,
                drm_printf(p, "media version: %u\n",
                           runtime->media.ip.ver);
 
-       if (display_runtime->ip.rel)
-               drm_printf(p, "display version: %u.%02u\n",
-                          display_runtime->ip.ver,
-                          display_runtime->ip.rel);
-       else
-               drm_printf(p, "display version: %u\n",
-                          display_runtime->ip.ver);
-
        drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
        drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
        drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
        drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
 
        drm_printf(p, "gt: %d\n", info->gt);
-       drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
+       drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions);
        drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
        drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
        drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
@@ -138,15 +127,6 @@ void intel_device_info_print(const struct intel_device_info *info,
 #undef PRINT_FLAG
 
        drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
-
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display->name))
-       DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-
-       drm_printf(p, "has_hdcp: %s\n", str_yes_no(display_runtime->has_hdcp));
-       drm_printf(p, "has_dmc: %s\n", str_yes_no(display_runtime->has_dmc));
-       drm_printf(p, "has_dsc: %s\n", str_yes_no(display_runtime->has_dsc));
-
        drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
 }
 
@@ -260,15 +240,19 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
        if (find_devid(devid, subplatform_ult_ids,
                       ARRAY_SIZE(subplatform_ult_ids))) {
                mask = BIT(INTEL_SUBPLATFORM_ULT);
+               if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+                       DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D);
        } else if (find_devid(devid, subplatform_ulx_ids,
                              ARRAY_SIZE(subplatform_ulx_ids))) {
                mask = BIT(INTEL_SUBPLATFORM_ULX);
                if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                        /* ULX machines are also considered ULT. */
                        mask |= BIT(INTEL_SUBPLATFORM_ULT);
+                       DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D);
                }
        } else if (find_devid(devid, subplatform_portf_ids,
                              ARRAY_SIZE(subplatform_portf_ids))) {
+               DISPLAY_RUNTIME_INFO(i915)->port_mask |= BIT(PORT_F);
                mask = BIT(INTEL_SUBPLATFORM_PORTF);
        } else if (find_devid(devid, subplatform_uy_ids,
                           ARRAY_SIZE(subplatform_uy_ids))) {
@@ -380,13 +364,6 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915)
        intel_device_info_subplatform_init(i915);
 }
 
-/* FIXME: Remove this, and make device info a const pointer to rodata. */
-static struct intel_device_info *
-mkwrite_device_info(struct drm_i915_private *i915)
-{
-       return (struct intel_device_info *)INTEL_INFO(i915);
-}
-
 static const struct intel_display_device_info no_display = {};
 
 /**
@@ -407,7 +384,6 @@ static const struct intel_display_device_info no_display = {};
  */
 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
        struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
 
        if (HAS_DISPLAY(dev_priv))
@@ -417,7 +393,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        if (!HAS_DISPLAY(dev_priv)) {
                dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
                                                   DRIVER_ATOMIC);
-               info->display = &no_display;
+               dev_priv->display.info.__device_info = &no_display;
        }
 
        /* Disable nuclear pageflip by default on pre-g4x */
@@ -447,26 +423,24 @@ void intel_device_info_driver_create(struct drm_i915_private *i915,
                                     u16 device_id,
                                     const struct intel_device_info *match_info)
 {
-       struct intel_device_info *info;
        struct intel_runtime_info *runtime;
        u16 ver, rel, step;
 
-       /* Setup the write-once "constant" device info */
-       info = mkwrite_device_info(i915);
-       memcpy(info, match_info, sizeof(*info));
+       /* Setup INTEL_INFO() */
+       i915->__info = match_info;
 
        /* Initialize initial runtime info from static const data and pdev. */
        runtime = RUNTIME_INFO(i915);
        memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
 
        /* Probe display support */
-       info->display = intel_display_device_probe(i915, info->has_gmd_id,
-                                                  &ver, &rel, &step);
+       i915->display.info.__device_info = intel_display_device_probe(i915, HAS_GMD_ID(i915),
+                                                                     &ver, &rel, &step);
        memcpy(DISPLAY_RUNTIME_INFO(i915),
               &DISPLAY_INFO(i915)->__runtime_defaults,
               sizeof(*DISPLAY_RUNTIME_INFO(i915)));
 
-       if (info->has_gmd_id) {
+       if (HAS_GMD_ID(i915)) {
                DISPLAY_RUNTIME_INFO(i915)->ip.ver = ver;
                DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
                DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
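
The common thread in these hunks is turning the device info into a write-once
const pointer instead of a writable copy: fields that used to be patched at
runtime (memory_regions, the engine mask, the display info pointer) move into
runtime-info structures, and mkwrite_device_info() goes away. A sketch of the
pattern, with hypothetical names:

        static const struct chip_info chip_a = { .gen = 12 };

        struct device_priv {
                const struct chip_info *info;   /* points into rodata */
        };

        static void probe_one(struct device_priv *priv)
        {
                priv->info = &chip_a;   /* write-once; the data stays const */
        }
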
index 069291b3bd37017110456efbd63607f166f028eb..dbfe6443457b5df15113dd8bb12f91dc528f01bf 100644 (file)
@@ -29,8 +29,6 @@
 
 #include "intel_step.h"
 
-#include "display/intel_display_device.h"
-
 #include "gt/intel_engine_types.h"
 #include "gt/intel_context_types.h"
 #include "gt/intel_sseu.h"
@@ -212,8 +210,6 @@ struct intel_runtime_info {
 
        u16 device_id;
 
-       intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
-
        u32 rawclk_freq;
 
        struct intel_step_info step;
@@ -223,8 +219,6 @@ struct intel_runtime_info {
        enum intel_ppgtt_type ppgtt_type;
        unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
 
-       u32 memory_regions; /* regions supported by the HW */
-
        bool has_pooled_eu;
 };
 
@@ -237,12 +231,13 @@ struct intel_device_info {
 
        u8 gt; /* GT number, 0 if undefined */
 
+       intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
+       u32 memory_regions; /* regions supported by the HW */
+
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
 
-       const struct intel_display_device_info *display;
-
        /*
         * Initial runtime info. Do not access outside of i915_driver_create().
         */
index 8a9ff6227e5367105d2ad50f66d31455e883a532..c02a6f156a000b733200ab6b629e608da07ed5d5 100644 (file)
@@ -192,16 +192,16 @@ void intel_step_init(struct drm_i915_private *i915)
        } else if (IS_XEHPSDV(i915)) {
                revids = xehpsdv_revids;
                size = ARRAY_SIZE(xehpsdv_revids);
-       } else if (IS_ADLP_N(i915)) {
+       } else if (IS_ALDERLAKE_P_N(i915)) {
                revids = adlp_n_revids;
                size = ARRAY_SIZE(adlp_n_revids);
-       } else if (IS_ADLP_RPLP(i915)) {
+       } else if (IS_RAPTORLAKE_P(i915)) {
                revids = adlp_rplp_revids;
                size = ARRAY_SIZE(adlp_rplp_revids);
        } else if (IS_ALDERLAKE_P(i915)) {
                revids = adlp_revids;
                size = ARRAY_SIZE(adlp_revids);
-       } else if (IS_ADLS_RPLS(i915)) {
+       } else if (IS_RAPTORLAKE_S(i915)) {
                revids = adls_rpls_revids;
                size = ARRAY_SIZE(adls_rpls_revids);
        } else if (IS_ALDERLAKE_S(i915)) {
@@ -213,13 +213,13 @@ void intel_step_init(struct drm_i915_private *i915)
        } else if (IS_ROCKETLAKE(i915)) {
                revids = rkl_revids;
                size = ARRAY_SIZE(rkl_revids);
-       } else if (IS_TGL_UY(i915)) {
+       } else if (IS_TIGERLAKE_UY(i915)) {
                revids = tgl_uy_revids;
                size = ARRAY_SIZE(tgl_uy_revids);
        } else if (IS_TIGERLAKE(i915)) {
                revids = tgl_revids;
                size = ARRAY_SIZE(tgl_revids);
-       } else if (IS_JSL_EHL(i915)) {
+       } else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
                revids = jsl_ehl_revids;
                size = ARRAY_SIZE(jsl_ehl_revids);
        } else if (IS_ICELAKE(i915)) {
index 796ebfe6c5507293c34fa3fb96a340f63a47b1a0..dfefad5a5fec9e6fd1da6dd9bf9ec64905d8e958 100644 (file)
@@ -1925,25 +1925,31 @@ __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
                        i915_mmio_reg_offset(reg));
 }
 
-static inline void
-unclaimed_reg_debug(struct intel_uncore *uncore,
-                   const i915_reg_t reg,
-                   const bool read,
-                   const bool before)
+static inline bool __must_check
+unclaimed_reg_debug_header(struct intel_uncore *uncore,
+                          const i915_reg_t reg, const bool read)
 {
        if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
-               return;
+               return false;
 
        /* interrupts are disabled and re-enabled around uncore->lock usage */
        lockdep_assert_held(&uncore->lock);
 
-       if (before) {
-               spin_lock(&uncore->debug->lock);
-               __unclaimed_previous_reg_debug(uncore, reg, read);
-       } else {
-               __unclaimed_reg_debug(uncore, reg, read);
-               spin_unlock(&uncore->debug->lock);
-       }
+       spin_lock(&uncore->debug->lock);
+       __unclaimed_previous_reg_debug(uncore, reg, read);
+
+       return true;
+}
+
+static inline void
+unclaimed_reg_debug_footer(struct intel_uncore *uncore,
+                          const i915_reg_t reg, const bool read)
+{
+       /* interrupts are disabled and re-enabled around uncore->lock usage */
+       lockdep_assert_held(&uncore->lock);
+
+       __unclaimed_reg_debug(uncore, reg, read);
+       spin_unlock(&uncore->debug->lock);
 }
 
 #define __vgpu_read(x) \
@@ -2001,13 +2007,15 @@ __gen2_read(64)
 #define GEN6_READ_HEADER(x) \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
+       bool unclaimed_reg_debug; \
        u##x val = 0; \
        assert_rpm_wakelock_held(uncore->rpm); \
        spin_lock_irqsave(&uncore->lock, irqflags); \
-       unclaimed_reg_debug(uncore, reg, true, true)
+       unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
 
 #define GEN6_READ_FOOTER \
-       unclaimed_reg_debug(uncore, reg, true, false); \
+       if (unclaimed_reg_debug) \
+               unclaimed_reg_debug_footer(uncore, reg, true);  \
        spin_unlock_irqrestore(&uncore->lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val
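
The refactor above splits the old before/after flag of unclaimed_reg_debug()
into a header that conditionally takes the debug lock and a footer that runs
only when the header reports it did. A condensed sketch of the pattern, with
hypothetical names:

        bool locked = debug_header(d);  /* takes d->lock iff debugging is on */

        /* ...the mmio access itself... */

        if (locked)
                debug_footer(d);        /* checks for unclaimed access, unlocks */

One effect is that the enabled check is made exactly once per access, so the
lock/unlock pairing cannot get out of sync if i915.mmio_debug changes between
the two calls.
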
@@ -2105,13 +2113,15 @@ __gen2_write(32)
 #define GEN6_WRITE_HEADER \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
+       bool unclaimed_reg_debug; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(uncore->rpm); \
        spin_lock_irqsave(&uncore->lock, irqflags); \
-       unclaimed_reg_debug(uncore, reg, false, true)
+       unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
 
 #define GEN6_WRITE_FOOTER \
-       unclaimed_reg_debug(uncore, reg, false, false); \
+       if (unclaimed_reg_debug) \
+               unclaimed_reg_debug_footer(uncore, reg, false); \
        spin_unlock_irqrestore(&uncore->lock, irqflags)
 
 #define __gen6_write(x) \
index 9ea1f4864a3a40bba981bf1495f67d7dddc4e2e0..f419c311a0dea4d439c615d20e1e1308b7b77df6 100644 (file)
@@ -496,6 +496,11 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
        return (reg_val & mask) != expected_val ? -EINVAL : 0;
 }
 
+static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
+{
+       return uncore->regs;
+}
+
 /*
  * The raw_reg_{read,write} macros are intended as a micro-optimization for
  * interrupt handlers so that the pointer indirection on uncore->regs can
index bb2e15329f346c584b85f782584f67ad6ee73b15..38ec754d0ec8ee23ecd443419c4bb132821b0f0d 100644 (file)
@@ -162,8 +162,8 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9
         * for HuC authentication. For now, its limited to DG2.
         */
        if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
-           intel_huc_is_loaded_by_gsc(&i915->gt0.uc.huc) && intel_uc_uses_huc(&i915->gt0.uc))
-               return &i915->gt0;
+           intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
+               return to_gt(i915);
 
        return NULL;
 }
@@ -188,8 +188,8 @@ static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_p
         * Else we rely on mei-pxp module but only on legacy platforms
         * prior to having separate media GTs and has a valid VDBOX.
         */
-       if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(&i915->gt0))
-               return &i915->gt0;
+       if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
+               return to_gt(i915);
 
        return NULL;
 }
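
Both hunks replace direct &i915->gt0 accesses with the to_gt() accessor, so
callers no longer depend on where the primary GT is stored. A hypothetical
sketch of such an accessor:

        static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
        {
                return i915->gt[0];     /* primary GT; storage is a detail */
        }
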
index f13890ec7db15a17f02be4d870bbb39a9aac2b87..2a600184a0771030600e34cc90a85c553adfae8f 100644 (file)
@@ -6,6 +6,7 @@
 #include "gem/i915_gem_internal.h"
 
 #include "gt/intel_context.h"
+#include "gt/intel_gt.h"
 #include "gt/uc/intel_gsc_fw.h"
 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
 
@@ -197,7 +198,7 @@ bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
         * are out of order) will suffice.
         */
        if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) &&
-           intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc))
+           intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true))
                return true;
 
        return false;
@@ -336,7 +337,7 @@ gsccs_create_buffer(struct intel_gt *gt,
        }
 
        /* return a virtual pointer */
-       *map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+       *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
        if (IS_ERR(*map)) {
                drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
                err = PTR_ERR(*map);
index 1ce07d7e876909cc8944c0ad87f7177042f498be..80bb0018986525f16d410e565ff4db98d470a9f9 100644 (file)
@@ -11,6 +11,7 @@
 #include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
+#include "gt/intel_gt.h"
 
 #include "intel_pxp.h"
 #include "intel_pxp_cmd_interface_42.h"
@@ -245,7 +246,9 @@ static int alloc_streaming_command(struct intel_pxp *pxp)
        }
 
        /* map the lmem into the virtual memory pointer */
-       cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+       cmd = i915_gem_object_pin_map_unlocked(obj,
+                                              intel_gt_coherent_map_type(pxp->ctrl_gt,
+                                                                         obj, true));
        if (IS_ERR(cmd)) {
                drm_err(&i915->drm, "Failed to map gsc message page!\n");
                err = PTR_ERR(cmd);
index d4608b220123ce623789527e568a5b85c45e6197..403134a7acec305b2a210bd0c1318e0be237f276 100644 (file)
@@ -168,7 +168,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
                return PTR_ERR(cs);
 
        len = 5;
-       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->i915) >= 8)
                len++;
 
        *cs++ = GFX_OP_PIPE_CONTROL(len);
index 39da0fb0d6d26f531f2207ebfc446d2e93f19fb7..ee79e0809a6ddf079c975ed0a7775ec24b819baa 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/random.h>
 
 #include "gt/intel_gt_pm.h"
+#include "gt/uc/intel_gsc_fw.h"
+
 #include "i915_driver.h"
 #include "i915_drv.h"
 #include "i915_selftest.h"
@@ -127,6 +129,31 @@ static void set_default_test_all(struct selftest *st, unsigned int count)
                st[i].enabled = true;
 }
 
+static bool
+__gsc_proxy_init_progressing(struct intel_gsc_uc *gsc)
+{
+       return intel_gsc_uc_fw_proxy_get_status(gsc) == -EAGAIN;
+}
+
+static void
+__wait_gsc_proxy_completed(struct drm_i915_private *i915)
+{
+       bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY) &&
+                            i915->media_gt &&
+                            HAS_ENGINE(i915->media_gt, GSC0) &&
+                            intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw));
+       /*
+        * The gsc proxy component depends on the kernel component driver load
+        * ordering, and in corner cases (e.g. the first boot after an IFWI
+        * flash) the init-completion firmware flows take longer.
+        */
+       unsigned long timeout_ms = 8000;
+
+       if (need_to_wait && wait_for(!__gsc_proxy_init_progressing(&i915->media_gt->uc.gsc),
+                                    timeout_ms))
+               pr_warn(DRIVER_NAME ": Timed out waiting for gsc_proxy_completion!\n");
+}
+
 static int __run_selftests(const char *name,
                           struct selftest *st,
                           unsigned int count,
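
__wait_gsc_proxy_completed() above is built on i915's wait_for(COND, timeout_ms)
polling helper, which re-evaluates COND with backoff until it becomes true or
the timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A minimal
usage sketch with a hypothetical device_ready() predicate:

        if (wait_for(device_ready(dev), 8000))  /* poll for up to 8 s */
                pr_warn("device not ready\n");
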
@@ -206,6 +233,8 @@ int i915_live_selftests(struct pci_dev *pdev)
        if (!i915_selftest.live)
                return 0;
 
+       __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+
        err = run_selftests(live, pdev_to_i915(pdev));
        if (err) {
                i915_selftest.live = err;
@@ -227,6 +256,8 @@ int i915_perf_selftests(struct pci_dev *pdev)
        if (!i915_selftest.perf)
                return 0;
 
+       __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+
        err = run_selftests(perf, pdev_to_i915(pdev));
        if (err) {
                i915_selftest.perf = err;
index 618d9386d554941d0530494273fec50ceeb2fc66..0f064930ef116428100b2faa6455f2d29ffa9d51 100644 (file)
@@ -97,7 +97,7 @@ int igt_spinner_pin(struct igt_spinner *spin,
        if (!spin->batch) {
                unsigned int mode;
 
-               mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
+               mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
                vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);
@@ -159,15 +159,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
        batch = spin->batch;
 
-       if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+       if (GRAPHICS_VER(rq->i915) >= 8) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = upper_32_bits(hws_address(hws, rq));
-       } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
+       } else if (GRAPHICS_VER(rq->i915) >= 6) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = 0;
                *batch++ = hws_address(hws, rq);
-       } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+       } else if (GRAPHICS_VER(rq->i915) >= 4) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
                *batch++ = hws_address(hws, rq);
@@ -179,11 +179,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
        *batch++ = arbitration_command;
 
-       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->i915) >= 8)
                *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
-       else if (IS_HASWELL(rq->engine->i915))
+       else if (IS_HASWELL(rq->i915))
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
-       else if (GRAPHICS_VER(rq->engine->i915) >= 6)
+       else if (GRAPHICS_VER(rq->i915) >= 6)
                *batch++ = MI_BATCH_BUFFER_START;
        else
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -201,7 +201,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
        }
 
        flags = 0;
-       if (GRAPHICS_VER(rq->engine->i915) <= 5)
+       if (GRAPHICS_VER(rq->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
        err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
index e4281508d5808b5c511ca85c2b17941ff88687e8..03ea75cd84dd5c3784b7bf018296262bff1d4e9c 100644 (file)
@@ -210,7 +210,7 @@ static int live_forcewake_ops(void *arg)
 
        for_each_engine(engine, gt, id) {
                i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
-               u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+               u32 __iomem *reg = intel_uncore_regs(uncore) + engine->mmio_base + r->offset;
                enum forcewake_domains fw_domains;
                u32 val;
 
index 4de6a4e8280d59539f535e76c53e9ba7fd8e3bca..da0b269606c51db5fddbcd1751696d8c7435f541 100644 (file)
@@ -114,7 +114,7 @@ static struct dev_pm_domain pm_domain = {
 
 static void mock_gt_probe(struct drm_i915_private *i915)
 {
-       i915->gt[0] = &i915->gt0;
+       i915->gt[0] = to_gt(i915);
        i915->gt[0]->name = "Mock GT";
 }
 
@@ -123,8 +123,8 @@ static const struct intel_device_info mock_info = {
        .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K |
                                 I915_GTT_PAGE_SIZE_64K |
                                 I915_GTT_PAGE_SIZE_2M),
-       .__runtime.memory_regions = REGION_SMEM,
-       .__runtime.platform_engine_mask = BIT(0),
+       .memory_regions = REGION_SMEM,
+       .platform_engine_mask = BIT(0),
 
        /* simply use legacy cache level for mock device */
        .max_pat_index = 3,
index 9f0651d48d410d48ccb078e23cb4473b11c7b4ab..15492b69f6983809382c4ce98e332a8968da724a 100644 (file)
@@ -704,7 +704,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
        if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
                return;
 
-       edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+       edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
 
        /* NB: We can't write IDICR yet because we don't have gt funcs set up */
 
index 6d0204942f7a5bf03131a76f9797d2ffbf309bcf..49c7fb16e934f56d158ea90d124497a1a56e0b07 100644 (file)
@@ -47,11 +47,9 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
 
        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
-       if (mchbar_addr &&
+       if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
-#endif
 
        /* Get some space for it */
        i915->gmch.mch_res.name = "i915 MCHBAR";
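
The hunk trades an #ifdef CONFIG_PNP block for IS_ENABLED(), which keeps the
code compiled (and type-checked) in every configuration while the branch
constant-folds away when PNP is off; this assumes pnp_range_reserved() stays
visible to the compiler in !CONFIG_PNP builds. The general shape:

        if (IS_ENABLED(CONFIG_PNP) && pnp_range_reserved(start, end))
                return 0;
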
index ba9843cb1b13d8c963c87f07c527c7f530ad3d8e..19a8f27c404e1957df052cb4015e8201d2102691 100644 (file)
@@ -32,21 +32,21 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                drm_WARN_ON(&dev_priv->drm,
                            !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
                drm_WARN_ON(&dev_priv->drm,
-                           IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+                           IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
                return PCH_LPT;
        case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
                drm_WARN_ON(&dev_priv->drm,
                            !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
                drm_WARN_ON(&dev_priv->drm,
-                           !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+                           !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
                return PCH_LPT;
        case INTEL_PCH_WPT_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
                drm_WARN_ON(&dev_priv->drm,
                            !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
                drm_WARN_ON(&dev_priv->drm,
-                           IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+                           IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
                /* WPT is LPT compatible */
                return PCH_LPT;
        case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
@@ -54,7 +54,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                drm_WARN_ON(&dev_priv->drm,
                            !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
                drm_WARN_ON(&dev_priv->drm,
-                           !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+                           !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
                /* WPT is LPT compatible */
                return PCH_LPT;
        case INTEL_PCH_SPT_DEVICE_ID_TYPE:
@@ -115,7 +115,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                return PCH_ICP;
        case INTEL_PCH_MCC_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
-               drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+               drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+                                             IS_ELKHARTLAKE(dev_priv)));
                /* MCC is TGP compatible */
                return PCH_TGP;
        case INTEL_PCH_TGP_DEVICE_ID_TYPE:
@@ -127,7 +128,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                return PCH_TGP;
        case INTEL_PCH_JSP_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
-               drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+               drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+                                             IS_ELKHARTLAKE(dev_priv)));
                /* JSP is ICP compatible */
                return PCH_ICP;
        case INTEL_PCH_ADP_DEVICE_ID_TYPE:
@@ -177,7 +179,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
                id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
        else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
                id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
-       else if (IS_JSL_EHL(dev_priv))
+       else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
                id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
        else if (IS_ICELAKE(dev_priv))
                id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
@@ -186,7 +188,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
                id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
        else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
                id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
-       else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+       else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
                id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
index 5d1779ab65c055c4b7dc27a1b6e351e873abca96..4f3af0dfb34499f6a502bf935a56e8c8956be75e 100644 (file)
@@ -4,8 +4,9 @@
  */
 
 #include <linux/clk.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <drm/drm_bridge_connector.h>
@@ -198,7 +199,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
 
        dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
        if (!dcss->of_port) {
-               dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
+               dev_err(dev, "no port@0 node in %pOF\n", dev->of_node);
                ret = -ENODEV;
                goto clks_err;
        }
index 4f2291610139083d1d0205c333f291b913edd4ec..c68b0d93ae9e91dcc2460d4d1ff39ea6e424ae20 100644 (file)
@@ -66,6 +66,7 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
        mdrv->kms = dcss_kms_attach(mdrv->dcss);
        if (IS_ERR(mdrv->kms)) {
                err = PTR_ERR(mdrv->kms);
+               dev_err_probe(dev, err, "Failed to initialize KMS\n");
                goto dcss_shutoff;
        }
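
dev_err_probe() logs the failure (quietly for -EPROBE_DEFER, so probe deferral
does not spam the log) and returns the error code it was given, which is why it
slots into an existing error path as a single line. In probe paths without
cleanup it often collapses to one statement:

        return dev_err_probe(dev, PTR_ERR(mdrv->kms),
                             "Failed to initialize KMS\n");
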
 
index 80142d9a4a55218e1c43375c25d5543a07bbb708..dade8b59feaef7b13bc9d14c6391ebaf99a070aa 100644 (file)
@@ -618,6 +618,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                width = ipu_src_rect_width(new_state);
        else
                width = drm_rect_width(&new_state->src) >> 16;
+       height = drm_rect_height(&new_state->src) >> 16;
 
        eba = drm_plane_state_to_eba(new_state, 0);
 
@@ -628,9 +629,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        if (ipu_state->use_pre) {
                axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
                ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
-                                         drm_rect_height(&new_state->src) >> 16,
-                                         fb->pitches[0], fb->format->format,
-                                         fb->modifier, &eba);
+                                         height, fb->pitches[0],
+                                         fb->format->format, fb->modifier,
+                                         &eba);
        }
 
        if (!old_state->fb ||
@@ -684,7 +685,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 
        ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
 
-       height = drm_rect_height(&new_state->src) >> 16;
        info = drm_format_info(fb->format->format);
        ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
                             &burstsize, &num_bursts);
@@ -747,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
 
                ipu_cpmem_zero(ipu_plane->alpha_ch);
-               ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
-                                        drm_rect_height(&new_state->src) >> 16);
+               ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, height);
                ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
                ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
                ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
index 277ead6a459a4f46484466555c583b7e15b6df11..22b65f4a0e3034f5768460a0db5dbc43b3e344ef 100644 (file)
@@ -19,8 +19,8 @@
 #include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #define IMX21LCDC_LSSAR         0x0000 /* LCDC Screen Start Address Register */
index a53f475d33df32214da166cd2449cf8591c537b4..b440e0cdc05794f566ea2cecc25d8834be45ff2b 100644 (file)
@@ -9,6 +9,8 @@ config DRM_INGENIC
        select DRM_PANEL_BRIDGE
        select DRM_KMS_HELPER
        select DRM_GEM_DMA_HELPER
+       select REGMAP
+       select REGMAP_MMIO
        select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
        help
          Choose this option for DRM support for the Ingenic SoCs.
index 5ec75e9ba499ae4abc493b4312fae383b3a3fe0a..8dbd4847d3a6de57ff5e2dd4c92fc920e0032bc2 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
index 7a43505011a571c862dafcc41d55c5529dac01d0..6d236547f611d71984d2f0fc4d382823a9a1cd1d 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/time.h>
 
index 39cab4a55f5728d2f8991bce764afed4b6c844d7..10fd9154cc4653f8a9360099ad1c36219cf4f3bb 100644 (file)
@@ -2,7 +2,8 @@
 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
 
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
@@ -276,10 +277,7 @@ static const struct drm_driver lima_drm_driver = {
        .patchlevel         = 0,
 
        .gem_create_object  = lima_gem_create_object,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 };
 
 struct lima_block_reader {
@@ -441,7 +439,7 @@ err_out0:
        return err;
 }
 
-static int lima_pdev_remove(struct platform_device *pdev)
+static void lima_pdev_remove(struct platform_device *pdev)
 {
        struct lima_device *ldev = platform_get_drvdata(pdev);
        struct drm_device *ddev = ldev->ddev;
@@ -459,7 +457,6 @@ static int lima_pdev_remove(struct platform_device *pdev)
 
        drm_dev_put(ddev);
        lima_sched_slab_fini();
-       return 0;
 }
 
 static const struct of_device_id dt_match[] = {
@@ -476,7 +473,7 @@ static const struct dev_pm_ops lima_pm_ops = {
 
 static struct platform_driver lima_platform_driver = {
        .probe      = lima_pdev_probe,
-       .remove     = lima_pdev_remove,
+       .remove_new = lima_pdev_remove,
        .driver     = {
                .name   = "lima",
                .pm     = &lima_pm_ops,
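
The lima hunks above convert the driver to the void-returning platform remove
callback: .remove_new takes a callback with no return value, since the driver
core ignored the old int return anyway. The minimal driver shape, with
hypothetical names:

        static void foo_remove(struct platform_device *pdev)
        {
                /* teardown only; there is no meaningful status to return */
        }

        static struct platform_driver foo_driver = {
                .probe      = foo_probe,
                .remove_new = foo_remove,
                .driver     = { .name = "foo" },
        };
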
index 10252dc11a22de3166051bfdaa60aebc75282578..4f9736e5f929beecb84d2abee435ff64baa1bb25 100644 (file)
@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 
        new_size = min(new_size, bo->base.base.size);
 
-       mutex_lock(&bo->base.pages_lock);
+       dma_resv_lock(bo->base.base.resv, NULL);
 
        if (bo->base.pages) {
                pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
                                       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
-                       mutex_unlock(&bo->base.pages_lock);
+                       dma_resv_unlock(bo->base.base.resv);
                        return -ENOMEM;
                }
 
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
                struct page *page = shmem_read_mapping_page(mapping, i);
 
                if (IS_ERR(page)) {
-                       mutex_unlock(&bo->base.pages_lock);
+                       dma_resv_unlock(bo->base.base.resv);
                        return PTR_ERR(page);
                }
                pages[i] = page;
        }
 
-       mutex_unlock(&bo->base.pages_lock);
+       dma_resv_unlock(bo->base.base.resv);
 
        ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
                                        new_size, GFP_KERNEL);
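
The lima_heap_alloc() conversion above replaces the shmem helper's dedicated
pages_lock with the GEM object's reservation lock, matching drm_gem_shmem's
move to dma_resv-based locking. The basic pattern:

        dma_resv_lock(obj->resv, NULL); /* NULL: no ww acquire context */
        /* ...allocate or touch the page array... */
        dma_resv_unlock(obj->resv);
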
index c35c453fd0255427b2d5be2d8a30c71027baf586..749debd3d6a5772cf45c4b3a15212cb59b0e6988 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/types.h>
 
@@ -466,7 +466,7 @@ error_early:
        return ret;
 }
 
-static int logicvc_drm_remove(struct platform_device *pdev)
+static void logicvc_drm_remove(struct platform_device *pdev)
 {
        struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
@@ -480,8 +480,6 @@ static int logicvc_drm_remove(struct platform_device *pdev)
        logicvc_clocks_unprepare(logicvc);
 
        of_reserved_mem_device_release(dev);
-
-       return 0;
 }
 
 static const struct of_device_id logicvc_drm_of_table[] = {
@@ -493,7 +491,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
 
 static struct platform_driver logicvc_drm_platform_driver = {
        .probe          = logicvc_drm_probe,
-       .remove         = logicvc_drm_remove,
+       .remove_new     = logicvc_drm_remove,
        .driver         = {
                .name           = "logicvc-drm",
                .of_match_table = logicvc_drm_of_table,
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
new file mode 100644 (file)
index 0000000..df6946d
--- /dev/null
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DRM_LOONGSON
+       tristate "DRM support for Loongson Graphics"
+       depends on DRM && PCI && MMU
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       select I2C
+       select I2C_ALGOBIT
+       help
+         This is a DRM driver for Loongson Graphics chips, including the
+         LS7A2000, LS7A1000, LS2K2000 and LS2K1000. The Loongson LS7A
+         series are bridge chipsets, while the Loongson LS2K series are
+         SoCs.
+
+         If "M" is selected, the module will be called loongson.
+
+         If in doubt, say "N".
diff --git a/drivers/gpu/drm/loongson/Makefile b/drivers/gpu/drm/loongson/Makefile
new file mode 100644 (file)
index 0000000..91e72bd
--- /dev/null
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+loongson-y := \
+       lsdc_benchmark.o \
+       lsdc_crtc.o \
+       lsdc_debugfs.o \
+       lsdc_drv.o \
+       lsdc_gem.o \
+       lsdc_gfxpll.o \
+       lsdc_i2c.o \
+       lsdc_irq.o \
+       lsdc_output_7a1000.o \
+       lsdc_output_7a2000.o \
+       lsdc_plane.o \
+       lsdc_pixpll.o \
+       lsdc_probe.o \
+       lsdc_ttm.o
+
+loongson-y += loongson_device.o \
+             loongson_module.o
+
+obj-$(CONFIG_DRM_LOONGSON) += loongson.o
diff --git a/drivers/gpu/drm/loongson/loongson_device.c b/drivers/gpu/drm/loongson/loongson_device.c
new file mode 100644 (file)
index 0000000..9986c8a
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+
+#include "lsdc_drv.h"
+
+static const struct lsdc_kms_funcs ls7a1000_kms_funcs = {
+       .create_i2c = lsdc_create_i2c_chan,
+       .irq_handler = ls7a1000_dc_irq_handler,
+       .output_init = ls7a1000_output_init,
+       .cursor_plane_init = ls7a1000_cursor_plane_init,
+       .primary_plane_init = lsdc_primary_plane_init,
+       .crtc_init = ls7a1000_crtc_init,
+};
+
+static const struct lsdc_kms_funcs ls7a2000_kms_funcs = {
+       .create_i2c = lsdc_create_i2c_chan,
+       .irq_handler = ls7a2000_dc_irq_handler,
+       .output_init = ls7a2000_output_init,
+       .cursor_plane_init = ls7a2000_cursor_plane_init,
+       .primary_plane_init = lsdc_primary_plane_init,
+       .crtc_init = ls7a2000_crtc_init,
+};
+
+static const struct loongson_gfx_desc ls7a1000_gfx = {
+       .dc = {
+               .num_of_crtc = 2,
+               .max_pixel_clk = 200000,
+               .max_width = 2048,
+               .max_height = 2048,
+               .num_of_hw_cursor = 1,
+               .hw_cursor_w = 32,
+               .hw_cursor_h = 32,
+               .pitch_align = 256,
+               .has_vblank_counter = false,
+               .funcs = &ls7a1000_kms_funcs,
+       },
+       .conf_reg_base = LS7A1000_CONF_REG_BASE,
+       .gfxpll = {
+               .reg_offset = LS7A1000_PLL_GFX_REG,
+               .reg_size = 8,
+       },
+       .pixpll = {
+               [0] = {
+                       .reg_offset = LS7A1000_PIXPLL0_REG,
+                       .reg_size = 8,
+               },
+               [1] = {
+                       .reg_offset = LS7A1000_PIXPLL1_REG,
+                       .reg_size = 8,
+               },
+       },
+       .chip_id = CHIP_LS7A1000,
+       .model = "LS7A1000 bridge chipset",
+};
+
+static const struct loongson_gfx_desc ls7a2000_gfx = {
+       .dc = {
+               .num_of_crtc = 2,
+               .max_pixel_clk = 350000,
+               .max_width = 4096,
+               .max_height = 4096,
+               .num_of_hw_cursor = 2,
+               .hw_cursor_w = 64,
+               .hw_cursor_h = 64,
+               .pitch_align = 64,
+               .has_vblank_counter = true,
+               .funcs = &ls7a2000_kms_funcs,
+       },
+       .conf_reg_base = LS7A2000_CONF_REG_BASE,
+       .gfxpll = {
+               .reg_offset = LS7A2000_PLL_GFX_REG,
+               .reg_size = 8,
+       },
+       .pixpll = {
+               [0] = {
+                       .reg_offset = LS7A2000_PIXPLL0_REG,
+                       .reg_size = 8,
+               },
+               [1] = {
+                       .reg_offset = LS7A2000_PIXPLL1_REG,
+                       .reg_size = 8,
+               },
+       },
+       .chip_id = CHIP_LS7A2000,
+       .model = "LS7A2000 bridge chipset",
+};
+
+static const struct lsdc_desc *__chip_id_desc_table[] = {
+       [CHIP_LS7A1000] = &ls7a1000_gfx.dc,
+       [CHIP_LS7A2000] = &ls7a2000_gfx.dc,
+       [CHIP_LS_LAST] = NULL,
+};
+
+const struct lsdc_desc *
+lsdc_device_probe(struct pci_dev *pdev, enum loongson_chip_id chip_id)
+{
+       return __chip_id_desc_table[chip_id];
+}
diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c
new file mode 100644 (file)
index 0000000..d2a51bd
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+
+#include <video/nomodeset.h>
+
+#include "loongson_module.h"
+
+static int loongson_modeset = -1;
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, loongson_modeset, int, 0400);
+
+int loongson_vblank = 1;
+MODULE_PARM_DESC(vblank, "Disable/Enable hw vblank support");
+module_param_named(vblank, loongson_vblank, int, 0400);
+
+static int __init loongson_module_init(void)
+{
+       if (!loongson_modeset || video_firmware_drivers_only())
+               return -ENODEV;
+
+       return pci_register_driver(&lsdc_pci_driver);
+}
+module_init(loongson_module_init);
+
+static void __exit loongson_module_exit(void)
+{
+       pci_unregister_driver(&lsdc_pci_driver);
+}
+module_exit(loongson_module_exit);
diff --git a/drivers/gpu/drm/loongson/loongson_module.h b/drivers/gpu/drm/loongson/loongson_module.h
new file mode 100644 (file)
index 0000000..931c175
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LOONGSON_MODULE_H__
+#define __LOONGSON_MODULE_H__
+
+extern int loongson_vblank;
+extern struct pci_driver lsdc_pci_driver;
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.c b/drivers/gpu/drm/loongson/lsdc_benchmark.c
new file mode 100644 (file)
index 0000000..b088646
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_debugfs.h>
+
+#include "lsdc_benchmark.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+typedef void (*lsdc_copy_proc_t)(struct lsdc_bo *src_bo,
+                                struct lsdc_bo *dst_bo,
+                                unsigned int size,
+                                int n);
+
+static void lsdc_copy_gtt_to_vram_cpu(struct lsdc_bo *src_bo,
+                                     struct lsdc_bo *dst_bo,
+                                     unsigned int size,
+                                     int n)
+{
+       lsdc_bo_kmap(src_bo);
+       lsdc_bo_kmap(dst_bo);
+
+       while (n--)
+               memcpy_toio(dst_bo->kptr, src_bo->kptr, size);
+
+       lsdc_bo_kunmap(src_bo);
+       lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_copy_vram_to_gtt_cpu(struct lsdc_bo *src_bo,
+                                     struct lsdc_bo *dst_bo,
+                                     unsigned int size,
+                                     int n)
+{
+       lsdc_bo_kmap(src_bo);
+       lsdc_bo_kmap(dst_bo);
+
+       while (n--)
+               memcpy_fromio(dst_bo->kptr, src_bo->kptr, size);
+
+       lsdc_bo_kunmap(src_bo);
+       lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_copy_gtt_to_gtt_cpu(struct lsdc_bo *src_bo,
+                                    struct lsdc_bo *dst_bo,
+                                    unsigned int size,
+                                    int n)
+{
+       lsdc_bo_kmap(src_bo);
+       lsdc_bo_kmap(dst_bo);
+
+       while (n--)
+               memcpy(dst_bo->kptr, src_bo->kptr, size);
+
+       lsdc_bo_kunmap(src_bo);
+       lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_benchmark_copy(struct lsdc_device *ldev,
+                               unsigned int size,
+                               unsigned int n,
+                               u32 src_domain,
+                               u32 dst_domain,
+                               lsdc_copy_proc_t copy_proc,
+                               struct drm_printer *p)
+{
+       struct drm_device *ddev = &ldev->base;
+       struct lsdc_bo *src_bo;
+       struct lsdc_bo *dst_bo;
+       unsigned long start_jiffies;
+       unsigned long end_jiffies;
+       unsigned int throughput;
+       unsigned int time;
+
+       src_bo = lsdc_bo_create_kernel_pinned(ddev, src_domain, size);
+       dst_bo = lsdc_bo_create_kernel_pinned(ddev, dst_domain, size);
+
+       start_jiffies = jiffies;
+
+       copy_proc(src_bo, dst_bo, size, n);
+
+       end_jiffies = jiffies;
+
+       lsdc_bo_free_kernel_pinned(src_bo);
+       lsdc_bo_free_kernel_pinned(dst_bo);
+
+       time = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+       throughput = (n * (size >> 10)) / time;
+
+       drm_printf(p,
+                  "Copy bo of %uKiB %u times from %s to %s in %ums: %uMB/s\n",
+                  size >> 10, n,
+                  lsdc_domain_to_str(src_domain),
+                  lsdc_domain_to_str(dst_domain),
+                  time, throughput);
+}
+
+int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p)
+{
+       unsigned int buffer_size = 1920 * 1080 * 4;
+       unsigned int iteration = 60;
+
+       lsdc_benchmark_copy(ldev,
+                           buffer_size,
+                           iteration,
+                           LSDC_GEM_DOMAIN_GTT,
+                           LSDC_GEM_DOMAIN_GTT,
+                           lsdc_copy_gtt_to_gtt_cpu,
+                           p);
+
+       lsdc_benchmark_copy(ldev,
+                           buffer_size,
+                           iteration,
+                           LSDC_GEM_DOMAIN_GTT,
+                           LSDC_GEM_DOMAIN_VRAM,
+                           lsdc_copy_gtt_to_vram_cpu,
+                           p);
+
+       lsdc_benchmark_copy(ldev,
+                           buffer_size,
+                           iteration,
+                           LSDC_GEM_DOMAIN_VRAM,
+                           LSDC_GEM_DOMAIN_GTT,
+                           lsdc_copy_vram_to_gtt_cpu,
+                           p);
+
+       return 0;
+}
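
A note on the arithmetic in lsdc_benchmark_copy() above: with size =
1920 * 1080 * 4 bytes (exactly 8100 KiB) and n = 60, throughput =
(n * (size >> 10)) / time is in KiB per millisecond, i.e. 1.024 MB/s per unit,
so the printed "MB/s" is accurate to within a few percent. time is in whole
milliseconds, so in principle a very fast copy could round it to zero and
divide by zero; the 60 x 8 MiB workload makes that unlikely in practice.
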
diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.h b/drivers/gpu/drm/loongson/lsdc_benchmark.h
new file mode 100644 (file)
index 0000000..3611027
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_BENCHMARK_H__
+#define __LSDC_BENCHMARK_H__
+
+#include "lsdc_drv.h"
+
+int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c
new file mode 100644 (file)
index 0000000..827acab
--- /dev/null
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * After a CRTC soft reset, the vblank counter is reset to zero, but the
+ * address and the other settings in the CRTC registers remain the same
+ * as before.
+ */
+
+static void lsdc_crtc0_soft_reset(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+       val &= CFG_VALID_BITS_MASK;
+
+       /* Soft reset bit, active low */
+       val &= ~CFG_RESET_N;
+
+       val &= ~CFG_PIX_FMT_MASK;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+
+       udelay(1);
+
+       val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+
+       /* Wait about a vblank time */
+       mdelay(20);
+}
+
+static void lsdc_crtc1_soft_reset(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+       val &= CFG_VALID_BITS_MASK;
+
+       /* Soft reset bit, active low */
+       val &= ~CFG_RESET_N;
+
+       val &= ~CFG_PIX_FMT_MASK;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+
+       udelay(1);
+
+       val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+
+       /* Wait about a vblank time */
+       msleep(20);
+}
+
+static void lsdc_crtc0_enable(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+       /*
+        * The CRTC may stall in extremely rare cases, but a soft reset can
+        * bring it back to normal. Warn here in the hope of catching the
+        * condition if it happens.
+        */
+       if (val & CRTC_ANCHORED) {
+               drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
+               return lsdc_crtc0_soft_reset(lcrtc);
+       }
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val | CFG_OUTPUT_ENABLE);
+}
+
+static void lsdc_crtc0_disable(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_clr(ldev, LSDC_CRTC0_CFG_REG, CFG_OUTPUT_ENABLE);
+
+       udelay(9);
+}
+
+static void lsdc_crtc1_enable(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       /*
+        * The CRTC may stall in extremely rare cases, but a soft reset can
+        * bring it back to normal. Warn here in the hope of catching the
+        * condition if it happens.
+        */
+       val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+       if (val & CRTC_ANCHORED) {
+               drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
+               return lsdc_crtc1_soft_reset(lcrtc);
+       }
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val | CFG_OUTPUT_ENABLE);
+}
+
+static void lsdc_crtc1_disable(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_clr(ldev, LSDC_CRTC1_CFG_REG, CFG_OUTPUT_ENABLE);
+
+       udelay(9);
+}
+
+/*
+ * All Loongson display controllers have hardware scanout position recorders;
+ * bits [31:16] of the register hold the horizontal position and bits [15:0]
+ * the vertical position.
+ */
+
+static void lsdc_crtc0_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_CRTC0_SCAN_POS_REG);
+
+       *hpos = val >> 16;
+       *vpos = val & 0xffff;
+}
+
+static void lsdc_crtc1_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_CRTC1_SCAN_POS_REG);
+
+       *hpos = val >> 16;
+       *vpos = val & 0xffff;
+}
+
+static void lsdc_crtc0_enable_vblank(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
+}
+
+static void lsdc_crtc0_disable_vblank(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
+}
+
+static void lsdc_crtc1_enable_vblank(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
+}
+
+static void lsdc_crtc1_disable_vblank(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
+}
+
+static void lsdc_crtc0_flip(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_PAGE_FLIP);
+}
+
+static void lsdc_crtc1_flip(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_PAGE_FLIP);
+}
+
+/*
+ * CRTC0 can clone from CRTC1, or CRTC1 from CRTC0, using hardware logic.
+ * This may be useful for custom cloning (TWIN) applications, saving
+ * bandwidth compared with the clone (mirroring) display mode provided by
+ * the drm core.
+ */
+
+static void lsdc_crtc0_clone(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_HW_CLONE);
+}
+
+static void lsdc_crtc1_clone(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_HW_CLONE);
+}
+
+static void lsdc_crtc0_set_mode(struct lsdc_crtc *lcrtc,
+                               const struct drm_display_mode *mode)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_HDISPLAY_REG,
+                   (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_VDISPLAY_REG,
+                   (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_HSYNC_REG,
+                   (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_VSYNC_REG,
+                   (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
+}
+
+static void lsdc_crtc1_set_mode(struct lsdc_crtc *lcrtc,
+                               const struct drm_display_mode *mode)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_HDISPLAY_REG,
+                   (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_VDISPLAY_REG,
+                   (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_HSYNC_REG,
+                   (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_VSYNC_REG,
+                   (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
+}
+
+/*
+ * This is required for S3 support.
+ * After resuming from suspend, LSDC_CRTCx_CFG_REG (x = 0 or 1) is filled
+ * with a garbage value, which causes the CRTC to hang there.
+ *
+ * This function provides minimal settings for the affected registers,
+ * overriding the firmware's startup settings so that the CRTC works on
+ * its own, similar in function to a GPU POST (Power On Self Test).
+ * Only CRTC hardware-related parts are touched.
+ */
+
+static void lsdc_crtc0_reset(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
+}
+
+static void lsdc_crtc1_reset(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
+}
+
+static const struct lsdc_crtc_hw_ops ls7a1000_crtc_hw_ops[2] = {
+       {
+               .enable = lsdc_crtc0_enable,
+               .disable = lsdc_crtc0_disable,
+               .enable_vblank = lsdc_crtc0_enable_vblank,
+               .disable_vblank = lsdc_crtc0_disable_vblank,
+               .flip = lsdc_crtc0_flip,
+               .clone = lsdc_crtc0_clone,
+               .set_mode = lsdc_crtc0_set_mode,
+               .get_scan_pos = lsdc_crtc0_scan_pos,
+               .soft_reset = lsdc_crtc0_soft_reset,
+               .reset = lsdc_crtc0_reset,
+       },
+       {
+               .enable = lsdc_crtc1_enable,
+               .disable = lsdc_crtc1_disable,
+               .enable_vblank = lsdc_crtc1_enable_vblank,
+               .disable_vblank = lsdc_crtc1_disable_vblank,
+               .flip = lsdc_crtc1_flip,
+               .clone = lsdc_crtc1_clone,
+               .set_mode = lsdc_crtc1_set_mode,
+               .get_scan_pos = lsdc_crtc1_scan_pos,
+               .soft_reset = lsdc_crtc1_soft_reset,
+               .reset = lsdc_crtc1_reset,
+       },
+};
+
+/*
+ * The 32-bit hardware vblank counter has been available since LS7A2000
+ * and LS2K2000. The counter increases even while the CRTC is disabled;
+ * it is reset only when the CRTC is soft reset.
+ * These registers are also readable on the LS7A1000, but their value
+ * does not change there.
+ */
+
+static u32 lsdc_crtc0_get_vblank_count(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       return lsdc_rreg32(ldev, LSDC_CRTC0_VSYNC_COUNTER_REG);
+}
+
+static u32 lsdc_crtc1_get_vblank_count(struct lsdc_crtc *lcrtc)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+
+       return lsdc_rreg32(ldev, LSDC_CRTC1_VSYNC_COUNTER_REG);
+}
+
+/*
+ * The DMA step bit fields have been available since LS7A2000/LS2K2000,
+ * for supporting odd resolutions. A large DMA step saves bandwidth;
+ * the larger, the better. The behavior of writing those bits on the
+ * LS7A1000 or LS2K1000 is undefined.
+ */
+
+static void lsdc_crtc0_set_dma_step(struct lsdc_crtc *lcrtc,
+                                   enum lsdc_dma_steps dma_step)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+       val &= ~CFG_DMA_STEP_MASK;
+       val |= dma_step << CFG_DMA_STEP_SHIFT;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+}
+
+static void lsdc_crtc1_set_dma_step(struct lsdc_crtc *lcrtc,
+                                   enum lsdc_dma_steps dma_step)
+{
+       struct lsdc_device *ldev = lcrtc->ldev;
+       u32 val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+       val &= ~CFG_DMA_STEP_MASK;
+       val |= dma_step << CFG_DMA_STEP_SHIFT;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+}
+
+static const struct lsdc_crtc_hw_ops ls7a2000_crtc_hw_ops[2] = {
+       {
+               .enable = lsdc_crtc0_enable,
+               .disable = lsdc_crtc0_disable,
+               .enable_vblank = lsdc_crtc0_enable_vblank,
+               .disable_vblank = lsdc_crtc0_disable_vblank,
+               .flip = lsdc_crtc0_flip,
+               .clone = lsdc_crtc0_clone,
+               .set_mode = lsdc_crtc0_set_mode,
+               .soft_reset = lsdc_crtc0_soft_reset,
+               .get_scan_pos = lsdc_crtc0_scan_pos,
+               .set_dma_step = lsdc_crtc0_set_dma_step,
+               .get_vblank_counter = lsdc_crtc0_get_vblank_count,
+               .reset = lsdc_crtc0_reset,
+       },
+       {
+               .enable = lsdc_crtc1_enable,
+               .disable = lsdc_crtc1_disable,
+               .enable_vblank = lsdc_crtc1_enable_vblank,
+               .disable_vblank = lsdc_crtc1_disable_vblank,
+               .flip = lsdc_crtc1_flip,
+               .clone = lsdc_crtc1_clone,
+               .set_mode = lsdc_crtc1_set_mode,
+               .get_scan_pos = lsdc_crtc1_scan_pos,
+               .soft_reset = lsdc_crtc1_soft_reset,
+               .set_dma_step = lsdc_crtc1_set_dma_step,
+               .get_vblank_counter = lsdc_crtc1_get_vblank_count,
+               .reset = lsdc_crtc1_reset,
+       },
+};
+
+static void lsdc_crtc_reset(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+       struct lsdc_crtc_state *priv_crtc_state;
+
+       if (crtc->state)
+               crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+       priv_crtc_state = kzalloc(sizeof(*priv_crtc_state), GFP_KERNEL);
+
+       if (!priv_crtc_state)
+               __drm_atomic_helper_crtc_reset(crtc, NULL);
+       else
+               __drm_atomic_helper_crtc_reset(crtc, &priv_crtc_state->base);
+
+       /* Reset the CRTC hardware, this is required for S3 support */
+       ops->reset(lcrtc);
+}
+
+static void lsdc_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+                                          struct drm_crtc_state *state)
+{
+       struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+
+       __drm_atomic_helper_crtc_destroy_state(&priv_state->base);
+
+       kfree(priv_state);
+}
+
+static struct drm_crtc_state *
+lsdc_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc_state *new_priv_state;
+       struct lsdc_crtc_state *old_priv_state;
+
+       new_priv_state = kzalloc(sizeof(*new_priv_state), GFP_KERNEL);
+       if (!new_priv_state)
+               return NULL;
+
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &new_priv_state->base);
+
+       old_priv_state = to_lsdc_crtc_state(crtc->state);
+
+       memcpy(&new_priv_state->pparms, &old_priv_state->pparms,
+              sizeof(new_priv_state->pparms));
+
+       return &new_priv_state->base;
+}
+
+static u32 lsdc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+       /* 32-bit hardware vblank counter */
+       return lcrtc->hw_ops->get_vblank_counter(lcrtc);
+}
+
+static int lsdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+       if (!lcrtc->has_vblank)
+               return -EINVAL;
+
+       lcrtc->hw_ops->enable_vblank(lcrtc);
+
+       return 0;
+}
+
+static void lsdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+       if (!lcrtc->has_vblank)
+               return;
+
+       lcrtc->hw_ops->disable_vblank(lcrtc);
+}
+
+/*
+ * CRTC related debugfs
+ * Primary planes and cursor planes belong to the CRTC as well.
+ * For the sake of convenience, plane-related registers are also added here.
+ */
+
+#define REG_DEF(reg) { \
+       .name = __stringify_1(LSDC_##reg##_REG), \
+       .offset = LSDC_##reg##_REG, \
+}
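+
+/*
+ * For example, REG_DEF(CRTC0_CFG) expands to
+ * { .name = "LSDC_CRTC0_CFG_REG", .offset = LSDC_CRTC0_CFG_REG }
+ */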
+
+static const struct lsdc_reg32 lsdc_crtc_regs_array[2][21] = {
+       [0] = {
+               REG_DEF(CRTC0_CFG),
+               REG_DEF(CRTC0_FB_ORIGIN),
+               REG_DEF(CRTC0_DVO_CONF),
+               REG_DEF(CRTC0_HDISPLAY),
+               REG_DEF(CRTC0_HSYNC),
+               REG_DEF(CRTC0_VDISPLAY),
+               REG_DEF(CRTC0_VSYNC),
+               REG_DEF(CRTC0_GAMMA_INDEX),
+               REG_DEF(CRTC0_GAMMA_DATA),
+               REG_DEF(CRTC0_SYNC_DEVIATION),
+               REG_DEF(CRTC0_VSYNC_COUNTER),
+               REG_DEF(CRTC0_SCAN_POS),
+               REG_DEF(CRTC0_STRIDE),
+               REG_DEF(CRTC0_FB1_ADDR_HI),
+               REG_DEF(CRTC0_FB1_ADDR_LO),
+               REG_DEF(CRTC0_FB0_ADDR_HI),
+               REG_DEF(CRTC0_FB0_ADDR_LO),
+               REG_DEF(CURSOR0_CFG),
+               REG_DEF(CURSOR0_POSITION),
+               REG_DEF(CURSOR0_BG_COLOR),
+               REG_DEF(CURSOR0_FG_COLOR),
+       },
+       [1] = {
+               REG_DEF(CRTC1_CFG),
+               REG_DEF(CRTC1_FB_ORIGIN),
+               REG_DEF(CRTC1_DVO_CONF),
+               REG_DEF(CRTC1_HDISPLAY),
+               REG_DEF(CRTC1_HSYNC),
+               REG_DEF(CRTC1_VDISPLAY),
+               REG_DEF(CRTC1_VSYNC),
+               REG_DEF(CRTC1_GAMMA_INDEX),
+               REG_DEF(CRTC1_GAMMA_DATA),
+               REG_DEF(CRTC1_SYNC_DEVIATION),
+               REG_DEF(CRTC1_VSYNC_COUNTER),
+               REG_DEF(CRTC1_SCAN_POS),
+               REG_DEF(CRTC1_STRIDE),
+               REG_DEF(CRTC1_FB1_ADDR_HI),
+               REG_DEF(CRTC1_FB1_ADDR_LO),
+               REG_DEF(CRTC1_FB0_ADDR_HI),
+               REG_DEF(CRTC1_FB0_ADDR_LO),
+               REG_DEF(CURSOR1_CFG),
+               REG_DEF(CURSOR1_POSITION),
+               REG_DEF(CURSOR1_BG_COLOR),
+               REG_DEF(CURSOR1_FG_COLOR),
+       },
+};
+
+static int lsdc_crtc_show_regs(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+       struct lsdc_device *ldev = lcrtc->ldev;
+       unsigned int i;
+
+       for (i = 0; i < lcrtc->nreg; i++) {
+               const struct lsdc_reg32 *preg = &lcrtc->preg[i];
+               u32 offset = preg->offset;
+
+               seq_printf(m, "%s (0x%04x): 0x%08x\n",
+                          preg->name, offset, lsdc_rreg32(ldev, offset));
+       }
+
+       return 0;
+}
+
+static int lsdc_crtc_show_scan_position(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+       int x, y;
+
+       lcrtc->hw_ops->get_scan_pos(lcrtc, &x, &y);
+       seq_printf(m, "Scanout position: x: %08u, y: %08u\n", x, y);
+
+       return 0;
+}
+
+static int lsdc_crtc_show_vblank_counter(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+
+       if (lcrtc->hw_ops->get_vblank_counter)
+               seq_printf(m, "%s vblank counter: %08u\n\n", lcrtc->base.name,
+                          lcrtc->hw_ops->get_vblank_counter(lcrtc));
+
+       return 0;
+}
+
+static int lsdc_pixpll_show_clock(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+       struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+       const struct lsdc_pixpll_funcs *funcs = pixpll->funcs;
+       struct drm_crtc *crtc = &lcrtc->base;
+       struct drm_display_mode *mode = &crtc->state->mode;
+       struct drm_printer printer = drm_seq_file_printer(m);
+       unsigned int out_khz;
+
+       out_khz = funcs->get_rate(pixpll);
+
+       seq_printf(m, "%s: %dx%d@%d\n", crtc->name,
+                  mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode));
+
+       seq_printf(m, "Pixel clock required: %d kHz\n", mode->clock);
+       seq_printf(m, "Actual frequency output: %u kHz\n", out_khz);
+       seq_printf(m, "Diff: %d kHz\n", out_khz - mode->clock);
+
+       funcs->print(pixpll, &printer);
+
+       return 0;
+}
+
+static struct drm_info_list lsdc_crtc_debugfs_list[2][4] = {
+       [0] = {
+               { "regs", lsdc_crtc_show_regs, 0, NULL },
+               { "pixclk", lsdc_pixpll_show_clock, 0, NULL },
+               { "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
+               { "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
+       },
+       [1] = {
+               { "regs", lsdc_crtc_show_regs, 0, NULL },
+               { "pixclk", lsdc_pixpll_show_clock, 0, NULL },
+               { "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
+               { "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
+       },
+};
+
+/* operate manually */
+
+static int lsdc_crtc_man_op_show(struct seq_file *m, void *data)
+{
+       seq_puts(m, "soft_reset: soft reset this CRTC\n");
+       seq_puts(m, "enable: enable this CRTC\n");
+       seq_puts(m, "disable: disable this CRTC\n");
+       seq_puts(m, "flip: trigger the page flip\n");
+       seq_puts(m, "clone: clone the another crtc with hardware logic\n");
+
+       return 0;
+}
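+
+/*
+ * Example usage (the debugfs path may vary with the DRM minor and the
+ * CRTC index):
+ *
+ *   echo soft_reset > /sys/kernel/debug/dri/0/crtc-0/ops
+ */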
+
+static int lsdc_crtc_man_op_open(struct inode *inode, struct file *file)
+{
+       /* The data passed to debugfs_create_file() is a struct lsdc_crtc */
+       struct lsdc_crtc *lcrtc = inode->i_private;
+
+       return single_open(file, lsdc_crtc_man_op_show, lcrtc);
+}
+
+static ssize_t lsdc_crtc_man_op_write(struct file *file,
+                                     const char __user *ubuf,
+                                     size_t len,
+                                     loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct lsdc_crtc *lcrtc = m->private;
+       const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+       char buf[16];
+
+       if (len > sizeof(buf) - 1)
+               return -EINVAL;
+
+       if (copy_from_user(buf, ubuf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+
+       if (sysfs_streq(buf, "soft_reset"))
+               ops->soft_reset(lcrtc);
+       else if (sysfs_streq(buf, "enable"))
+               ops->enable(lcrtc);
+       else if (sysfs_streq(buf, "disable"))
+               ops->disable(lcrtc);
+       else if (sysfs_streq(buf, "flip"))
+               ops->flip(lcrtc);
+       else if (sysfs_streq(buf, "clone"))
+               ops->clone(lcrtc);
+
+       return len;
+}
+
+static const struct file_operations lsdc_crtc_man_op_fops = {
+       .owner = THIS_MODULE,
+       .open = lsdc_crtc_man_op_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = lsdc_crtc_man_op_write,
+};
+
+static int lsdc_crtc_late_register(struct drm_crtc *crtc)
+{
+       struct lsdc_display_pipe *dispipe = crtc_to_display_pipe(crtc);
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       struct drm_minor *minor = crtc->dev->primary;
+       unsigned int index = dispipe->index;
+       unsigned int i;
+
+       lcrtc->preg = lsdc_crtc_regs_array[index];
+       lcrtc->nreg = ARRAY_SIZE(lsdc_crtc_regs_array[index]);
+       lcrtc->p_info_list = lsdc_crtc_debugfs_list[index];
+       lcrtc->n_info_list = ARRAY_SIZE(lsdc_crtc_debugfs_list[index]);
+
+       for (i = 0; i < lcrtc->n_info_list; ++i)
+               lcrtc->p_info_list[i].data = lcrtc;
+
+       drm_debugfs_create_files(lcrtc->p_info_list, lcrtc->n_info_list,
+                                crtc->debugfs_entry, minor);
+
+       /* Manual operations supported */
+       debugfs_create_file("ops", 0644, crtc->debugfs_entry, lcrtc,
+                           &lsdc_crtc_man_op_fops);
+
+       return 0;
+}
+
+static void lsdc_crtc_atomic_print_state(struct drm_printer *p,
+                                        const struct drm_crtc_state *state)
+{
+       const struct lsdc_crtc_state *priv_state;
+       const struct lsdc_pixpll_parms *pparms;
+
+       priv_state = container_of_const(state, struct lsdc_crtc_state, base);
+       pparms = &priv_state->pparms;
+
+       drm_printf(p, "\tInput clock divider = %u\n", pparms->div_ref);
+       drm_printf(p, "\tMedium clock multiplier = %u\n", pparms->loopc);
+       drm_printf(p, "\tOutput clock divider = %u\n", pparms->div_out);
+}
+
+static const struct drm_crtc_funcs ls7a1000_crtc_funcs = {
+       .reset = lsdc_crtc_reset,
+       .destroy = drm_crtc_cleanup,
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       .atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
+       .atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
+       .late_register = lsdc_crtc_late_register,
+       .enable_vblank = lsdc_crtc_enable_vblank,
+       .disable_vblank = lsdc_crtc_disable_vblank,
+       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+       .atomic_print_state = lsdc_crtc_atomic_print_state,
+};
+
+static const struct drm_crtc_funcs ls7a2000_crtc_funcs = {
+       .reset = lsdc_crtc_reset,
+       .destroy = drm_crtc_cleanup,
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       .atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
+       .atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
+       .late_register = lsdc_crtc_late_register,
+       .get_vblank_counter = lsdc_crtc_get_vblank_counter,
+       .enable_vblank = lsdc_crtc_enable_vblank,
+       .disable_vblank = lsdc_crtc_disable_vblank,
+       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+       .atomic_print_state = lsdc_crtc_atomic_print_state,
+};
+
+static enum drm_mode_status
+lsdc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+       struct drm_device *ddev = crtc->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       const struct lsdc_desc *descp = ldev->descp;
+       unsigned int pitch;
+
+       if (mode->hdisplay > descp->max_width)
+               return MODE_BAD_HVALUE;
+
+       if (mode->vdisplay > descp->max_height)
+               return MODE_BAD_VVALUE;
+
+       if (mode->clock > descp->max_pixel_clk) {
+               drm_dbg_kms(ddev, "mode %dx%d, pixel clock=%d is too high\n",
+                           mode->hdisplay, mode->vdisplay, mode->clock);
+               return MODE_CLOCK_HIGH;
+       }
+
+       /* 4 for DRM_FORMAT_XRGB8888 */
+       pitch = mode->hdisplay * 4;
+
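+       /* e.g. with a 256-byte pitch_align, a 1366-wide mode (5464-byte pitch) is rejected */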
+       if (pitch % descp->pitch_align) {
+               drm_dbg_kms(ddev, "align to %u bytes is required: %u\n",
+                           descp->pitch_align, pitch);
+               return MODE_BAD_WIDTH;
+       }
+
+       return MODE_OK;
+}
+
+static int lsdc_pixpll_atomic_check(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *state)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+       const struct lsdc_pixpll_funcs *pfuncs = pixpll->funcs;
+       struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+       unsigned int clock = state->mode.clock;
+       int ret;
+
+       ret = pfuncs->compute(pixpll, clock, &priv_state->pparms);
+       if (ret) {
+               drm_warn(crtc->dev, "Failed to find PLL params for %ukHz\n",
+                        clock);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int lsdc_crtc_helper_atomic_check(struct drm_crtc *crtc,
+                                        struct drm_atomic_state *state)
+{
+       struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+       if (!crtc_state->enable)
+               return 0;
+
+       return lsdc_pixpll_atomic_check(crtc, crtc_state);
+}
+
+static void lsdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       const struct lsdc_crtc_hw_ops *crtc_hw_ops = lcrtc->hw_ops;
+       struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+       const struct lsdc_pixpll_funcs *pixpll_funcs = pixpll->funcs;
+       struct drm_crtc_state *state = crtc->state;
+       struct drm_display_mode *mode = &state->mode;
+       struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+
+       pixpll_funcs->update(pixpll, &priv_state->pparms);
+
+       if (crtc_hw_ops->set_dma_step) {
+               unsigned int width_in_bytes = mode->hdisplay * 4;
+               enum lsdc_dma_steps dma_step;
+
+               /*
+                * Use a DMA step as large as possible to improve
+                * hardware DMA efficiency.
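+                * A 1920-wide XRGB8888 scanline, for instance, is 7680
+                * bytes, a multiple of 256, so the largest step is used.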
+                */
+               if (width_in_bytes % 256 == 0)
+                       dma_step = LSDC_DMA_STEP_256_BYTES;
+               else if (width_in_bytes % 128 == 0)
+                       dma_step = LSDC_DMA_STEP_128_BYTES;
+               else if (width_in_bytes % 64 == 0)
+                       dma_step = LSDC_DMA_STEP_64_BYTES;
+               else  /* width_in_bytes % 32 == 0 */
+                       dma_step = LSDC_DMA_STEP_32_BYTES;
+
+               crtc_hw_ops->set_dma_step(lcrtc, dma_step);
+       }
+
+       crtc_hw_ops->set_mode(lcrtc, mode);
+}
+
+static void lsdc_crtc_send_vblank(struct drm_crtc *crtc)
+{
+       struct drm_device *ddev = crtc->dev;
+       unsigned long flags;
+
+       if (!crtc->state || !crtc->state->event)
+               return;
+
+       drm_dbg(ddev, "Send vblank manually\n");
+
+       spin_lock_irqsave(&ddev->event_lock, flags);
+       drm_crtc_send_vblank_event(crtc, crtc->state->event);
+       crtc->state->event = NULL;
+       spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void lsdc_crtc_atomic_enable(struct drm_crtc *crtc,
+                                   struct drm_atomic_state *state)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+       if (lcrtc->has_vblank)
+               drm_crtc_vblank_on(crtc);
+
+       lcrtc->hw_ops->enable(lcrtc);
+}
+
+static void lsdc_crtc_atomic_disable(struct drm_crtc *crtc,
+                                    struct drm_atomic_state *state)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+       if (lcrtc->has_vblank)
+               drm_crtc_vblank_off(crtc);
+
+       lcrtc->hw_ops->disable(lcrtc);
+
+       /*
+        * Make sure we issue a vblank event after disabling the CRTC if
+        * someone is waiting on it.
+        */
+       lsdc_crtc_send_vblank(crtc);
+}
+
+static void lsdc_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state)
+{
+       spin_lock_irq(&crtc->dev->event_lock);
+       if (crtc->state->event) {
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+               else
+                       drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+       }
+       spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool lsdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+                                          bool in_vblank_irq,
+                                          int *vpos,
+                                          int *hpos,
+                                          ktime_t *stime,
+                                          ktime_t *etime,
+                                          const struct drm_display_mode *mode)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+       int vsw, vbp, vactive_start, vactive_end, vfp_end;
+       int x, y;
+
+       vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+       vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+       vactive_start = vsw + vbp + 1;
+       vactive_end = vactive_start + mode->crtc_vdisplay;
+
+       /* last scan line before VSYNC */
+       vfp_end = mode->crtc_vtotal;
+
+       if (stime)
+               *stime = ktime_get();
+
+       ops->get_scan_pos(lcrtc, &x, &y);
+
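+       /*
+        * Normalize so that y is zero-based inside the active area and
+        * negative while the scanout is in the vertical blanking period.
+        */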
+       if (y > vactive_end)
+               y = y - vfp_end - vactive_start;
+       else
+               y -= vactive_start;
+
+       *vpos = y;
+       *hpos = 0;
+
+       if (etime)
+               *etime = ktime_get();
+
+       return true;
+}
+
+static const struct drm_crtc_helper_funcs lsdc_crtc_helper_funcs = {
+       .mode_valid = lsdc_crtc_mode_valid,
+       .mode_set_nofb = lsdc_crtc_mode_set_nofb,
+       .atomic_enable = lsdc_crtc_atomic_enable,
+       .atomic_disable = lsdc_crtc_atomic_disable,
+       .atomic_check = lsdc_crtc_helper_atomic_check,
+       .atomic_flush = lsdc_crtc_atomic_flush,
+       .get_scanout_position = lsdc_crtc_get_scanout_position,
+};
+
+int ls7a1000_crtc_init(struct drm_device *ddev,
+                      struct drm_crtc *crtc,
+                      struct drm_plane *primary,
+                      struct drm_plane *cursor,
+                      unsigned int index,
+                      bool has_vblank)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       int ret;
+
+       ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
+       if (ret) {
+               drm_err(ddev, "pixel pll init failed: %d\n", ret);
+               return ret;
+       }
+
+       lcrtc->ldev = to_lsdc(ddev);
+       lcrtc->has_vblank = has_vblank;
+       lcrtc->hw_ops = &ls7a1000_crtc_hw_ops[index];
+
+       ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
+                                       &ls7a1000_crtc_funcs,
+                                       "LS-CRTC-%u", index);
+       if (ret) {
+               drm_err(ddev, "crtc init with planes failed: %d\n", ret);
+               return ret;
+       }
+
+       drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
+
+       ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+       if (ret)
+               return ret;
+
+       drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+
+       return 0;
+}
+
+int ls7a2000_crtc_init(struct drm_device *ddev,
+                      struct drm_crtc *crtc,
+                      struct drm_plane *primary,
+                      struct drm_plane *cursor,
+                      unsigned int index,
+                      bool has_vblank)
+{
+       struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+       int ret;
+
+       ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
+       if (ret) {
+               drm_err(ddev, "crtc init with pll failed: %d\n", ret);
+               return ret;
+       }
+
+       lcrtc->ldev = to_lsdc(ddev);
+       lcrtc->has_vblank = has_vblank;
+       lcrtc->hw_ops = &ls7a2000_crtc_hw_ops[index];
+
+       ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
+                                       &ls7a2000_crtc_funcs,
+                                       "LS-CRTC-%u", index);
+       if (ret) {
+               drm_err(ddev, "crtc init with planes failed: %d\n", ret);
+               return ret;
+       }
+
+       drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
+
+       ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+       if (ret)
+               return ret;
+
+       drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_debugfs.c b/drivers/gpu/drm/loongson/lsdc_debugfs.c
new file mode 100644 (file)
index 0000000..b9c2e6b
--- /dev/null
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_debugfs.h>
+
+#include "lsdc_benchmark.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_probe.h"
+#include "lsdc_ttm.h"
+
+/* device level debugfs */
+
+static int lsdc_identify(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+       const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
+       u8 impl, rev;
+
+       loongson_cpu_get_prid(&impl, &rev);
+
+       seq_printf(m, "Running on cpu 0x%x, cpu revision: 0x%x\n",
+                  impl, rev);
+
+       seq_printf(m, "Contained in: %s\n", gfx->model);
+
+       return 0;
+}
+
+static int lsdc_show_mm(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *ddev = node->minor->dev;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       drm_mm_print(&ddev->vma_offset_manager->vm_addr_space_mm, &p);
+
+       return 0;
+}
+
+static int lsdc_show_gfxpll_clock(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+       struct drm_printer printer = drm_seq_file_printer(m);
+       struct loongson_gfxpll *gfxpll = ldev->gfxpll;
+
+       gfxpll->funcs->print(gfxpll, &printer, true);
+
+       return 0;
+}
+
+static int lsdc_show_benchmark(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+       struct drm_printer printer = drm_seq_file_printer(m);
+
+       lsdc_show_benchmark_copy(ldev, &printer);
+
+       return 0;
+}
+
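+/*
+ * Reading this file prints PCI_COMMAND, re-enables MMIO and IO space
+ * decoding on the DC device, then prints PCI_COMMAND again.
+ */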
+static int lsdc_pdev_enable_io_mem(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+       u16 cmd;
+
+       pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
+
+       seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
+
+       cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+
+       pci_write_config_word(ldev->dc, PCI_COMMAND, cmd);
+
+       pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
+
+       seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
+
+       return 0;
+}
+
+static struct drm_info_list lsdc_debugfs_list[] = {
+       { "benchmark",   lsdc_show_benchmark, 0, NULL },
+       { "bos",         lsdc_show_buffer_object, 0, NULL },
+       { "chips",       lsdc_identify, 0, NULL },
+       { "clocks",      lsdc_show_gfxpll_clock, 0, NULL },
+       { "dc_enable",   lsdc_pdev_enable_io_mem, 0, NULL },
+       { "mm",          lsdc_show_mm, 0, NULL },
+};
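+
+/* These entries typically appear under /sys/kernel/debug/dri/<minor>/ */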
+
+void lsdc_debugfs_init(struct drm_minor *minor)
+{
+       struct drm_device *ddev = minor->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       unsigned int n = ARRAY_SIZE(lsdc_debugfs_list);
+       unsigned int i;
+
+       for (i = 0; i < n; ++i)
+               lsdc_debugfs_list[i].data = ldev;
+
+       drm_debugfs_create_files(lsdc_debugfs_list, n, minor->debugfs_root, minor);
+
+       lsdc_ttm_debugfs_init(ldev);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
new file mode 100644 (file)
index 0000000..188ec82
--- /dev/null
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "loongson_module.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+#define DRIVER_AUTHOR               "Sui Jingfeng <suijingfeng@loongson.cn>"
+#define DRIVER_NAME                 "loongson"
+#define DRIVER_DESC                 "drm driver for loongson graphics"
+#define DRIVER_DATE                 "20220701"
+#define DRIVER_MAJOR                1
+#define DRIVER_MINOR                0
+#define DRIVER_PATCHLEVEL           0
+
+DEFINE_DRM_GEM_FOPS(lsdc_gem_fops);
+
+static const struct drm_driver lsdc_drm_driver = {
+       .driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_GEM | DRIVER_ATOMIC,
+       .fops = &lsdc_gem_fops,
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+
+       .debugfs_init = lsdc_debugfs_init,
+       .dumb_create = lsdc_dumb_create,
+       .dumb_map_offset = lsdc_dumb_map_offset,
+       .gem_prime_import_sg_table = lsdc_prime_import_sg_table,
+};
+
+static const struct drm_mode_config_funcs lsdc_mode_config_funcs = {
+       .fb_create = drm_gem_fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+/* Display related */
+
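+/*
+ * Bring up each display pipe in three passes: create the i2c adapters
+ * first, then the outputs that probe over them, and finally the planes
+ * and CRTCs.
+ */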
+static int lsdc_modeset_init(struct lsdc_device *ldev,
+                            unsigned int num_crtc,
+                            const struct lsdc_kms_funcs *funcs,
+                            bool has_vblank)
+{
+       struct drm_device *ddev = &ldev->base;
+       struct lsdc_display_pipe *dispipe;
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < num_crtc; i++) {
+               dispipe = &ldev->dispipe[i];
+
+               /* We need an index before the CRTC is initialized */
+               dispipe->index = i;
+
+               ret = funcs->create_i2c(ddev, dispipe, i);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < num_crtc; i++) {
+               struct i2c_adapter *ddc = NULL;
+
+               dispipe = &ldev->dispipe[i];
+               if (dispipe->li2c)
+                       ddc = &dispipe->li2c->adapter;
+
+               ret = funcs->output_init(ddev, dispipe, ddc, i);
+               if (ret)
+                       return ret;
+
+               ldev->num_output++;
+       }
+
+       for (i = 0; i < num_crtc; i++) {
+               dispipe = &ldev->dispipe[i];
+
+               ret = funcs->primary_plane_init(ddev, &dispipe->primary.base, i);
+               if (ret)
+                       return ret;
+
+               ret = funcs->cursor_plane_init(ddev, &dispipe->cursor.base, i);
+               if (ret)
+                       return ret;
+
+               ret = funcs->crtc_init(ddev, &dispipe->crtc.base,
+                                      &dispipe->primary.base,
+                                      &dispipe->cursor.base,
+                                      i, has_vblank);
+               if (ret)
+                       return ret;
+       }
+
+       drm_info(ddev, "Total %u outputs\n", ldev->num_output);
+
+       return 0;
+}
+
+static const struct drm_mode_config_helper_funcs lsdc_mode_config_helper_funcs = {
+       .atomic_commit_tail = drm_atomic_helper_commit_tail,
+};
+
+static int lsdc_mode_config_init(struct drm_device *ddev,
+                                const struct lsdc_desc *descp)
+{
+       int ret;
+
+       ret = drmm_mode_config_init(ddev);
+       if (ret)
+               return ret;
+
+       ddev->mode_config.funcs = &lsdc_mode_config_funcs;
+       ddev->mode_config.min_width = 1;
+       ddev->mode_config.min_height = 1;
+       ddev->mode_config.max_width = descp->max_width * LSDC_NUM_CRTC;
+       ddev->mode_config.max_height = descp->max_height * LSDC_NUM_CRTC;
+       ddev->mode_config.preferred_depth = 24;
+       ddev->mode_config.prefer_shadow = 1;
+
+       ddev->mode_config.cursor_width = descp->hw_cursor_w;
+       ddev->mode_config.cursor_height = descp->hw_cursor_h;
+
+       ddev->mode_config.helper_private = &lsdc_mode_config_helper_funcs;
+
+       if (descp->has_vblank_counter)
+               ddev->max_vblank_count = 0xffffffff;
+
+       return ret;
+}
+
+/*
+ * The GPU and the display controller in the LS7A1000/LS7A2000/LS2K2000
+ * are separate PCIe devices, not one. BAR 2 of the GPU device contains
+ * the base address and size of the VRAM; both the GPU and the DC can
+ * access the on-board VRAM.
+ */
+static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
+                                  struct pci_dev *pdev_dc,
+                                  const struct lsdc_desc *descp)
+{
+       struct drm_device *ddev = &ldev->base;
+       struct pci_dev *pdev_gpu;
+       resource_size_t base, size;
+
+       /*
+        * The GPU has 00:06.0 as its BDF, while the DC has 00:06.1.
+        * This is true for the LS7A1000, LS7A2000 and LS2K2000.
+        */
+       pdev_gpu = pci_get_domain_bus_and_slot(pci_domain_nr(pdev_dc->bus),
+                                              pdev_dc->bus->number,
+                                              PCI_DEVFN(6, 0));
+       if (!pdev_gpu) {
+               drm_err(ddev, "No GPU device, then no VRAM\n");
+               return -ENODEV;
+       }
+
+       base = pci_resource_start(pdev_gpu, 2);
+       size = pci_resource_len(pdev_gpu, 2);
+
+       ldev->vram_base = base;
+       ldev->vram_size = size;
+       ldev->gpu = pdev_gpu;
+
+       drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
+                (u64)base, (u32)(size >> 20));
+
+       return 0;
+}
+
+static struct lsdc_device *
+lsdc_create_device(struct pci_dev *pdev,
+                  const struct lsdc_desc *descp,
+                  const struct drm_driver *driver)
+{
+       struct lsdc_device *ldev;
+       struct drm_device *ddev;
+       int ret;
+
+       ldev = devm_drm_dev_alloc(&pdev->dev, driver, struct lsdc_device, base);
+       if (IS_ERR(ldev))
+               return ldev;
+
+       ldev->dc = pdev;
+       ldev->descp = descp;
+
+       ddev = &ldev->base;
+
+       loongson_gfxpll_create(ddev, &ldev->gfxpll);
+
+       ret = lsdc_get_dedicated_vram(ldev, pdev, descp);
+       if (ret) {
+               drm_err(ddev, "Init VRAM failed: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base,
+                                                          ldev->vram_size,
+                                                          driver);
+       if (ret) {
+               drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       ret = lsdc_ttm_init(ldev);
+       if (ret) {
+               drm_err(ddev, "Memory manager init failed: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       lsdc_gem_init(ddev);
+
+       /* BAR 0 of the DC device contains the base address of the MMIO registers */
+       ldev->reg_base = pcim_iomap(pdev, 0, 0);
+       if (!ldev->reg_base)
+               return ERR_PTR(-ENODEV);
+
+       spin_lock_init(&ldev->reglock);
+
+       ret = lsdc_mode_config_init(ddev, descp);
+       if (ret)
+               return ERR_PTR(ret);
+
+       ret = lsdc_modeset_init(ldev, descp->num_of_crtc, descp->funcs,
+                               loongson_vblank);
+       if (ret)
+               return ERR_PTR(ret);
+
+       drm_mode_config_reset(ddev);
+
+       return ldev;
+}
+
+/* Allow multiple GPU driver instances to co-exist in the system */
+
+static unsigned int lsdc_vga_set_decode(struct pci_dev *pdev, bool state)
+{
+       return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       const struct lsdc_desc *descp;
+       struct drm_device *ddev;
+       struct lsdc_device *ldev;
+       int ret;
+
+       descp = lsdc_device_probe(pdev, ent->driver_data);
+       if (IS_ERR_OR_NULL(descp))
+               return -ENODEV;
+
+       pci_set_master(pdev);
+
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       dev_info(&pdev->dev, "Found %s, revision: %u",
+                to_loongson_gfx(descp)->model, pdev->revision);
+
+       ldev = lsdc_create_device(pdev, descp, &lsdc_drm_driver);
+       if (IS_ERR(ldev))
+               return PTR_ERR(ldev);
+
+       ddev = &ldev->base;
+
+       pci_set_drvdata(pdev, ddev);
+
+       vga_client_register(pdev, lsdc_vga_set_decode);
+
+       drm_kms_helper_poll_init(ddev);
+
+       if (loongson_vblank) {
+               ret = drm_vblank_init(ddev, descp->num_of_crtc);
+               if (ret)
+                       return ret;
+
+               ret = devm_request_irq(&pdev->dev, pdev->irq,
+                                      descp->funcs->irq_handler,
+                                      IRQF_SHARED,
+                                      dev_name(&pdev->dev), ddev);
+               if (ret) {
+                       drm_err(ddev, "Failed to register interrupt: %d\n", ret);
+                       return ret;
+               }
+
+               drm_info(ddev, "registered irq: %u\n", pdev->irq);
+       }
+
+       ret = drm_dev_register(ddev, 0);
+       if (ret)
+               return ret;
+
+       drm_fbdev_generic_setup(ddev, 32);
+
+       return 0;
+}
+
+static void lsdc_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *ddev = pci_get_drvdata(pdev);
+
+       drm_dev_unregister(ddev);
+       drm_atomic_helper_shutdown(ddev);
+}
+
+static int lsdc_drm_freeze(struct drm_device *ddev)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct lsdc_bo *lbo;
+       int ret;
+
+       /* Unpin all buffers that are pinned in VRAM */
+       mutex_lock(&ldev->gem.mutex);
+       list_for_each_entry(lbo, &ldev->gem.objects, list) {
+               struct ttm_buffer_object *tbo = &lbo->tbo;
+               struct ttm_resource *resource = tbo->resource;
+               unsigned int pin_count = tbo->pin_count;
+
+               drm_dbg(ddev, "bo[%p], size: %zuKiB, type: %s, pin count: %u\n",
+                       lbo, lsdc_bo_size(lbo) >> 10,
+                       lsdc_mem_type_to_str(resource->mem_type), pin_count);
+
+               if (!pin_count)
+                       continue;
+
+               if (resource->mem_type == TTM_PL_VRAM) {
+                       ret = lsdc_bo_reserve(lbo);
+                       if (unlikely(ret)) {
+                               drm_err(ddev, "bo reserve failed: %d\n", ret);
+                               continue;
+                       }
+
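+                       /* Drop every pin so the BO can be evicted from VRAM */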
+                       do {
+                               lsdc_bo_unpin(lbo);
+                               --pin_count;
+                       } while (pin_count);
+
+                       lsdc_bo_unreserve(lbo);
+               }
+       }
+       mutex_unlock(&ldev->gem.mutex);
+
+       lsdc_bo_evict_vram(ddev);
+
+       ret = drm_mode_config_helper_suspend(ddev);
+       if (unlikely(ret)) {
+               drm_err(ddev, "Freeze error: %d", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int lsdc_drm_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *ddev = pci_get_drvdata(pdev);
+
+       return drm_mode_config_helper_resume(ddev);
+}
+
+static int lsdc_pm_freeze(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *ddev = pci_get_drvdata(pdev);
+
+       return lsdc_drm_freeze(ddev);
+}
+
+static int lsdc_pm_thaw(struct device *dev)
+{
+       return lsdc_drm_resume(dev);
+}
+
+static int lsdc_pm_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int error;
+
+       error = lsdc_pm_freeze(dev);
+       if (error)
+               return error;
+
+       pci_save_state(pdev);
+       /* Shut down the device */
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int lsdc_pm_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       pci_set_power_state(pdev, PCI_D0);
+
+       pci_restore_state(pdev);
+
+       if (pcim_enable_device(pdev))
+               return -EIO;
+
+       return lsdc_pm_thaw(dev);
+}
+
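+/*
+ * freeze/thaw and poweroff/restore serve hibernation, while suspend/resume
+ * serve S3 sleep; the S3 path additionally saves PCI state and powers the
+ * device down to D3hot.
+ */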
+static const struct dev_pm_ops lsdc_pm_ops = {
+       .suspend = lsdc_pm_suspend,
+       .resume = lsdc_pm_resume,
+       .freeze = lsdc_pm_freeze,
+       .thaw = lsdc_pm_thaw,
+       .poweroff = lsdc_pm_freeze,
+       .restore = lsdc_pm_resume,
+};
+
+static const struct pci_device_id lsdc_pciid_list[] = {
+       {PCI_VDEVICE(LOONGSON, 0x7a06), CHIP_LS7A1000},
+       {PCI_VDEVICE(LOONGSON, 0x7a36), CHIP_LS7A2000},
+       { }
+};
+
+struct pci_driver lsdc_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = lsdc_pciid_list,
+       .probe = lsdc_pci_probe,
+       .remove = lsdc_pci_remove,
+       .driver.pm = &lsdc_pm_ops,
+};
+
+MODULE_DEVICE_TABLE(pci, lsdc_pciid_list);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.h b/drivers/gpu/drm/loongson/lsdc_drv.h
new file mode 100644 (file)
index 0000000..fbf2d76
--- /dev/null
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_DRV_H__
+#define __LSDC_DRV_H__
+
+#include <linux/pci.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_plane.h>
+#include <drm/ttm/ttm_device.h>
+
+#include "lsdc_i2c.h"
+#include "lsdc_irq.h"
+#include "lsdc_gfxpll.h"
+#include "lsdc_output.h"
+#include "lsdc_pixpll.h"
+#include "lsdc_regs.h"
+
+/* Currently, all Loongson display controllers have two display pipes. */
+#define LSDC_NUM_CRTC           2
+
+/*
+ * The LS7A1000/LS7A2000 chipsets function as the south & north bridges of
+ * the Loongson 3 series processors and are typically equipped with on-board
+ * video RAM. The Loongson LS2K series, by contrast, are low-cost SoCs which
+ * share the system RAM as video RAM and have no dedicated VRAM.
+ *
+ * There is only a 1:1 mapping of CRTCs, encoders and connectors for the DC:
+ *
+ * display pipe 0 = crtc0 + dvo0 + encoder0 + connector0 + cursor0 + primary0
+ * display pipe 1 = crtc1 + dvo1 + encoder1 + connector1 + cursor1 + primary1
+ */
+
+enum loongson_chip_id {
+       CHIP_LS7A1000 = 0,
+       CHIP_LS7A2000 = 1,
+       CHIP_LS_LAST,
+};
+
+const struct lsdc_desc *
+lsdc_device_probe(struct pci_dev *pdev, enum loongson_chip_id chip);
+
+struct lsdc_kms_funcs;
+
+/* DC specific */
+
+struct lsdc_desc {
+       u32 num_of_crtc;
+       u32 max_pixel_clk;
+       u32 max_width;
+       u32 max_height;
+       u32 num_of_hw_cursor;
+       u32 hw_cursor_w;
+       u32 hw_cursor_h;
+       u32 pitch_align;         /* CRTC DMA alignment constraint */
+       bool has_vblank_counter; /* 32 bit hw vsync counter */
+
+       /* device dependent ops, dc side */
+       const struct lsdc_kms_funcs *funcs;
+};
+
+/* GFX related resources wrangler */
+
+struct loongson_gfx_desc {
+       struct lsdc_desc dc;
+
+       u32 conf_reg_base;
+
+       /* GFXPLL shared by the DC, GMC and GPU */
+       struct {
+               u32 reg_offset;
+               u32 reg_size;
+       } gfxpll;
+
+       /* Pixel PLL, per display pipe */
+       struct {
+               u32 reg_offset;
+               u32 reg_size;
+       } pixpll[LSDC_NUM_CRTC];
+
+       enum loongson_chip_id chip_id;
+       char model[64];
+};
+
+static inline const struct loongson_gfx_desc *
+to_loongson_gfx(const struct lsdc_desc *dcp)
+{
+       return container_of_const(dcp, struct loongson_gfx_desc, dc);
+};
+
+struct lsdc_reg32 {
+       char *name;
+       u32 offset;
+};
+
+/* crtc hardware related ops */
+
+struct lsdc_crtc;
+
+struct lsdc_crtc_hw_ops {
+       void (*enable)(struct lsdc_crtc *lcrtc);
+       void (*disable)(struct lsdc_crtc *lcrtc);
+       void (*enable_vblank)(struct lsdc_crtc *lcrtc);
+       void (*disable_vblank)(struct lsdc_crtc *lcrtc);
+       void (*flip)(struct lsdc_crtc *lcrtc);
+       void (*clone)(struct lsdc_crtc *lcrtc);
+       void (*get_scan_pos)(struct lsdc_crtc *lcrtc, int *hpos, int *vpos);
+       void (*set_mode)(struct lsdc_crtc *lcrtc, const struct drm_display_mode *mode);
+       void (*soft_reset)(struct lsdc_crtc *lcrtc);
+       void (*reset)(struct lsdc_crtc *lcrtc);
+
+       u32  (*get_vblank_counter)(struct lsdc_crtc *lcrtc);
+       void (*set_dma_step)(struct lsdc_crtc *lcrtc, enum lsdc_dma_steps step);
+};
+
+struct lsdc_crtc {
+       struct drm_crtc base;
+       struct lsdc_pixpll pixpll;
+       struct lsdc_device *ldev;
+       const struct lsdc_crtc_hw_ops *hw_ops;
+       const struct lsdc_reg32 *preg;
+       unsigned int nreg;
+       struct drm_info_list *p_info_list;
+       unsigned int n_info_list;
+       bool has_vblank;
+};
+
+/* primary plane hardware related ops */
+
+struct lsdc_primary;
+
+struct lsdc_primary_plane_ops {
+       void (*update_fb_addr)(struct lsdc_primary *plane, u64 addr);
+       void (*update_fb_stride)(struct lsdc_primary *plane, u32 stride);
+       void (*update_fb_format)(struct lsdc_primary *plane,
+                                const struct drm_format_info *format);
+};
+
+struct lsdc_primary {
+       struct drm_plane base;
+       const struct lsdc_primary_plane_ops *ops;
+       struct lsdc_device *ldev;
+};
+
+/* cursor plane hardware related ops */
+
+struct lsdc_cursor;
+
+struct lsdc_cursor_plane_ops {
+       void (*update_bo_addr)(struct lsdc_cursor *plane, u64 addr);
+       void (*update_cfg)(struct lsdc_cursor *plane,
+                          enum lsdc_cursor_size cursor_size,
+                          enum lsdc_cursor_format);
+       void (*update_position)(struct lsdc_cursor *plane, int x, int y);
+};
+
+struct lsdc_cursor {
+       struct drm_plane base;
+       const struct lsdc_cursor_plane_ops *ops;
+       struct lsdc_device *ldev;
+};
+
+struct lsdc_output {
+       struct drm_encoder encoder;
+       struct drm_connector connector;
+};
+
+static inline struct lsdc_output *
+connector_to_lsdc_output(struct drm_connector *connector)
+{
+       return container_of(connector, struct lsdc_output, connector);
+}
+
+static inline struct lsdc_output *
+encoder_to_lsdc_output(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct lsdc_output, encoder);
+}
+
+struct lsdc_display_pipe {
+       struct lsdc_crtc crtc;
+       struct lsdc_primary primary;
+       struct lsdc_cursor cursor;
+       struct lsdc_output output;
+       struct lsdc_i2c *li2c;
+       unsigned int index;
+};
+
+static inline struct lsdc_display_pipe *
+output_to_display_pipe(struct lsdc_output *output)
+{
+       return container_of(output, struct lsdc_display_pipe, output);
+}
+
+struct lsdc_kms_funcs {
+       irqreturn_t (*irq_handler)(int irq, void *arg);
+
+       int (*create_i2c)(struct drm_device *ddev,
+                         struct lsdc_display_pipe *dispipe,
+                         unsigned int index);
+
+       int (*output_init)(struct drm_device *ddev,
+                          struct lsdc_display_pipe *dispipe,
+                          struct i2c_adapter *ddc,
+                          unsigned int index);
+
+       int (*cursor_plane_init)(struct drm_device *ddev,
+                                struct drm_plane *plane,
+                                unsigned int index);
+
+       int (*primary_plane_init)(struct drm_device *ddev,
+                                 struct drm_plane *plane,
+                                 unsigned int index);
+
+       int (*crtc_init)(struct drm_device *ddev,
+                        struct drm_crtc *crtc,
+                        struct drm_plane *primary,
+                        struct drm_plane *cursor,
+                        unsigned int index,
+                        bool has_vblank);
+};
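+
+/*
+ * This vtable is meant to be filled with the chip-specific entry points
+ * declared below (ls7a1000_crtc_init(), ls7a2000_crtc_init(), ...), so
+ * the core KMS code stays chip-agnostic.
+ */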
+
+static inline struct lsdc_crtc *
+to_lsdc_crtc(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct lsdc_crtc, base);
+}
+
+static inline struct lsdc_display_pipe *
+crtc_to_display_pipe(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct lsdc_display_pipe, crtc.base);
+}
+
+static inline struct lsdc_primary *
+to_lsdc_primary(struct drm_plane *plane)
+{
+       return container_of(plane, struct lsdc_primary, base);
+}
+
+static inline struct lsdc_cursor *
+to_lsdc_cursor(struct drm_plane *plane)
+{
+       return container_of(plane, struct lsdc_cursor, base);
+}
+
+struct lsdc_crtc_state {
+       struct drm_crtc_state base;
+       struct lsdc_pixpll_parms pparms;
+};
+
+struct lsdc_gem {
+       /* @mutex: protect objects list */
+       struct mutex mutex;
+       struct list_head objects;
+};
+
+struct lsdc_device {
+       struct drm_device base;
+       struct ttm_device bdev;
+
+       /* @descp: features description of the DC variant */
+       const struct lsdc_desc *descp;
+       struct pci_dev *dc;
+       struct pci_dev *gpu;
+
+       struct loongson_gfxpll *gfxpll;
+
+       /* @reglock: protects concurrent access */
+       spinlock_t reglock;
+
+       void __iomem *reg_base;
+       resource_size_t vram_base;
+       resource_size_t vram_size;
+
+       resource_size_t gtt_base;
+       resource_size_t gtt_size;
+
+       struct lsdc_display_pipe dispipe[LSDC_NUM_CRTC];
+
+       struct lsdc_gem gem;
+
+       u32 irq_status;
+
+       /* tracking pinned memory */
+       size_t vram_pinned_size;
+       size_t gtt_pinned_size;
+
+       /* @num_output: count the number of active display pipe */
+       unsigned int num_output;
+};
+
+static inline struct lsdc_device *tdev_to_ldev(struct ttm_device *bdev)
+{
+       return container_of(bdev, struct lsdc_device, bdev);
+}
+
+static inline struct lsdc_device *to_lsdc(struct drm_device *ddev)
+{
+       return container_of(ddev, struct lsdc_device, base);
+}
+
+static inline struct lsdc_crtc_state *
+to_lsdc_crtc_state(struct drm_crtc_state *base)
+{
+       return container_of(base, struct lsdc_crtc_state, base);
+}
+
+void lsdc_debugfs_init(struct drm_minor *minor);
+
+int ls7a1000_crtc_init(struct drm_device *ddev,
+                      struct drm_crtc *crtc,
+                      struct drm_plane *primary,
+                      struct drm_plane *cursor,
+                      unsigned int index,
+                      bool has_vblank);
+
+int ls7a2000_crtc_init(struct drm_device *ddev,
+                      struct drm_crtc *crtc,
+                      struct drm_plane *primary,
+                      struct drm_plane *cursor,
+                      unsigned int index,
+                      bool has_vblank);
+
+int lsdc_primary_plane_init(struct drm_device *ddev,
+                           struct drm_plane *plane,
+                           unsigned int index);
+
+int ls7a1000_cursor_plane_init(struct drm_device *ddev,
+                              struct drm_plane *plane,
+                              unsigned int index);
+
+int ls7a2000_cursor_plane_init(struct drm_device *ddev,
+                              struct drm_plane *plane,
+                              unsigned int index);
+
+/* Registers access helpers */
+
+static inline u32 lsdc_rreg32(struct lsdc_device *ldev, u32 offset)
+{
+       return readl(ldev->reg_base + offset);
+}
+
+static inline void lsdc_wreg32(struct lsdc_device *ldev, u32 offset, u32 val)
+{
+       writel(val, ldev->reg_base + offset);
+}
+
+static inline void lsdc_ureg32_set(struct lsdc_device *ldev,
+                                  u32 offset,
+                                  u32 mask)
+{
+       void __iomem *addr = ldev->reg_base + offset;
+       u32 val = readl(addr);
+
+       writel(val | mask, addr);
+}
+
+static inline void lsdc_ureg32_clr(struct lsdc_device *ldev,
+                                  u32 offset,
+                                  u32 mask)
+{
+       void __iomem *addr = ldev->reg_base + offset;
+       u32 val = readl(addr);
+
+       writel(val & ~mask, addr);
+}
+
+static inline u32 lsdc_pipe_rreg32(struct lsdc_device *ldev,
+                                  u32 offset, u32 pipe)
+{
+       return readl(ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET);
+}
+
+static inline void lsdc_pipe_wreg32(struct lsdc_device *ldev,
+                                   u32 offset, u32 pipe, u32 val)
+{
+       writel(val, ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET);
+}
+
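+/*
+ * Usage sketch (the 0x1240 offset is hypothetical): per-pipe registers are
+ * mirrored at a fixed stride, so if pipe 0's copy of a register lives at
+ * offset 0x1240, pipe 1's copy is read with
+ *
+ *     val = lsdc_pipe_rreg32(ldev, 0x1240, 1);
+ *
+ * i.e. from 0x1240 + 1 * CRTC_PIPE_OFFSET.
+ */
+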
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
new file mode 100644 (file)
index 0000000..04293df
--- /dev/null
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/dma-buf.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
+{
+       struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
+       int ret;
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret))
+               return ret;
+
+       ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
+       if (likely(ret == 0))
+               lbo->sharing_count++;
+
+       lsdc_bo_unreserve(lbo);
+
+       return ret;
+}
+
+static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
+{
+       struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
+       int ret;
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret))
+               return;
+
+       lsdc_bo_unpin(lbo);
+       if (lbo->sharing_count)
+               lbo->sharing_count--;
+
+       lsdc_bo_unreserve(lbo);
+}
+
+static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+       struct ttm_tt *tt = tbo->ttm;
+
+       if (!tt) {
+               drm_err(obj->dev, "sharing a buffer without backing memory\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
+}
+
+static void lsdc_gem_object_free(struct drm_gem_object *obj)
+{
+       struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+
+       if (tbo)
+               ttm_bo_put(tbo);
+}
+
+static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+       struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+       int ret;
+
+       if (lbo->vmap_count > 0) {
+               ++lbo->vmap_count;
+               goto out;
+       }
+
+       ret = lsdc_bo_pin(lbo, 0, NULL);
+       if (unlikely(ret)) {
+               drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
+               return ret;
+       }
+
+       ret = ttm_bo_vmap(tbo, &lbo->map);
+       if (ret) {
+               drm_err(obj->dev, "ttm bo vmap failed\n");
+               lsdc_bo_unpin(lbo);
+               return ret;
+       }
+
+       lbo->vmap_count = 1;
+
+out:
+       *map = lbo->map;
+
+       return 0;
+}
+
+static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+       struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+       if (unlikely(!lbo->vmap_count)) {
+               drm_warn(obj->dev, "%p is not mapped\n", lbo);
+               return;
+       }
+
+       --lbo->vmap_count;
+       if (lbo->vmap_count == 0) {
+               ttm_bo_vunmap(tbo, &lbo->map);
+
+               lsdc_bo_unpin(lbo);
+       }
+}
+
+static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+       struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+       int ret;
+
+       ret = ttm_bo_mmap_obj(vma, tbo);
+       if (unlikely(ret)) {
+               drm_warn(obj->dev, "mmap %p failed\n", tbo);
+               return ret;
+       }
+
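+       /*
+        * TTM maintains its own refcount for the mapping; drop the GEM
+        * reference taken by the mmap path to avoid double counting.
+        */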
+       drm_gem_object_put(obj);
+
+       return 0;
+}
+
+static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
+       .free = lsdc_gem_object_free,
+       .export = drm_gem_prime_export,
+       .pin = lsdc_gem_prime_pin,
+       .unpin = lsdc_gem_prime_unpin,
+       .get_sg_table = lsdc_gem_prime_get_sg_table,
+       .vmap = lsdc_gem_object_vmap,
+       .vunmap = lsdc_gem_object_vunmap,
+       .mmap = lsdc_gem_object_mmap,
+};
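+
+/*
+ * Note (sketch of intent): the .pin/.unpin hooks above are exercised by
+ * the PRIME export path, which pins the BO into GTT before an importer
+ * maps it, while .vmap/.vunmap serve in-kernel users such as the fbdev
+ * emulation.
+ */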
+
+struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
+                                             u32 domain,
+                                             size_t size,
+                                             bool kernel,
+                                             struct sg_table *sg,
+                                             struct dma_resv *resv)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct drm_gem_object *gobj;
+       struct lsdc_bo *lbo;
+       int ret;
+
+       lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
+       if (IS_ERR(lbo)) {
+               ret = PTR_ERR(lbo);
+               return ERR_PTR(ret);
+       }
+
+       if (!sg) {
+               /* VRAM contains random data after power on, clear the new BO */
+               lsdc_bo_clear(lbo);
+       }
+
+       gobj = &lbo->tbo.base;
+       gobj->funcs = &lsdc_gem_object_funcs;
+
+       /* tracking the BOs we created */
+       mutex_lock(&ldev->gem.mutex);
+       list_add_tail(&lbo->list, &ldev->gem.objects);
+       mutex_unlock(&ldev->gem.mutex);
+
+       return gobj;
+}
+
+struct drm_gem_object *
+lsdc_prime_import_sg_table(struct drm_device *ddev,
+                          struct dma_buf_attachment *attach,
+                          struct sg_table *sg)
+{
+       struct dma_resv *resv = attach->dmabuf->resv;
+       u64 size = attach->dmabuf->size;
+       struct drm_gem_object *gobj;
+       struct lsdc_bo *lbo;
+
+       dma_resv_lock(resv, NULL);
+       gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
+                                     sg, resv);
+       dma_resv_unlock(resv);
+
+       if (IS_ERR(gobj)) {
+               drm_err(ddev, "Failed to import sg table\n");
+               return gobj;
+       }
+
+       lbo = gem_to_lsdc_bo(gobj);
+       lbo->sharing_count = 1;
+
+       return gobj;
+}
+
+int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
+                    struct drm_mode_create_dumb *args)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       const struct lsdc_desc *descp = ldev->descp;
+       u32 domain = LSDC_GEM_DOMAIN_VRAM;
+       struct drm_gem_object *gobj;
+       size_t size;
+       u32 pitch;
+       u32 handle;
+       int ret;
+
+       if (!args->width || !args->height)
+               return -EINVAL;
+
+       if (args->bpp != 32 && args->bpp != 16)
+               return -EINVAL;
+
+       pitch = args->width * args->bpp / 8;
+       pitch = ALIGN(pitch, descp->pitch_align);
+       size = pitch * args->height;
+       size = ALIGN(size, PAGE_SIZE);
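+
+       /*
+        * Example (illustrative): a 1920x1080 XRGB8888 dumb buffer yields
+        * pitch = 1920 * 32 / 8 = 7680 bytes before alignment, and
+        * size = pitch * 1080 rounded up to the page size.
+        */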
+
+       /* The maximum size allowed for a single BO is half of the available VRAM */
+       if (size > ldev->vram_size / 2) {
+               drm_err(ddev, "requested size (%zuMiB) exceeds half of VRAM\n",
+                       size >> 20);
+               return -ENOMEM;
+       }
+
+       gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
+       if (IS_ERR(gobj)) {
+               drm_err(ddev, "Failed to create gem object\n");
+               return PTR_ERR(gobj);
+       }
+
+       ret = drm_gem_handle_create(file, gobj, &handle);
+
+       /* drop reference from allocate, handle holds it now */
+       drm_gem_object_put(gobj);
+       if (ret)
+               return ret;
+
+       args->pitch = pitch;
+       args->size = size;
+       args->handle = handle;
+
+       return 0;
+}
+
+int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
+                        u32 handle, uint64_t *offset)
+{
+       struct drm_gem_object *gobj;
+
+       gobj = drm_gem_object_lookup(filp, handle);
+       if (!gobj)
+               return -ENOENT;
+
+       *offset = drm_vma_node_offset_addr(&gobj->vma_node);
+
+       drm_gem_object_put(gobj);
+
+       return 0;
+}
+
+void lsdc_gem_init(struct drm_device *ddev)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+
+       mutex_init(&ldev->gem.mutex);
+       INIT_LIST_HEAD(&ldev->gem.objects);
+}
+
+int lsdc_show_buffer_object(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *ddev = node->minor->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct lsdc_bo *lbo;
+       unsigned int i;
+
+       mutex_lock(&ldev->gem.mutex);
+
+       i = 0;
+
+       list_for_each_entry(lbo, &ldev->gem.objects, list) {
+               struct ttm_buffer_object *tbo = &lbo->tbo;
+               struct ttm_resource *resource = tbo->resource;
+
+               seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
+                          i, lbo, lsdc_bo_size(lbo) >> 10,
+                          lsdc_mem_type_to_str(resource->mem_type),
+                          lsdc_bo_gpu_offset(lbo));
+               i++;
+       }
+
+       mutex_unlock(&ldev->gem.mutex);
+
+       seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zu KiB\n",
+                  ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.h b/drivers/gpu/drm/loongson/lsdc_gem.h
new file mode 100644 (file)
index 0000000..92cbb10
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_GEM_H__
+#define __LSDC_GEM_H__
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+
+struct drm_gem_object *
+lsdc_prime_import_sg_table(struct drm_device *ddev,
+                          struct dma_buf_attachment *attach,
+                          struct sg_table *sg);
+
+int lsdc_dumb_map_offset(struct drm_file *file,
+                        struct drm_device *dev,
+                        u32 handle,
+                        uint64_t *offset);
+
+int lsdc_dumb_create(struct drm_file *file,
+                    struct drm_device *ddev,
+                    struct drm_mode_create_dumb *args);
+
+void lsdc_gem_init(struct drm_device *ddev);
+int lsdc_show_buffer_object(struct seq_file *m, void *arg);
+
+struct drm_gem_object *
+lsdc_gem_object_create(struct drm_device *ddev,
+                      u32 domain,
+                      size_t size,
+                      bool kernel,
+                      struct sg_table *sg,
+                      struct dma_resv *resv);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_gfxpll.c b/drivers/gpu/drm/loongson/lsdc_gfxpll.c
new file mode 100644 (file)
index 0000000..249c09d
--- /dev/null
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * The GFX PLL clocks the DC, GMC and GPU. Its register layout may change
+ * across chip variants.
+ *
+ *
+ *                                            +-------------+  sel_out_dc
+ *                                       +----| / div_out_0 | _____/ _____ DC
+ *                                       |    +-------------+
+ * refclk   +---------+      +-------+   |    +-------------+  sel_out_gmc
+ * ---+---> | div_ref | ---> | loopc | --+--> | / div_out_1 | _____/ _____ GMC
+ *    |     +---------+      +-------+   |    +-------------+
+ *    |          /               *       |    +-------------+  sel_out_gpu
+ *    |                                  +----| / div_out_2 | _____/ _____ GPU
+ *    |                                       +-------------+
+ *    |                                                         ^
+ *    |                                                         |
+ *    +--------------------------- bypass ----------------------+
+ */
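+
+/*
+ * Worked example (field values hypothetical): with a 100000 kHz reference,
+ * div_ref = 4 and loopc = 40 give 100000 / 4 * 40 = 1000000 kHz at the loop
+ * output; div_out_0 = 2 would then clock the DC at 500 MHz. This is the
+ * computation loongson_gfxpll_get_rates() performs below.
+ */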
+
+struct loongson_gfxpll_bitmap {
+       /* Byte 0 ~ Byte 3 */
+       unsigned div_out_dc    : 7;  /*  6 : 0    DC output clock divider  */
+       unsigned div_out_gmc   : 7;  /* 13 : 7    GMC output clock divider */
+       unsigned div_out_gpu   : 7;  /* 20 : 14   GPU output clock divider */
+       unsigned loopc         : 9;  /* 29 : 21   clock multiplier         */
+       unsigned _reserved_1_  : 2;  /* 31 : 30                            */
+
+       /* Byte 4 ~ Byte 7 */
+       unsigned div_ref       : 7;   /* 38 : 32   Input clock divider    */
+       unsigned locked        : 1;   /* 39        PLL locked indicator   */
+       unsigned sel_out_dc    : 1;   /* 40        dc output clk enable   */
+       unsigned sel_out_gmc   : 1;   /* 41        gmc output clk enable  */
+       unsigned sel_out_gpu   : 1;   /* 42        gpu output clk enable  */
+       unsigned set_param     : 1;   /* 43        Trigger the update     */
+       unsigned bypass        : 1;   /* 44                               */
+       unsigned powerdown     : 1;   /* 45                               */
+       unsigned _reserved_2_  : 18;  /* 46 : 63   no use                 */
+};
+
+union loongson_gfxpll_reg_bitmap {
+       struct loongson_gfxpll_bitmap bitmap;
+       u32 w[2];
+       u64 d;
+};
+
+static void __gfxpll_rreg(struct loongson_gfxpll *this,
+                         union loongson_gfxpll_reg_bitmap *reg)
+{
+#if defined(CONFIG_64BIT)
+       reg->d = readq(this->mmio);
+#else
+       reg->w[0] = readl(this->mmio);
+       reg->w[1] = readl(this->mmio + 4);
+#endif
+}
+
+/* Update new parameters to the hardware */
+
+static int loongson_gfxpll_update(struct loongson_gfxpll * const this,
+                                 struct loongson_gfxpll_parms const *pin)
+{
+       /* Not implemented yet, TODO */
+
+       return 0;
+}
+
+static void loongson_gfxpll_get_rates(struct loongson_gfxpll * const this,
+                                     unsigned int *dc,
+                                     unsigned int *gmc,
+                                     unsigned int *gpu)
+{
+       struct loongson_gfxpll_parms *pparms = &this->parms;
+       union loongson_gfxpll_reg_bitmap gfxpll_reg;
+       unsigned int pre_output;
+       unsigned int dc_mhz;
+       unsigned int gmc_mhz;
+       unsigned int gpu_mhz;
+
+       __gfxpll_rreg(this, &gfxpll_reg);
+
+       pparms->div_ref = gfxpll_reg.bitmap.div_ref;
+       pparms->loopc = gfxpll_reg.bitmap.loopc;
+
+       pparms->div_out_dc = gfxpll_reg.bitmap.div_out_dc;
+       pparms->div_out_gmc = gfxpll_reg.bitmap.div_out_gmc;
+       pparms->div_out_gpu = gfxpll_reg.bitmap.div_out_gpu;
+
+       pre_output = pparms->ref_clock / pparms->div_ref * pparms->loopc;
+
+       dc_mhz = pre_output / pparms->div_out_dc / 1000;
+       gmc_mhz = pre_output / pparms->div_out_gmc / 1000;
+       gpu_mhz = pre_output / pparms->div_out_gpu / 1000;
+
+       if (dc)
+               *dc = dc_mhz;
+
+       if (gmc)
+               *gmc = gmc_mhz;
+
+       if (gpu)
+               *gpu = gpu_mhz;
+}
+
+static void loongson_gfxpll_print(struct loongson_gfxpll * const this,
+                                 struct drm_printer *p,
+                                 bool verbose)
+{
+       struct loongson_gfxpll_parms *parms = &this->parms;
+       unsigned int dc, gmc, gpu;
+
+       if (verbose) {
+               drm_printf(p, "reference clock: %u\n", parms->ref_clock);
+               drm_printf(p, "div_ref = %u\n", parms->div_ref);
+               drm_printf(p, "loopc = %u\n", parms->loopc);
+
+               drm_printf(p, "div_out_dc = %u\n", parms->div_out_dc);
+               drm_printf(p, "div_out_gmc = %u\n", parms->div_out_gmc);
+               drm_printf(p, "div_out_gpu = %u\n", parms->div_out_gpu);
+       }
+
+       this->funcs->get_rates(this, &dc, &gmc, &gpu);
+
+       drm_printf(p, "dc: %uMHz, gmc: %uMHz, gpu: %uMHz\n", dc, gmc, gpu);
+}
+
+/* GFX (DC, GPU, GMC) PLL initialization and destroy function */
+
+static void loongson_gfxpll_fini(struct drm_device *ddev, void *data)
+{
+       struct loongson_gfxpll *this = (struct loongson_gfxpll *)data;
+
+       iounmap(this->mmio);
+
+       kfree(this);
+}
+
+static int loongson_gfxpll_init(struct loongson_gfxpll * const this)
+{
+       struct loongson_gfxpll_parms *pparms = &this->parms;
+       struct drm_printer printer = drm_info_printer(this->ddev->dev);
+
+       pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+       this->mmio = ioremap(this->reg_base, this->reg_size);
+       if (!this->mmio)
+               return -ENOMEM;
+
+       this->funcs->print(this, &printer, false);
+
+       return 0;
+}
+
+static const struct loongson_gfxpll_funcs lsdc_gmc_gpu_funcs = {
+       .init = loongson_gfxpll_init,
+       .update = loongson_gfxpll_update,
+       .get_rates = loongson_gfxpll_get_rates,
+       .print = loongson_gfxpll_print,
+};
+
+int loongson_gfxpll_create(struct drm_device *ddev,
+                          struct loongson_gfxpll **ppout)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
+       struct loongson_gfxpll *this;
+       int ret;
+
+       this = kzalloc(sizeof(*this), GFP_KERNEL);
+       if (!this)
+               return -ENOMEM;
+
+       this->ddev = ddev;
+       this->reg_size = gfx->gfxpll.reg_size;
+       this->reg_base = gfx->conf_reg_base + gfx->gfxpll.reg_offset;
+       this->funcs = &lsdc_gmc_gpu_funcs;
+
+       ret = this->funcs->init(this);
+       if (unlikely(ret)) {
+               kfree(this);
+               return ret;
+       }
+
+       *ppout = this;
+
+       return drmm_add_action_or_reset(ddev, loongson_gfxpll_fini, this);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_gfxpll.h b/drivers/gpu/drm/loongson/lsdc_gfxpll.h
new file mode 100644 (file)
index 0000000..9d59cbf
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_GFXPLL_H__
+#define __LSDC_GFXPLL_H__
+
+#include <drm/drm_device.h>
+
+struct loongson_gfxpll;
+
+struct loongson_gfxpll_parms {
+       unsigned int ref_clock;
+       unsigned int div_ref;
+       unsigned int loopc;
+       unsigned int div_out_dc;
+       unsigned int div_out_gmc;
+       unsigned int div_out_gpu;
+};
+
+struct loongson_gfxpll_funcs {
+       int (*init)(struct loongson_gfxpll * const this);
+
+       int (*update)(struct loongson_gfxpll * const this,
+                     struct loongson_gfxpll_parms const *pin);
+
+       void (*get_rates)(struct loongson_gfxpll * const this,
+                         unsigned int *dc, unsigned int *gmc, unsigned int *gpu);
+
+       void (*print)(struct loongson_gfxpll * const this,
+                     struct drm_printer *printer, bool verbose);
+};
+
+struct loongson_gfxpll {
+       struct drm_device *ddev;
+       void __iomem *mmio;
+
+       /* PLL register offset */
+       u32 reg_base;
+       /* PLL register size in bytes */
+       u32 reg_size;
+
+       const struct loongson_gfxpll_funcs *funcs;
+
+       struct loongson_gfxpll_parms parms;
+};
+
+int loongson_gfxpll_create(struct drm_device *ddev,
+                          struct loongson_gfxpll **ppout);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c
new file mode 100644 (file)
index 0000000..9625d0b
--- /dev/null
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_managed.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * __lsdc_gpio_i2c_set - set the state of a gpio pin indicated by mask
+ * @mask: gpio pin mask
+ * @state: "0" for low, "1" for high
+ */
+static void __lsdc_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state)
+{
+       struct lsdc_device *ldev = to_lsdc(li2c->ddev);
+       unsigned long flags;
+       u8 val;
+
+       spin_lock_irqsave(&ldev->reglock, flags);
+
+       if (state) {
+               /*
+                * Set this pin as input (write "1" to the direction bit);
+                * the external pull-up resistor will pull the level high.
+                */
+               val = readb(li2c->dir_reg);
+               val |= mask;
+               writeb(val, li2c->dir_reg);
+       } else {
+               /* First set this pin as output (write "0" to the direction bit) */
+               val = readb(li2c->dir_reg);
+               val &= ~mask;
+               writeb(val, li2c->dir_reg);
+
+               /* Then, make this pin output 0 */
+               val = readb(li2c->dat_reg);
+               val &= ~mask;
+               writeb(val, li2c->dat_reg);
+       }
+
+       spin_unlock_irqrestore(&ldev->reglock, flags);
+}
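+
+/*
+ * The scheme above emulates an open-drain output: the pin either drives
+ * low or is left floating as an input, with the external pull-up supplying
+ * the high level, which is the signalling an i2c bus expects.
+ */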
+
+/*
+ * __lsdc_gpio_i2c_get - read value back from the gpio pin indicated by mask
+ * @mask: gpio pin mask
+ * return "0" for low, "1" for high
+ */
+static int __lsdc_gpio_i2c_get(struct lsdc_i2c * const li2c, int mask)
+{
+       struct lsdc_device *ldev = to_lsdc(li2c->ddev);
+       unsigned long flags;
+       u8 val;
+
+       spin_lock_irqsave(&ldev->reglock, flags);
+
+       /* First set this pin as input */
+       val = readb(li2c->dir_reg);
+       val |= mask;
+       writeb(val, li2c->dir_reg);
+
+       /* Then get level state from this pin */
+       val = readb(li2c->dat_reg);
+
+       spin_unlock_irqrestore(&ldev->reglock, flags);
+
+       return (val & mask) ? 1 : 0;
+}
+
+static void lsdc_gpio_i2c_set_sda(void *i2c, int state)
+{
+       struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+       /* set state on the li2c->sda pin */
+       return __lsdc_gpio_i2c_set(li2c, li2c->sda, state);
+}
+
+static void lsdc_gpio_i2c_set_scl(void *i2c, int state)
+{
+       struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+       /* set state on the li2c->scl pin */
+       return __lsdc_gpio_i2c_set(li2c, li2c->scl, state);
+}
+
+static int lsdc_gpio_i2c_get_sda(void *i2c)
+{
+       struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+       /* read value from the li2c->sda pin */
+       return __lsdc_gpio_i2c_get(li2c, li2c->sda);
+}
+
+static int lsdc_gpio_i2c_get_scl(void *i2c)
+{
+       struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+       /* read the value from the li2c->scl pin */
+       return __lsdc_gpio_i2c_get(li2c, li2c->scl);
+}
+
+static void lsdc_destroy_i2c(struct drm_device *ddev, void *data)
+{
+       struct lsdc_i2c *li2c = (struct lsdc_i2c *)data;
+
+       if (li2c) {
+               i2c_del_adapter(&li2c->adapter);
+               kfree(li2c);
+       }
+}
+
+/*
+ * lsdc_create_i2c_chan - create a GPIO bit-banged i2c channel
+ * @ddev: pointer to the drm device
+ * @dispipe: the display pipe this i2c channel belongs to
+ * @index: output channel index, 0 for PIPE0, 1 for PIPE1
+ *
+ * The DC in LS7A1000/LS7A2000/LS2K2000 has built-in GPIO hardware.
+ */
+int lsdc_create_i2c_chan(struct drm_device *ddev,
+                        struct lsdc_display_pipe *dispipe,
+                        unsigned int index)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct i2c_adapter *adapter;
+       struct lsdc_i2c *li2c;
+       int ret;
+
+       li2c = kzalloc(sizeof(*li2c), GFP_KERNEL);
+       if (!li2c)
+               return -ENOMEM;
+
+       dispipe->li2c = li2c;
+
+       if (index == 0) {
+               li2c->sda = 0x01;  /* pin 0 */
+               li2c->scl = 0x02;  /* pin 1 */
+       } else if (index == 1) {
+               li2c->sda = 0x04;  /* pin 2 */
+               li2c->scl = 0x08;  /* pin 3 */
+       } else {
+               dispipe->li2c = NULL;
+               kfree(li2c);
+               return -ENOENT;
+       }
+
+       li2c->ddev = ddev;
+       li2c->dir_reg = ldev->reg_base + LS7A_DC_GPIO_DIR_REG;
+       li2c->dat_reg = ldev->reg_base + LS7A_DC_GPIO_DAT_REG;
+
+       li2c->bit.setsda = lsdc_gpio_i2c_set_sda;
+       li2c->bit.setscl = lsdc_gpio_i2c_set_scl;
+       li2c->bit.getsda = lsdc_gpio_i2c_get_sda;
+       li2c->bit.getscl = lsdc_gpio_i2c_get_scl;
+       li2c->bit.udelay = 5;
+       li2c->bit.timeout = usecs_to_jiffies(2200);
+       li2c->bit.data = li2c;
+
+       adapter = &li2c->adapter;
+       adapter->algo_data = &li2c->bit;
+       adapter->owner = THIS_MODULE;
+       adapter->class = I2C_CLASS_DDC;
+       adapter->dev.parent = ddev->dev;
+       adapter->nr = -1;
+
+       snprintf(adapter->name, sizeof(adapter->name), "lsdc-i2c%u", index);
+
+       i2c_set_adapdata(adapter, li2c);
+
+       ret = i2c_bit_add_bus(adapter);
+       if (ret) {
+               dispipe->li2c = NULL;
+               kfree(li2c);
+               return ret;
+       }
+
+       ret = drmm_add_action_or_reset(ddev, lsdc_destroy_i2c, li2c);
+       if (ret)
+               return ret;
+
+       drm_info(ddev, "%s(sda pin mask=%u, scl pin mask=%u) created\n",
+                adapter->name, li2c->sda, li2c->scl);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.h b/drivers/gpu/drm/loongson/lsdc_i2c.h
new file mode 100644 (file)
index 0000000..88cd1a1
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_I2C_H__
+#define __LSDC_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct lsdc_i2c {
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data bit;
+       struct drm_device *ddev;
+       void __iomem *dir_reg;
+       void __iomem *dat_reg;
+       /* pin bit mask */
+       u8 sda;
+       u8 scl;
+};
+
+struct lsdc_display_pipe;
+
+int lsdc_create_i2c_chan(struct drm_device *ddev,
+                        struct lsdc_display_pipe *dispipe,
+                        unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_irq.c b/drivers/gpu/drm/loongson/lsdc_irq.c
new file mode 100644 (file)
index 0000000..efdc4d1
--- /dev/null
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_vblank.h>
+
+#include "lsdc_irq.h"
+
+/*
+ * For the DC in LS7A2000, the interrupt status is cleared by writing "1"
+ * to LSDC_INT_REG.
+ *
+ * For the DC in LS7A1000, it is cleared by writing "0" to LSDC_INT_REG.
+ *
+ * Two different hardware engineers changed it at will.
+ */
+
+irqreturn_t ls7a2000_dc_irq_handler(int irq, void *arg)
+{
+       struct drm_device *ddev = arg;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       /* Read the interrupt status */
+       val = lsdc_rreg32(ldev, LSDC_INT_REG);
+       if ((val & INT_STATUS_MASK) == 0) {
+               drm_warn(ddev, "no interrupt occurred\n");
+               return IRQ_NONE;
+       }
+
+       ldev->irq_status = val;
+
+       /* write "1" to clear the interrupt status */
+       lsdc_wreg32(ldev, LSDC_INT_REG, val);
+
+       if (ldev->irq_status & INT_CRTC0_VSYNC)
+               drm_handle_vblank(ddev, 0);
+
+       if (ldev->irq_status & INT_CRTC1_VSYNC)
+               drm_handle_vblank(ddev, 1);
+
+       return IRQ_HANDLED;
+}
+
+/* For the DC in LS7A1000 and LS2K1000 */
+irqreturn_t ls7a1000_dc_irq_handler(int irq, void *arg)
+{
+       struct drm_device *ddev = arg;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       /* Read the interrupt status */
+       val = lsdc_rreg32(ldev, LSDC_INT_REG);
+       if ((val & INT_STATUS_MASK) == 0) {
+               drm_warn(ddev, "no interrupt occurred\n");
+               return IRQ_NONE;
+       }
+
+       ldev->irq_status = val;
+
+       /* write "0" to clear the interrupt status */
+       val &= ~(INT_CRTC0_VSYNC | INT_CRTC1_VSYNC);
+       lsdc_wreg32(ldev, LSDC_INT_REG, val);
+
+       if (ldev->irq_status & INT_CRTC0_VSYNC)
+               drm_handle_vblank(ddev, 0);
+
+       if (ldev->irq_status & INT_CRTC1_VSYNC)
+               drm_handle_vblank(ddev, 1);
+
+       return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_irq.h b/drivers/gpu/drm/loongson/lsdc_irq.h
new file mode 100644 (file)
index 0000000..726cb30
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_IRQ_H__
+#define __LSDC_IRQ_H__
+
+#include <linux/irqreturn.h>
+
+#include "lsdc_drv.h"
+
+irqreturn_t ls7a1000_dc_irq_handler(int irq, void *arg);
+irqreturn_t ls7a2000_dc_irq_handler(int irq, void *arg);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_output.h b/drivers/gpu/drm/loongson/lsdc_output.h
new file mode 100644 (file)
index 0000000..0977890
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_OUTPUT_H__
+#define __LSDC_OUTPUT_H__
+
+#include "lsdc_drv.h"
+
+int ls7a1000_output_init(struct drm_device *ddev,
+                        struct lsdc_display_pipe *dispipe,
+                        struct i2c_adapter *ddc,
+                        unsigned int index);
+
+int ls7a2000_output_init(struct drm_device *ldev,
+                        struct lsdc_display_pipe *dispipe,
+                        struct i2c_adapter *ddc,
+                        unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
new file mode 100644 (file)
index 0000000..6fc8dd1
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * The display controller in the LS7A1000 exports two DVO interfaces, thus
+ * an external encoder is required unless a DPI panel is connected directly.
+ *
+ *       ___________________                                     _________
+ *      |            -------|                                   |         |
+ *      |  CRTC0 --> | DVO0 ----> Encoder0 ---> Connector0 ---> | Display |
+ *      |  _   _     -------|        ^             ^            |_________|
+ *      | | | | |  +------+ |        |             |
+ *      | |_| |_|  | i2c6 | <--------+-------------+
+ *      |          +------+ |
+ *      |                   |
+ *      |  DC in LS7A1000   |
+ *      |                   |
+ *      |  _   _   +------+ |
+ *      | | | | |  | i2c7 | <--------+-------------+
+ *      | |_| |_|  +------+ |        |             |             _________
+ *      |            -------|        |             |            |         |
+ *      |  CRTC1 --> | DVO1 ----> Encoder1 ---> Connector1 ---> |  Panel  |
+ *      |            -------|                                   |_________|
+ *      |___________________|
+ *
+ * Currently, we assume the external encoders connected to the DVO are
+ * transparent. Loongson's DVO interface can directly drive RGB888 panels.
+ *
+ *  TODO: Add support for non-transparent encoders
+ */
+
+static int ls7a1000_dpi_connector_get_modes(struct drm_connector *conn)
+{
+       unsigned int num = 0;
+       struct edid *edid;
+
+       if (conn->ddc) {
+               edid = drm_get_edid(conn, conn->ddc);
+               if (edid) {
+                       drm_connector_update_edid_property(conn, edid);
+                       num = drm_add_edid_modes(conn, edid);
+                       kfree(edid);
+               }
+
+               return num;
+       }
+
+       num = drm_add_modes_noedid(conn, 1920, 1200);
+
+       drm_set_preferred_mode(conn, 1024, 768);
+
+       return num;
+}
+
+static struct drm_encoder *
+ls7a1000_dpi_connector_get_best_encoder(struct drm_connector *connector,
+                                       struct drm_atomic_state *state)
+{
+       struct lsdc_output *output = connector_to_lsdc_output(connector);
+
+       return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs
+ls7a1000_dpi_connector_helpers = {
+       .atomic_best_encoder = ls7a1000_dpi_connector_get_best_encoder,
+       .get_modes = ls7a1000_dpi_connector_get_modes,
+};
+
+static enum drm_connector_status
+ls7a1000_dpi_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct i2c_adapter *ddc = connector->ddc;
+
+       if (ddc) {
+               if (drm_probe_ddc(ddc))
+                       return connector_status_connected;
+
+               return connector_status_disconnected;
+       }
+
+       return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs ls7a1000_dpi_connector_funcs = {
+       .detect = ls7a1000_dpi_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void ls7a1000_pipe0_encoder_reset(struct drm_encoder *encoder)
+{
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+
+       /*
+        * We need this for S3 support; the screen will not light up on
+        * resume if this register is not set correctly.
+        */
+       lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG,
+                   PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
+}
+
+static void ls7a1000_pipe1_encoder_reset(struct drm_encoder *encoder)
+{
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+
+       /*
+        * We need this for S3 support; the screen will not light up on
+        * resume if this register is not set correctly.
+        */
+
+       /* DVO */
+       lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG,
+                   BIT(31) | PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
+}
+
+static const struct drm_encoder_funcs ls7a1000_encoder_funcs[2] = {
+       {
+               .reset = ls7a1000_pipe0_encoder_reset,
+               .destroy = drm_encoder_cleanup,
+       },
+       {
+               .reset = ls7a1000_pipe1_encoder_reset,
+               .destroy = drm_encoder_cleanup,
+       },
+};
+
+int ls7a1000_output_init(struct drm_device *ddev,
+                        struct lsdc_display_pipe *dispipe,
+                        struct i2c_adapter *ddc,
+                        unsigned int index)
+{
+       struct lsdc_output *output = &dispipe->output;
+       struct drm_encoder *encoder = &output->encoder;
+       struct drm_connector *connector = &output->connector;
+       int ret;
+
+       ret = drm_encoder_init(ddev, encoder, &ls7a1000_encoder_funcs[index],
+                              DRM_MODE_ENCODER_TMDS, "encoder-%u", index);
+       if (ret)
+               return ret;
+
+       encoder->possible_crtcs = BIT(index);
+
+       ret = drm_connector_init_with_ddc(ddev, connector,
+                                         &ls7a1000_dpi_connector_funcs,
+                                         DRM_MODE_CONNECTOR_DPI, ddc);
+       if (ret)
+               return ret;
+
+       drm_info(ddev, "display pipe-%u has a DVO\n", index);
+
+       drm_connector_helper_add(connector, &ls7a1000_dpi_connector_helpers);
+
+       drm_connector_attach_encoder(connector, encoder);
+
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                           DRM_CONNECTOR_POLL_DISCONNECT;
+
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
new file mode 100644 (file)
index 0000000..ce3dabe
--- /dev/null
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * The display controller in LS7A2000 has two display pipes.
+ * Display pipe 0 has a built-in transparent VGA encoder and
+ * a built-in HDMI encoder.
+ * Display pipe 1 has only one built-in HDMI encoder connected.
+ *       ______________________                          _____________
+ *      |             +-----+  |                        |             |
+ *      | CRTC0 -+--> | VGA |  ----> VGA Connector ---> | VGA Monitor |<---+
+ *      |        |    +-----+  |                        |_____________|    |
+ *      |        |             |                         ______________    |
+ *      |        |    +------+ |                        |              |   |
+ *      |        +--> | HDMI | ----> HDMI Connector --> | HDMI Monitor |<--+
+ *      |             +------+ |                        |______________|   |
+ *      |            +------+  |                                           |
+ *      |            | i2c6 |  <-------------------------------------------+
+ *      |            +------+  |
+ *      |                      |
+ *      |    DC in LS7A2000    |
+ *      |                      |
+ *      |            +------+  |
+ *      |            | i2c7 |  <--------------------------------+
+ *      |            +------+  |                                |
+ *      |                      |                          ______|_______
+ *      |            +------+  |                         |              |
+ *      | CRTC1 ---> | HDMI |  ----> HDMI Connector ---> | HDMI Monitor |
+ *      |            +------+  |                         |______________|
+ *      |______________________|
+ */
+
+static int ls7a2000_connector_get_modes(struct drm_connector *connector)
+{
+       unsigned int num = 0;
+       struct edid *edid;
+
+       if (connector->ddc) {
+               edid = drm_get_edid(connector, connector->ddc);
+               if (edid) {
+                       drm_connector_update_edid_property(connector, edid);
+                       num = drm_add_edid_modes(connector, edid);
+                       kfree(edid);
+               }
+
+               return num;
+       }
+
+       num = drm_add_modes_noedid(connector, 1920, 1200);
+
+       drm_set_preferred_mode(connector, 1024, 768);
+
+       return num;
+}
+
+static struct drm_encoder *
+ls7a2000_connector_get_best_encoder(struct drm_connector *connector,
+                                   struct drm_atomic_state *state)
+{
+       struct lsdc_output *output = connector_to_lsdc_output(connector);
+
+       return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs ls7a2000_connector_helpers = {
+       .atomic_best_encoder = ls7a2000_connector_get_best_encoder,
+       .get_modes = ls7a2000_connector_get_modes,
+};
+
+/* debugfs */
+
+#define LSDC_HDMI_REG(i, reg) {                               \
+       .name = __stringify_1(LSDC_HDMI##i##_##reg##_REG),    \
+       .offset = LSDC_HDMI##i##_##reg##_REG,                 \
+}
+
+static const struct lsdc_reg32 ls7a2000_hdmi0_encoder_regs[] = {
+       LSDC_HDMI_REG(0, ZONE),
+       LSDC_HDMI_REG(0, INTF_CTRL),
+       LSDC_HDMI_REG(0, PHY_CTRL),
+       LSDC_HDMI_REG(0, PHY_PLL),
+       LSDC_HDMI_REG(0, AVI_INFO_CRTL),
+       LSDC_HDMI_REG(0, PHY_CAL),
+       LSDC_HDMI_REG(0, AUDIO_PLL_LO),
+       LSDC_HDMI_REG(0, AUDIO_PLL_HI),
+       {NULL, 0},  /* MUST be {NULL, 0} terminated */
+};
+
+static const struct lsdc_reg32 ls7a2000_hdmi1_encoder_regs[] = {
+       LSDC_HDMI_REG(1, ZONE),
+       LSDC_HDMI_REG(1, INTF_CTRL),
+       LSDC_HDMI_REG(1, PHY_CTRL),
+       LSDC_HDMI_REG(1, PHY_PLL),
+       LSDC_HDMI_REG(1, AVI_INFO_CRTL),
+       LSDC_HDMI_REG(1, PHY_CAL),
+       LSDC_HDMI_REG(1, AUDIO_PLL_LO),
+       LSDC_HDMI_REG(1, AUDIO_PLL_HI),
+       {NULL, 0},  /* MUST be {NULL, 0} terminated */
+};
+
+static int ls7a2000_hdmi_encoder_regs_show(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *ddev = node->minor->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       const struct lsdc_reg32 *preg;
+
+       preg = (const struct lsdc_reg32 *)node->info_ent->data;
+
+       while (preg->name) {
+               u32 offset = preg->offset;
+
+               seq_printf(m, "%s (0x%04x): 0x%08x\n",
+                          preg->name, offset, lsdc_rreg32(ldev, offset));
+               ++preg;
+       }
+
+       return 0;
+}
+
+static const struct drm_info_list ls7a2000_hdmi0_debugfs_files[] = {
+       { "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi0_encoder_regs },
+};
+
+static const struct drm_info_list ls7a2000_hdmi1_debugfs_files[] = {
+       { "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi1_encoder_regs },
+};
+
+static void ls7a2000_hdmi0_late_register(struct drm_connector *connector,
+                                        struct dentry *root)
+{
+       struct drm_device *ddev = connector->dev;
+       struct drm_minor *minor = ddev->primary;
+
+       drm_debugfs_create_files(ls7a2000_hdmi0_debugfs_files,
+                                ARRAY_SIZE(ls7a2000_hdmi0_debugfs_files),
+                                root, minor);
+}
+
+static void ls7a2000_hdmi1_late_register(struct drm_connector *connector,
+                                        struct dentry *root)
+{
+       struct drm_device *ddev = connector->dev;
+       struct drm_minor *minor = ddev->primary;
+
+       drm_debugfs_create_files(ls7a2000_hdmi1_debugfs_files,
+                                ARRAY_SIZE(ls7a2000_hdmi1_debugfs_files),
+                                root, minor);
+}
+
+/* monitor present detection */
+
+static enum drm_connector_status
+ls7a2000_hdmi0_vga_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct drm_device *ddev = connector->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);
+
+       if (val & HDMI0_HPD_FLAG)
+               return connector_status_connected;
+
+       if (connector->ddc) {
+               if (drm_probe_ddc(connector->ddc))
+                       return connector_status_connected;
+
+               return connector_status_disconnected;
+       }
+
+       return connector_status_unknown;
+}
+
+static enum drm_connector_status
+ls7a2000_hdmi1_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct lsdc_device *ldev = to_lsdc(connector->dev);
+       u32 val;
+
+       val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);
+
+       if (val & HDMI1_HPD_FLAG)
+               return connector_status_connected;
+
+       return connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs ls7a2000_hdmi_connector_funcs[2] = {
+       {
+               .detect = ls7a2000_hdmi0_vga_connector_detect,
+               .fill_modes = drm_helper_probe_single_connector_modes,
+               .destroy = drm_connector_cleanup,
+               .reset = drm_atomic_helper_connector_reset,
+               .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+               .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+               .debugfs_init = ls7a2000_hdmi0_late_register,
+       },
+       {
+               .detect = ls7a2000_hdmi1_connector_detect,
+               .fill_modes = drm_helper_probe_single_connector_modes,
+               .destroy = drm_connector_cleanup,
+               .reset = drm_atomic_helper_connector_reset,
+               .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+               .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+               .debugfs_init = ls7a2000_hdmi1_late_register,
+       },
+};
+
+/*
+ * Even though some boards have only one HDMI output on display pipe 1,
+ * we still need to hook ls7a2000_encoder_funcs up on display pipe 0.
+ * This is because we need its reset() callback to be called, to set
+ * LSDC_HDMIx_CTRL_REG to use software (GPIO-emulated) i2c. Otherwise,
+ * the firmware may have set LSDC_HDMIx_CTRL_REG blindly.
+ */
+static void ls7a2000_hdmi0_encoder_reset(struct drm_encoder *encoder)
+{
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
+       lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG, val);
+
+       /* using software gpio emulated i2c */
+       val = lsdc_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG);
+       val &= ~HW_I2C_EN;
+       lsdc_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, val);
+
+       /* help the hdmi phy to get out of reset state */
+       lsdc_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, HDMI_PHY_RESET_N);
+
+       mdelay(20);
+
+       drm_dbg(ddev, "HDMI-0 Reset\n");
+}
+
+static void ls7a2000_hdmi1_encoder_reset(struct drm_encoder *encoder)
+{
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
+       lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG, val);
+
+       /* using software gpio emulated i2c */
+       val = lsdc_rreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG);
+       val &= ~HW_I2C_EN;
+       lsdc_wreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG, val);
+
+       /*  help the hdmi phy to get out of reset state */
+       lsdc_wreg32(ldev, LSDC_HDMI1_PHY_CTRL_REG, HDMI_PHY_RESET_N);
+
+       mdelay(20);
+
+       drm_dbg(ddev, "HDMI-1 Reset\n");
+}
+
+static const struct drm_encoder_funcs ls7a2000_encoder_funcs[2] = {
+       {
+               .reset = ls7a2000_hdmi0_encoder_reset,
+               .destroy = drm_encoder_cleanup,
+       },
+       {
+               .reset = ls7a2000_hdmi1_encoder_reset,
+               .destroy = drm_encoder_cleanup,
+       },
+};
+
+static int ls7a2000_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+                                          struct drm_display_mode *mode)
+{
+       struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+       struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+       unsigned int index = dispipe->index;
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct hdmi_avi_infoframe infoframe;
+       u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+       unsigned char *ptr = &buffer[HDMI_INFOFRAME_HEADER_SIZE];
+       unsigned int content0, content1, content2, content3;
+       int err;
+
+       err = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
+                                                      &output->connector,
+                                                      mode);
+       if (err < 0) {
+               drm_err(ddev, "failed to setup AVI infoframe: %d\n", err);
+               return err;
+       }
+
+       /* Fixed infoframe configuration not linked to the mode */
+       infoframe.colorspace = HDMI_COLORSPACE_RGB;
+       infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+       infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
+
+       err = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+       if (err < 0) {
+               drm_err(ddev, "failed to pack AVI infoframe: %d\n", err);
+               return err;
+       }
+
+       content0 = *(unsigned int *)ptr;
+       content1 = *(ptr + 4);
+       content2 = *(unsigned int *)(ptr + 5);
+       content3 = *(unsigned int *)(ptr + 9);
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT0, index, content0);
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT1, index, content1);
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT2, index, content2);
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT3, index, content3);
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_INFO_CRTL_REG, index,
+                        AVI_PKT_ENABLE | AVI_PKT_UPDATE);
+
+       drm_dbg(ddev, "Update HDMI-%u avi infoframe\n", index);
+
+       return 0;
+}
+
+static void ls7a2000_hdmi_atomic_disable(struct drm_encoder *encoder,
+                                        struct drm_atomic_state *state)
+{
+       struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+       struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+       unsigned int index = dispipe->index;
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       u32 val;
+
+       /* Disable the hdmi phy */
+       val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index);
+       val &= ~HDMI_PHY_EN;
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);
+
+       /* Disable the hdmi interface */
+       val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index);
+       val &= ~HDMI_INTERFACE_EN;
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);
+
+       drm_dbg(ddev, "HDMI-%u disabled\n", index);
+}
+
+static void ls7a2000_hdmi_atomic_enable(struct drm_encoder *encoder,
+                                       struct drm_atomic_state *state)
+{
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+       struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+       unsigned int index = dispipe->index;
+       u32 val;
+
+       /* the datasheet says it should be larger than 48 */
+       val = 64 << HDMI_H_ZONE_IDLE_SHIFT | 64 << HDMI_V_ZONE_IDLE_SHIFT;
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_ZONE_REG, index, val);
+
+       val = HDMI_PHY_TERM_STATUS |
+             HDMI_PHY_TERM_DET_EN |
+             HDMI_PHY_TERM_H_EN |
+             HDMI_PHY_TERM_L_EN |
+             HDMI_PHY_RESET_N |
+             HDMI_PHY_EN;
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);
+
+       udelay(2);
+
+       val = HDMI_CTL_PERIOD_MODE |
+             HDMI_AUDIO_EN |
+             HDMI_PACKET_EN |
+             HDMI_INTERFACE_EN |
+             (8 << HDMI_VIDEO_PREAMBLE_SHIFT);
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);
+
+       drm_dbg(ddev, "HDMI-%u enabled\n", index);
+}
+
+/*
+ *  Fout = M * Fin
+ *
+ *  M = (4 * LF) / (IDF * ODF)
+ *
+ *  IDF: Input Division Factor
+ *  ODF: Output Division Factor
+ *   LF: Loop Factor
+ *    M: Required Mult
+ *
+ *  +--------------------------------------------------------+
+ *  |     Fin (kHZ)     | M  | IDF | LF | ODF |   Fout(Mhz)  |
+ *  |-------------------+----+-----+----+-----+--------------|
+ *  |  170000 ~ 340000  | 10 | 16  | 40 |  1  | 1700 ~ 3400  |
+ *  |   85000 ~ 170000  | 10 |  8  | 40 |  2  |  850 ~ 1700  |
+ *  |   42500 ~  85000  | 10 |  4  | 40 |  4  |  425 ~ 850   |
+ *  |   21250 ~  42500  | 10 |  2  | 40 |  8  | 212.5 ~ 425  |
+ *  |   20000 ~  21250  | 10 |  1  | 40 | 16  |  200 ~ 212.5 |
+ *  +--------------------------------------------------------+
+ */
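+
+/*
+ * Worked example (mode hypothetical): a 1080p@60 mode has a pixel clock of
+ * 148500 kHz, which falls in the 85000 ~ 170000 row above, so IDF = 8,
+ * LF = 40, ODF = 2, M = (4 * 40) / (8 * 2) = 10 and Fout = 1485 MHz.
+ */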
+static void ls7a2000_hdmi_phy_pll_config(struct lsdc_device *ldev,
+                                        int fin,
+                                        unsigned int index)
+{
+       struct drm_device *ddev = &ldev->base;
+       int count = 0;
+       u32 val;
+
+       /* Firstly, disable phy pll */
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, 0x0);
+
+       /*
+        * Most of the time, the loongson HDMI PHY requires M = 10,
+        * for example, 10 = (4 * 40) / (8 * 2).
+        * Note that writing "1" to the ODF field yields a divisor of 2.
+        */
+
+       if (fin >= 170000)
+               val = (16 << HDMI_PLL_IDF_SHIFT) |
+                     (40 << HDMI_PLL_LF_SHIFT) |
+                     (0 << HDMI_PLL_ODF_SHIFT);
+       else if (fin >= 85000)
+               val = (8 << HDMI_PLL_IDF_SHIFT) |
+                     (40 << HDMI_PLL_LF_SHIFT) |
+                     (1 << HDMI_PLL_ODF_SHIFT);
+       else if (fin >= 42500)
+               val = (4 << HDMI_PLL_IDF_SHIFT) |
+                     (40 << HDMI_PLL_LF_SHIFT) |
+                     (2 << HDMI_PLL_ODF_SHIFT);
+       else if (fin >= 21250)
+               val = (2 << HDMI_PLL_IDF_SHIFT) |
+                     (40 << HDMI_PLL_LF_SHIFT) |
+                     (3 << HDMI_PLL_ODF_SHIFT);
+       else
+               val = (1 << HDMI_PLL_IDF_SHIFT) |
+                     (40 << HDMI_PLL_LF_SHIFT) |
+                     (4 << HDMI_PLL_ODF_SHIFT);
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);
+
+       val |= HDMI_PLL_ENABLE;
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);
+
+       udelay(2);
+
+       drm_dbg(ddev, "Fin of HDMI-%u: %d kHz\n", index, fin);
+
+       /* Wait for the HDMI PHY PLL to lock */
+       do {
+               val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index);
+
+               if (val & HDMI_PLL_LOCKED) {
+                       drm_dbg(ddev, "Setting HDMI-%u PLL took %d cycles\n",
+                               index, count);
+                       break;
+               }
+               ++count;
+       } while (count < 1000);
+
+       lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CAL_REG, index, 0x0f000ff0);
+
+       if (count >= 1000)
+               drm_err(ddev, "Setting HDMI-%u PLL failed\n", index);
+}
+
+static void ls7a2000_hdmi_atomic_mode_set(struct drm_encoder *encoder,
+                                         struct drm_crtc_state *crtc_state,
+                                         struct drm_connector_state *conn_state)
+{
+       struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+       struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+       unsigned int index = dispipe->index;
+       struct drm_device *ddev = encoder->dev;
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct drm_display_mode *mode = &crtc_state->mode;
+
+       ls7a2000_hdmi_phy_pll_config(ldev, mode->clock, index);
+
+       ls7a2000_hdmi_set_avi_infoframe(encoder, mode);
+
+       drm_dbg(ddev, "%s modeset finished\n", encoder->name);
+}
+
+static const struct drm_encoder_helper_funcs ls7a2000_encoder_helper_funcs = {
+       .atomic_disable = ls7a2000_hdmi_atomic_disable,
+       .atomic_enable = ls7a2000_hdmi_atomic_enable,
+       .atomic_mode_set = ls7a2000_hdmi_atomic_mode_set,
+};
+
+/*
+ * For LS7A2000:
+ *
+ * 1) Most boards export one VGA + one HDMI output interface.
+ * 2) Yet, some boards export two HDMI output interfaces.
+ * 3) Still other boards export three outputs (2 HDMI + 1 VGA).
+ *
+ * So hook the HDMI helper funcs to all display pipes, without missing
+ * any; writing the HDMI registers does no harm.
+ */
+int ls7a2000_output_init(struct drm_device *ddev,
+                        struct lsdc_display_pipe *dispipe,
+                        struct i2c_adapter *ddc,
+                        unsigned int pipe)
+{
+       struct lsdc_output *output = &dispipe->output;
+       struct drm_encoder *encoder = &output->encoder;
+       struct drm_connector *connector = &output->connector;
+       int ret;
+
+       ret = drm_encoder_init(ddev, encoder, &ls7a2000_encoder_funcs[pipe],
+                              DRM_MODE_ENCODER_TMDS, "encoder-%u", pipe);
+       if (ret)
+               return ret;
+
+       encoder->possible_crtcs = BIT(pipe);
+
+       drm_encoder_helper_add(encoder, &ls7a2000_encoder_helper_funcs);
+
+       ret = drm_connector_init_with_ddc(ddev, connector,
+                                         &ls7a2000_hdmi_connector_funcs[pipe],
+                                         DRM_MODE_CONNECTOR_HDMIA, ddc);
+       if (ret)
+               return ret;
+
+       drm_info(ddev, "display pipe-%u has HDMI %s\n", pipe, pipe ? "" : "and/or VGA");
+
+       drm_connector_helper_add(connector, &ls7a2000_connector_helpers);
+
+       drm_connector_attach_encoder(connector, encoder);
+
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                           DRM_CONNECTOR_POLL_DISCONNECT;
+
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
new file mode 100644 (file)
index 0000000..04c15b4
--- /dev/null
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * The layout of the pixel PLL registers has evolved over time; it can
+ * also differ from chip to chip.
+ */
+
+/*
+ * The register is 64 bits wide; note that all Loongson CPUs are little
+ * endian. This structure is the same for LS7A2000, LS7A1000 and LS2K2000.
+ */
+struct lsdc_pixpll_reg {
+       /* Byte 0 ~ Byte 3 */
+       unsigned div_out       : 7;   /*  6 : 0     Output clock divider  */
+       unsigned _reserved_1_  : 14;  /* 20 : 7                           */
+       unsigned loopc         : 9;   /* 29 : 21    Clock multiplier      */
+       unsigned _reserved_2_  : 2;   /* 31 : 30                          */
+
+       /* Byte 4 ~ Byte 7 */
+       unsigned div_ref       : 7;   /* 38 : 32    Input clock divider   */
+       unsigned locked        : 1;   /* 39         PLL locked indicator  */
+       unsigned sel_out       : 1;   /* 40         output clk selector   */
+       unsigned _reserved_3_  : 2;   /* 42 : 41                          */
+       unsigned set_param     : 1;   /* 43         Trigger the update    */
+       unsigned bypass        : 1;   /* 44                               */
+       unsigned powerdown     : 1;   /* 45                               */
+       unsigned _reserved_4_  : 18;  /* 46 : 63    no use                */
+};
+
+union lsdc_pixpll_reg_bitmap {
+       struct lsdc_pixpll_reg bitmap;
+       u32 w[2];
+       u64 d;
+};
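+
+/*
+ * A minimal access sketch (illustrative only): on 64-bit kernels the
+ * whole register can be read at once and the fields tested by name,
+ * instead of open-coding mask-and-shift pairs:
+ *
+ *   union lsdc_pixpll_reg_bitmap reg;
+ *
+ *   reg.d = readq(pll_mmio);
+ *   if (reg.bitmap.locked)
+ *           ...
+ *
+ * "pll_mmio" is a hypothetical ioremap'ed pointer to the PLL register.
+ */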
+
+struct clk_to_pixpll_parms_lookup_t {
+       unsigned int clock;        /* kHz */
+
+       unsigned short width;
+       unsigned short height;
+       unsigned short vrefresh;
+
+       /* Stores parameters for programming the Hardware PLLs */
+       unsigned short div_out;
+       unsigned short loopc;
+       unsigned short div_ref;
+};
+
+static const struct clk_to_pixpll_parms_lookup_t pixpll_parms_table[] = {
+       {148500, 1920, 1080, 60,  11, 49,  3},   /* 1920x1080@60Hz */
+       {141750, 1920, 1080, 60,  11, 78,  5},   /* 1920x1080@60Hz */
+                                                /* 1920x1080@50Hz */
+       {174500, 1920, 1080, 75,  17, 89,  3},   /* 1920x1080@75Hz */
+       {181250, 2560, 1080, 75,  8,  58,  4},   /* 2560x1080@75Hz */
+       {297000, 2560, 1080, 30,  8,  95,  4},   /* 3840x2160@30Hz */
+       {301992, 1920, 1080, 100, 10, 151, 5},   /* 1920x1080@100Hz */
+       {146250, 1680, 1050, 60,  16, 117, 5},   /* 1680x1050@60Hz */
+       {135000, 1280, 1024, 75,  10, 54,  4},   /* 1280x1024@75Hz */
+       {119000, 1680, 1050, 60,  20, 119, 5},   /* 1680x1050@60Hz */
+       {108000, 1600, 900,  60,  15, 81,  5},   /* 1600x900@60Hz  */
+                                                /* 1280x1024@60Hz */
+                                                /* 1280x960@60Hz */
+                                                /* 1152x864@75Hz */
+
+       {106500, 1440, 900,  60,  19, 81,  4},   /* 1440x900@60Hz */
+       {88750,  1440, 900,  60,  16, 71,  5},   /* 1440x900@60Hz */
+       {83500,  1280, 800,  60,  17, 71,  5},   /* 1280x800@60Hz */
+       {71000,  1280, 800,  60,  20, 71,  5},   /* 1280x800@60Hz */
+
+       {74250,  1280, 720,  60,  22, 49,  3},   /* 1280x720@60Hz */
+                                                /* 1280x720@50Hz */
+
+       {78750,  1024, 768,  75,  16, 63,  5},   /* 1024x768@75Hz */
+       {75000,  1024, 768,  70,  29, 87,  4},   /* 1024x768@70Hz */
+       {65000,  1024, 768,  60,  20, 39,  3},   /* 1024x768@60Hz */
+
+       {51200,  1024, 600,  60,  25, 64,  5},   /* 1024x600@60Hz */
+
+       {57284,  832,  624,  75,  24, 55,  4},   /* 832x624@75Hz */
+       {49500,  800,  600,  75,  40, 99,  5},   /* 800x600@75Hz */
+       {50000,  800,  600,  72,  44, 88,  4},   /* 800x600@72Hz */
+       {40000,  800,  600,  60,  30, 36,  3},   /* 800x600@60Hz */
+       {36000,  800,  600,  56,  50, 72,  4},   /* 800x600@56Hz */
+       {31500,  640,  480,  75,  40, 63,  5},   /* 640x480@75Hz */
+                                                /* 640x480@73Hz */
+
+       {30240,  640,  480,  67,  62, 75,  4},   /* 640x480@67Hz */
+       {27000,  720,  576,  50,  50, 54,  4},   /* 720x576@50Hz */
+       {25175,  640,  480,  60,  85, 107, 5},   /* 640x480@60Hz */
+       {25200,  640,  480,  60,  50, 63,  5},   /* 640x480@60Hz */
+                                                /* 720x480@60Hz */
+};
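+
+/*
+ * Sanity check of one entry (illustrative): the 148500 kHz row gives
+ * outclk = 100000 / 3 * 49 / 11 = 148485 kHz (the driver's integer
+ * math yields 148483), well within the 1000 kHz tolerance used by
+ * lsdc_pixel_pll_compute() below.
+ */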
+
+static void lsdc_pixel_pll_free(struct drm_device *ddev, void *data)
+{
+       struct lsdc_pixpll *this = (struct lsdc_pixpll *)data;
+
+       iounmap(this->mmio);
+
+       kfree(this->priv);
+
+       drm_dbg(ddev, "pixpll private data freed\n");
+}
+
+/*
+ * ioremap the device-dependent PLL registers
+ *
+ * @this: pointer to the object from which this function is called
+ */
+static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
+{
+       struct lsdc_pixpll_parms *pparms;
+
+       this->mmio = ioremap(this->reg_base, this->reg_size);
+       if (IS_ERR_OR_NULL(this->mmio))
+               return -ENOMEM;
+
+       pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
+       if (IS_ERR_OR_NULL(pparms))
+               return -ENOMEM;
+
+       pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+       this->priv = pparms;
+
+       return drmm_add_action_or_reset(this->ddev, lsdc_pixel_pll_free, this);
+}
+
+/*
+ * Find a set of PLL parameters in a static local table, which avoids
+ * computing the PLL parameters each time a modeset is triggered.
+ *
+ * @this: pointer to the object from which this function is called
+ * @clock: the desired output pixel clock, in kHz
+ * @pout: pointer to where the parameters are stored if found
+ *
+ * Return 0 on success, -1 if not found.
+ */
+static int lsdc_pixpll_find(struct lsdc_pixpll * const this,
+                           unsigned int clock,
+                           struct lsdc_pixpll_parms *pout)
+{
+       unsigned int num = ARRAY_SIZE(pixpll_parms_table);
+       const struct clk_to_pixpll_parms_lookup_t *pt;
+       unsigned int i;
+
+       for (i = 0; i < num; ++i) {
+               pt = &pixpll_parms_table[i];
+
+               if (clock == pt->clock) {
+                       pout->div_ref = pt->div_ref;
+                       pout->loopc   = pt->loopc;
+                       pout->div_out = pt->div_out;
+
+                       return 0;
+               }
+       }
+
+       drm_dbg_kms(this->ddev, "pixel clock %u: miss\n", clock);
+
+       return -1;
+}
+
+/*
+ * Find the set of PLL parameters with the minimal difference from the
+ * desired pixel clock frequency. It does so by walking all possible
+ * combinations, computing the difference for each and keeping the
+ * combination with the smallest one.
+ *
+ * clock_out = refclk / div_ref * loopc / div_out
+ *
+ * refclk is determined by the oscillator mounted on the motherboard
+ * (100 MHz on almost all boards).
+ *
+ * @this: pointer to the object from which this function is called
+ * @clock: the desired output pixel clock, in kHz
+ * @pout: pointer to the output struct of lsdc_pixpll_parms
+ *
+ * Return 0 if a set of parameters is found, otherwise return the error
+ * between the desired clock and the closest candidate found.
+ */
+static int lsdc_pixel_pll_compute(struct lsdc_pixpll * const this,
+                                 unsigned int clock,
+                                 struct lsdc_pixpll_parms *pout)
+{
+       struct lsdc_pixpll_parms *pparms = this->priv;
+       unsigned int refclk = pparms->ref_clock;
+       const unsigned int tolerance = 1000;
+       unsigned int min = tolerance;
+       unsigned int div_out, loopc, div_ref;
+       unsigned int computed;
+
+       if (!lsdc_pixpll_find(this, clock, pout))
+               return 0;
+
+       for (div_out = 6; div_out < 64; div_out++) {
+               for (div_ref = 3; div_ref < 6; div_ref++) {
+                       for (loopc = 6; loopc < 161; loopc++) {
+                               unsigned int diff = 0;
+
+                               if (loopc < 12 * div_ref)
+                                       continue;
+                               if (loopc > 32 * div_ref)
+                                       continue;
+
+                               computed = refclk / div_ref * loopc / div_out;
+
+                               if (clock >= computed)
+                                       diff = clock - computed;
+                               else
+                                       diff = computed - clock;
+
+                               if (diff < min) {
+                                       min = diff;
+                                       pparms->div_ref = div_ref;
+                                       pparms->div_out = div_out;
+                                       pparms->loopc = loopc;
+
+                                       if (diff == 0) {
+                                               *pout = *pparms;
+                                               return 0;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /* still acceptable */
+       if (min < tolerance) {
+               *pout = *pparms;
+               return 0;
+       }
+
+       drm_dbg(this->ddev, "can't find suitable params for %u kHz\n", clock);
+
+       return min;
+}
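+
+/*
+ * Why the loop bounds in lsdc_pixel_pll_compute() above: with the
+ * 100 MHz reference clock, constraining
+ * 12 * div_ref <= loopc <= 32 * div_ref keeps
+ * Fvco = refclk / div_ref * loopc between 1.2 GHz and 3.2 GHz, matching
+ * the PLL working requirements documented in lsdc_pixpll.h.
+ */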
+
+/* Pixel pll hardware related ops, per display pipe */
+
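+/*
+ * On 32-bit kernels readq()/writeq() are generally not available, so the
+ * 64-bit PLL register is accessed as two consecutive 32-bit words; this
+ * is safe because the hardware is little endian (see the union above).
+ */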
+static void __pixpll_rreg(struct lsdc_pixpll *this,
+                         union lsdc_pixpll_reg_bitmap *dst)
+{
+#if defined(CONFIG_64BIT)
+       dst->d = readq(this->mmio);
+#else
+       dst->w[0] = readl(this->mmio);
+       dst->w[1] = readl(this->mmio + 4);
+#endif
+}
+
+static void __pixpll_wreg(struct lsdc_pixpll *this,
+                         union lsdc_pixpll_reg_bitmap *src)
+{
+#if defined(CONFIG_64BIT)
+       writeq(src->d, this->mmio);
+#else
+       writel(src->w[0], this->mmio);
+       writel(src->w[1], this->mmio + 4);
+#endif
+}
+
+static void __pixpll_ops_powerup(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.powerdown = 0;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_powerdown(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.powerdown = 1;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_on(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.sel_out = 1;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_off(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.sel_out = 0;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_bypass(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.bypass = 1;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_unbypass(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.bypass = 0;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_untoggle_param(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.set_param = 0;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_set_param(struct lsdc_pixpll * const this,
+                                  struct lsdc_pixpll_parms const *p)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.div_ref = p->div_ref;
+       pixpll_reg.bitmap.loopc = p->loopc;
+       pixpll_reg.bitmap.div_out = p->div_out;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_toggle_param(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+       __pixpll_rreg(this, &pixpll_reg);
+
+       pixpll_reg.bitmap.set_param = 1;
+
+       __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_wait_locked(struct lsdc_pixpll * const this)
+{
+       union lsdc_pixpll_reg_bitmap pixpll_reg;
+       unsigned int counter = 0;
+
+       do {
+               __pixpll_rreg(this, &pixpll_reg);
+
+               if (pixpll_reg.bitmap.locked)
+                       break;
+
+               ++counter;
+       } while (counter < 2000);
+
+       drm_dbg(this->ddev, "waited %u loops\n", counter);
+}
+
+/*
+ * Write the PLL parameters to the PLL hardware
+ *
+ * @this: pointer to the object from which this function is called
+ * @pin: pointer to the struct of lsdc_pixpll_parms passed in
+ *
+ * Return 0 if successful.
+ */
+static int lsdc_pixpll_update(struct lsdc_pixpll * const this,
+                             struct lsdc_pixpll_parms const *pin)
+{
+       __pixpll_ops_bypass(this);
+
+       __pixpll_ops_off(this);
+
+       __pixpll_ops_powerdown(this);
+
+       __pixpll_ops_toggle_param(this);
+
+       __pixpll_ops_set_param(this, pin);
+
+       __pixpll_ops_untoggle_param(this);
+
+       __pixpll_ops_powerup(this);
+
+       udelay(2);
+
+       __pixpll_ops_wait_locked(this);
+
+       __pixpll_ops_on(this);
+
+       __pixpll_ops_unbypass(this);
+
+       return 0;
+}
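+
+/*
+ * A minimal usage sketch (illustrative only; "pixpll" and "parms" are
+ * hypothetical locals, assuming the instance was set up through
+ * lsdc_pixpll_init() below):
+ *
+ *   struct lsdc_pixpll_parms parms;
+ *
+ *   if (pixpll->funcs->compute(pixpll, mode->clock, &parms) == 0)
+ *           pixpll->funcs->update(pixpll, &parms);
+ */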
+
+static unsigned int lsdc_pixpll_get_freq(struct lsdc_pixpll * const this)
+{
+       struct lsdc_pixpll_parms *ppar = this->priv;
+       union lsdc_pixpll_reg_bitmap pix_pll_reg;
+       unsigned int freq;
+
+       __pixpll_rreg(this, &pix_pll_reg);
+
+       ppar->div_ref = pix_pll_reg.bitmap.div_ref;
+       ppar->loopc = pix_pll_reg.bitmap.loopc;
+       ppar->div_out = pix_pll_reg.bitmap.div_out;
+
+       freq = ppar->ref_clock / ppar->div_ref * ppar->loopc / ppar->div_out;
+
+       return freq;
+}
+
+static void lsdc_pixpll_print(struct lsdc_pixpll * const this,
+                             struct drm_printer *p)
+{
+       struct lsdc_pixpll_parms *parms = this->priv;
+
+       drm_printf(p, "div_ref: %u, loopc: %u, div_out: %u\n",
+                  parms->div_ref, parms->loopc, parms->div_out);
+}
+
+/*
+ * The pixel PLL setting registers of LS7A1000, LS7A2000 and LS2K2000 are
+ * the same, so we take this as the default; create a new instance if a
+ * different model is introduced.
+ */
+static const struct lsdc_pixpll_funcs __pixpll_default_funcs = {
+       .setup = lsdc_pixel_pll_setup,
+       .compute = lsdc_pixel_pll_compute,
+       .update = lsdc_pixpll_update,
+       .get_rate = lsdc_pixpll_get_freq,
+       .print = lsdc_pixpll_print,
+};
+
+/* pixel pll initialization */
+
+int lsdc_pixpll_init(struct lsdc_pixpll * const this,
+                    struct drm_device *ddev,
+                    unsigned int index)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       const struct lsdc_desc *descp = ldev->descp;
+       const struct loongson_gfx_desc *gfx = to_loongson_gfx(descp);
+
+       this->ddev = ddev;
+       this->reg_size = 8;
+       this->reg_base = gfx->conf_reg_base + gfx->pixpll[index].reg_offset;
+       this->funcs = &__pixpll_default_funcs;
+
+       return this->funcs->setup(this);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.h b/drivers/gpu/drm/loongson/lsdc_pixpll.h
new file mode 100644 (file)
index 0000000..ec3486d
--- /dev/null
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_PIXPLL_H__
+#define __LSDC_PIXPLL_H__
+
+#include <drm/drm_device.h>
+
+/*
+ * Loongson Pixel PLL hardware structure
+ *
+ * refclk: reference frequency, 100 MHz from an external oscillator
+ * outclk: the desired output frequency.
+ *
+ *
+ *               L1       Fref                      Fvco     L2
+ * refclk   +-----------+      +------------------+      +---------+   outclk
+ * ---+---> | Prescaler | ---> | Clock Multiplier | ---> | divider | -------->
+ *    |     +-----------+      +------------------+      +---------+     ^
+ *    |           ^                      ^                    ^          |
+ *    |           |                      |                    |          |
+ *    |           |                      |                    |          |
+ *    |        div_ref                 loopc               div_out       |
+ *    |                                                                  |
+ *    +---- bypass (bypass above software configurable clock if set) ----+
+ *
+ *   outclk = refclk / div_ref * loopc / div_out;
+ *
+ *   sel_out: PLL clock output selector (enable).
+ *
+ *   If sel_out == 1, the output clock is enabled (turned on);
+ *   if sel_out == 0, the output clock is disabled (turned off).
+ *
+ * PLL working requirements:
+ *
+ *  1) 20 MHz <= refclk / div_ref <= 40 MHz
+ *  2) 1.2 GHz <= refclk / div_ref * loopc <= 3.2 GHz
+ */
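+
+/*
+ * Worked example (illustrative only): div_ref = 4, loopc = 60 and
+ * div_out = 10 satisfy both requirements, since refclk / div_ref = 25 MHz
+ * and Fvco = 25 MHz * 60 = 1.5 GHz; the result is outclk = 150 MHz.
+ */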
+
+struct lsdc_pixpll_parms {
+       unsigned int ref_clock;
+       unsigned int div_ref;
+       unsigned int loopc;
+       unsigned int div_out;
+};
+
+struct lsdc_pixpll;
+
+struct lsdc_pixpll_funcs {
+       int (*setup)(struct lsdc_pixpll * const this);
+
+       int (*compute)(struct lsdc_pixpll * const this,
+                      unsigned int clock,
+                      struct lsdc_pixpll_parms *pout);
+
+       int (*update)(struct lsdc_pixpll * const this,
+                     struct lsdc_pixpll_parms const *pin);
+
+       unsigned int (*get_rate)(struct lsdc_pixpll * const this);
+
+       void (*print)(struct lsdc_pixpll * const this,
+                     struct drm_printer *printer);
+};
+
+struct lsdc_pixpll {
+       const struct lsdc_pixpll_funcs *funcs;
+
+       struct drm_device *ddev;
+
+       /* PLL register base (physical address) */
+       u32 reg_base;
+       /* PLL register size in bytes */
+       u32 reg_size;
+
+       void __iomem *mmio;
+
+       struct lsdc_pixpll_parms *priv;
+};
+
+int lsdc_pixpll_init(struct lsdc_pixpll * const this,
+                    struct drm_device *ddev,
+                    unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
new file mode 100644 (file)
index 0000000..0d50946
--- /dev/null
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_regs.h"
+#include "lsdc_ttm.h"
+
+static const u32 lsdc_primary_formats[] = {
+       DRM_FORMAT_XRGB8888,
+};
+
+static const u32 lsdc_cursor_formats[] = {
+       DRM_FORMAT_ARGB8888,
+};
+
+static const u64 lsdc_fb_format_modifiers[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID
+};
+
+static unsigned int lsdc_get_fb_offset(struct drm_framebuffer *fb,
+                                      struct drm_plane_state *state)
+{
+       unsigned int offset = fb->offsets[0];
+
+       offset += fb->format->cpp[0] * (state->src_x >> 16);
+       offset += fb->pitches[0] * (state->src_y >> 16);
+
+       return offset;
+}
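+
+/*
+ * Worked example for the helper above (illustrative only): for an
+ * XRGB8888 fb (cpp = 4 bytes) with a pitch of 8192 bytes and a 16.16
+ * fixed-point source offset of (8, 2), scanout starts at
+ * offsets[0] + 4 * 8 + 8192 * 2 = offsets[0] + 16416 bytes.
+ */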
+
+static u64 lsdc_fb_base_addr(struct drm_framebuffer *fb)
+{
+       struct lsdc_device *ldev = to_lsdc(fb->dev);
+       struct lsdc_bo *lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+       return lsdc_bo_gpu_offset(lbo) + ldev->vram_base;
+}
+
+static int lsdc_primary_atomic_check(struct drm_plane *plane,
+                                    struct drm_atomic_state *state)
+{
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct drm_crtc *crtc = new_plane_state->crtc;
+       struct drm_crtc_state *new_crtc_state;
+
+       if (!crtc)
+               return 0;
+
+       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+       return drm_atomic_helper_check_plane_state(new_plane_state,
+                                                  new_crtc_state,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  false, true);
+}
+
+static void lsdc_primary_atomic_update(struct drm_plane *plane,
+                                      struct drm_atomic_state *state)
+{
+       struct lsdc_primary *primary = to_lsdc_primary(plane);
+       const struct lsdc_primary_plane_ops *ops = primary->ops;
+       struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct drm_framebuffer *new_fb = new_plane_state->fb;
+       struct drm_framebuffer *old_fb = old_plane_state->fb;
+       u64 fb_addr = lsdc_fb_base_addr(new_fb);
+
+       fb_addr += lsdc_get_fb_offset(new_fb, new_plane_state);
+
+       ops->update_fb_addr(primary, fb_addr);
+       ops->update_fb_stride(primary, new_fb->pitches[0]);
+
+       if (!old_fb || old_fb->format != new_fb->format)
+               ops->update_fb_format(primary, new_fb->format);
+}
+
+static void lsdc_primary_atomic_disable(struct drm_plane *plane,
+                                       struct drm_atomic_state *state)
+{
+       /*
+        * Do nothing here; this just prevents a call into atomic_update().
+        * Writing LSDC_PF_NONE as the format could disable the primary,
+        * but that seems unnecessary...
+        */
+       drm_dbg(plane->dev, "%s disabled\n", plane->name);
+}
+
+static int lsdc_plane_prepare_fb(struct drm_plane *plane,
+                                struct drm_plane_state *new_state)
+{
+       struct drm_framebuffer *fb = new_state->fb;
+       struct lsdc_bo *lbo;
+       u64 gpu_vaddr;
+       int ret;
+
+       if (!fb)
+               return 0;
+
+       lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret)) {
+               drm_err(plane->dev, "bo %p reserve failed\n", lbo);
+               return ret;
+       }
+
+       ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_VRAM, &gpu_vaddr);
+
+       lsdc_bo_unreserve(lbo);
+
+       if (unlikely(ret)) {
+               drm_err(plane->dev, "bo %p pin failed\n", lbo);
+               return ret;
+       }
+
+       lsdc_bo_ref(lbo);
+
+       if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               drm_dbg(plane->dev,
+                       "%s[%p] pin at 0x%llx, bo size: %zu\n",
+                       plane->name, lbo, gpu_vaddr, lsdc_bo_size(lbo));
+
+       return drm_gem_plane_helper_prepare_fb(plane, new_state);
+}
+
+static void lsdc_plane_cleanup_fb(struct drm_plane *plane,
+                                 struct drm_plane_state *old_state)
+{
+       struct drm_framebuffer *fb = old_state->fb;
+       struct lsdc_bo *lbo;
+       int ret;
+
+       if (!fb)
+               return;
+
+       lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret)) {
+               drm_err(plane->dev, "%p reserve failed\n", lbo);
+               return;
+       }
+
+       lsdc_bo_unpin(lbo);
+
+       lsdc_bo_unreserve(lbo);
+
+       lsdc_bo_unref(lbo);
+
+       if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               drm_dbg(plane->dev, "%s unpin\n", plane->name);
+}
+
+static const struct drm_plane_helper_funcs lsdc_primary_helper_funcs = {
+       .prepare_fb = lsdc_plane_prepare_fb,
+       .cleanup_fb = lsdc_plane_cleanup_fb,
+       .atomic_check = lsdc_primary_atomic_check,
+       .atomic_update = lsdc_primary_atomic_update,
+       .atomic_disable = lsdc_primary_atomic_disable,
+};
+
+static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
+                                               struct drm_atomic_state *state)
+{
+       struct drm_plane_state *new_state;
+       struct drm_crtc_state *crtc_state;
+
+       new_state = drm_atomic_get_new_plane_state(state, plane);
+
+       if (!plane->state || !plane->state->fb) {
+               drm_dbg(plane->dev, "%s: state is NULL\n", plane->name);
+               return -EINVAL;
+       }
+
+       if (new_state->crtc_w != new_state->crtc_h) {
+               drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+                       new_state->crtc_w, new_state->crtc_h);
+               return -EINVAL;
+       }
+
+       if (new_state->crtc_w != 64 && new_state->crtc_w != 32) {
+               drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+                       new_state->crtc_w, new_state->crtc_h);
+               return -EINVAL;
+       }
+
+       crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+       if (!crtc_state->active)
+               return -EINVAL;
+
+       if (plane->state->crtc != new_state->crtc ||
+           plane->state->src_w != new_state->src_w ||
+           plane->state->src_h != new_state->src_h ||
+           plane->state->crtc_w != new_state->crtc_w ||
+           plane->state->crtc_h != new_state->crtc_h)
+               return -EINVAL;
+
+       if (new_state->visible != plane->state->visible)
+               return -EINVAL;
+
+       return drm_atomic_helper_check_plane_state(plane->state,
+                                                  crtc_state,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  true, true);
+}
+
+static void lsdc_cursor_plane_atomic_async_update(struct drm_plane *plane,
+                                                 struct drm_atomic_state *state)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+       struct drm_framebuffer *old_fb = plane->state->fb;
+       struct drm_framebuffer *new_fb;
+       struct drm_plane_state *new_state;
+
+       new_state = drm_atomic_get_new_plane_state(state, plane);
+
+       new_fb = plane->state->fb;
+
+       plane->state->crtc_x = new_state->crtc_x;
+       plane->state->crtc_y = new_state->crtc_y;
+       plane->state->crtc_h = new_state->crtc_h;
+       plane->state->crtc_w = new_state->crtc_w;
+       plane->state->src_x = new_state->src_x;
+       plane->state->src_y = new_state->src_y;
+       plane->state->src_h = new_state->src_h;
+       plane->state->src_w = new_state->src_w;
+       swap(plane->state->fb, new_state->fb);
+
+       if (new_state->visible) {
+               enum lsdc_cursor_size cursor_size;
+
+               switch (new_state->crtc_w) {
+               case 64:
+                       cursor_size = CURSOR_SIZE_64X64;
+                       break;
+               case 32:
+                       cursor_size = CURSOR_SIZE_32X32;
+                       break;
+               default:
+                       cursor_size = CURSOR_SIZE_32X32;
+                       break;
+               }
+
+               ops->update_position(cursor, new_state->crtc_x, new_state->crtc_y);
+
+               ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);
+
+               if (!old_fb || old_fb != new_fb)
+                       ops->update_bo_addr(cursor, lsdc_fb_base_addr(new_fb));
+       }
+}
+
+/* ls7a1000 cursor plane helpers */
+
+static int ls7a1000_cursor_plane_atomic_check(struct drm_plane *plane,
+                                             struct drm_atomic_state *state)
+{
+       struct drm_plane_state *new_plane_state;
+       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc *crtc;
+
+       new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+       crtc = new_plane_state->crtc;
+       if (!crtc) {
+               drm_dbg(plane->dev, "%s is not bound to a crtc\n", plane->name);
+               return 0;
+       }
+
+       if (new_plane_state->crtc_w != 32 || new_plane_state->crtc_h != 32) {
+               drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+                       new_plane_state->crtc_w, new_plane_state->crtc_h);
+               return -EINVAL;
+       }
+
+       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+       return drm_atomic_helper_check_plane_state(new_plane_state,
+                                                  new_crtc_state,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  true, true);
+}
+
+static void ls7a1000_cursor_plane_atomic_update(struct drm_plane *plane,
+                                               struct drm_atomic_state *state)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct drm_framebuffer *new_fb = new_plane_state->fb;
+       struct drm_framebuffer *old_fb = old_plane_state->fb;
+       const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+       u64 addr = lsdc_fb_base_addr(new_fb);
+
+       if (!new_plane_state->visible)
+               return;
+
+       ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);
+
+       if (!old_fb || old_fb != new_fb)
+               ops->update_bo_addr(cursor, addr);
+
+       ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_ARGB8888);
+}
+
+static void ls7a1000_cursor_plane_atomic_disable(struct drm_plane *plane,
+                                                struct drm_atomic_state *state)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+
+       ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_DISABLE);
+}
+
+static const struct drm_plane_helper_funcs ls7a1000_cursor_plane_helper_funcs = {
+       .prepare_fb = lsdc_plane_prepare_fb,
+       .cleanup_fb = lsdc_plane_cleanup_fb,
+       .atomic_check = ls7a1000_cursor_plane_atomic_check,
+       .atomic_update = ls7a1000_cursor_plane_atomic_update,
+       .atomic_disable = ls7a1000_cursor_plane_atomic_disable,
+       .atomic_async_check = lsdc_cursor_plane_atomic_async_check,
+       .atomic_async_update = lsdc_cursor_plane_atomic_async_update,
+};
+
+/* ls7a2000 cursor plane helpers */
+
+static int ls7a2000_cursor_plane_atomic_check(struct drm_plane *plane,
+                                             struct drm_atomic_state *state)
+{
+       struct drm_plane_state *new_plane_state;
+       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc *crtc;
+
+       new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+       crtc = new_plane_state->crtc;
+       if (!crtc) {
+               drm_dbg(plane->dev, "%s is not bound to a crtc\n", plane->name);
+               return 0;
+       }
+
+       if (new_plane_state->crtc_w != new_plane_state->crtc_h) {
+               drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+                       new_plane_state->crtc_w, new_plane_state->crtc_h);
+               return -EINVAL;
+       }
+
+       if (new_plane_state->crtc_w != 64 && new_plane_state->crtc_w != 32) {
+               drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+                       new_plane_state->crtc_w, new_plane_state->crtc_h);
+               return -EINVAL;
+       }
+
+       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+       return drm_atomic_helper_check_plane_state(new_plane_state,
+                                                  new_crtc_state,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  DRM_PLANE_NO_SCALING,
+                                                  true, true);
+}
+
+/* Update the format, size and location of the cursor */
+
+static void ls7a2000_cursor_plane_atomic_update(struct drm_plane *plane,
+                                               struct drm_atomic_state *state)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct drm_framebuffer *new_fb = new_plane_state->fb;
+       struct drm_framebuffer *old_fb = old_plane_state->fb;
+       const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+       enum lsdc_cursor_size cursor_size;
+
+       if (!new_plane_state->visible)
+               return;
+
+       ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);
+
+       if (!old_fb || new_fb != old_fb) {
+               u64 addr = lsdc_fb_base_addr(new_fb);
+
+               ops->update_bo_addr(cursor, addr);
+       }
+
+       switch (new_plane_state->crtc_w) {
+       case 64:
+               cursor_size = CURSOR_SIZE_64X64;
+               break;
+       case 32:
+               cursor_size = CURSOR_SIZE_32X32;
+               break;
+       default:
+               cursor_size = CURSOR_SIZE_64X64;
+               break;
+       }
+
+       ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);
+}
+
+static void ls7a2000_cursor_plane_atomic_disable(struct drm_plane *plane,
+                                                struct drm_atomic_state *state)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       const struct lsdc_cursor_plane_ops *hw_ops = cursor->ops;
+
+       hw_ops->update_cfg(cursor, CURSOR_SIZE_64X64, CURSOR_FORMAT_DISABLE);
+}
+
+static const struct drm_plane_helper_funcs ls7a2000_cursor_plane_helper_funcs = {
+       .prepare_fb = lsdc_plane_prepare_fb,
+       .cleanup_fb = lsdc_plane_cleanup_fb,
+       .atomic_check = ls7a2000_cursor_plane_atomic_check,
+       .atomic_update = ls7a2000_cursor_plane_atomic_update,
+       .atomic_disable = ls7a2000_cursor_plane_atomic_disable,
+       .atomic_async_check = lsdc_cursor_plane_atomic_async_check,
+       .atomic_async_update = lsdc_cursor_plane_atomic_async_update,
+};
+
+static void lsdc_plane_atomic_print_state(struct drm_printer *p,
+                                         const struct drm_plane_state *state)
+{
+       struct drm_framebuffer *fb = state->fb;
+       u64 addr;
+
+       if (!fb)
+               return;
+
+       addr = lsdc_fb_base_addr(fb);
+
+       drm_printf(p, "\tdma addr=%llx\n", addr);
+}
+
+static const struct drm_plane_funcs lsdc_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = drm_plane_cleanup,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .atomic_print_state = lsdc_plane_atomic_print_state,
+};
+
+/* Primary plane 0 hardware related ops  */
+
+static void lsdc_primary0_update_fb_addr(struct lsdc_primary *primary, u64 addr)
+{
+       struct lsdc_device *ldev = primary->ldev;
+       u32 status;
+       u32 lo, hi;
+
+       /* 40-bit width physical address bus */
+       lo = addr & 0xFFFFFFFF;
+       hi = (addr >> 32) & 0xFF;
+
+       status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+       if (status & FB_REG_IN_USING) {
+               lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_LO_REG, lo);
+               lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_HI_REG, hi);
+       } else {
+               lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_LO_REG, lo);
+               lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_HI_REG, hi);
+       }
+}
+
+static void lsdc_primary0_update_fb_stride(struct lsdc_primary *primary, u32 stride)
+{
+       struct lsdc_device *ldev = primary->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_STRIDE_REG, stride);
+}
+
+static void lsdc_primary0_update_fb_format(struct lsdc_primary *primary,
+                                          const struct drm_format_info *format)
+{
+       struct lsdc_device *ldev = primary->ldev;
+       u32 status;
+
+       status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+       /*
+        * TODO: add RGB565 support; only XRGB8888 is supported at present
+        */
+       status &= ~CFG_PIX_FMT_MASK;
+       status |= LSDC_PF_XRGB8888;
+
+       lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, status);
+}
+
+/* Primary plane 1 hardware related ops */
+
+static void lsdc_primary1_update_fb_addr(struct lsdc_primary *primary, u64 addr)
+{
+       struct lsdc_device *ldev = primary->ldev;
+       u32 status;
+       u32 lo, hi;
+
+       /* 40-bit width physical address bus */
+       lo = addr & 0xFFFFFFFF;
+       hi = (addr >> 32) & 0xFF;
+
+       status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+       if (status & FB_REG_IN_USING) {
+               lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_LO_REG, lo);
+               lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_HI_REG, hi);
+       } else {
+               lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_LO_REG, lo);
+               lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_HI_REG, hi);
+       }
+}
+
+static void lsdc_primary1_update_fb_stride(struct lsdc_primary *primary, u32 stride)
+{
+       struct lsdc_device *ldev = primary->ldev;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_STRIDE_REG, stride);
+}
+
+static void lsdc_primary1_update_fb_format(struct lsdc_primary *primary,
+                                          const struct drm_format_info *format)
+{
+       struct lsdc_device *ldev = primary->ldev;
+       u32 status;
+
+       status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+       /*
+        * TODO: add RGB565 support; only XRGB8888 is supported at present
+        */
+       status &= ~CFG_PIX_FMT_MASK;
+       status |= LSDC_PF_XRGB8888;
+
+       lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, status);
+}
+
+static const struct lsdc_primary_plane_ops lsdc_primary_plane_hw_ops[2] = {
+       {
+               .update_fb_addr = lsdc_primary0_update_fb_addr,
+               .update_fb_stride = lsdc_primary0_update_fb_stride,
+               .update_fb_format = lsdc_primary0_update_fb_format,
+       },
+       {
+               .update_fb_addr = lsdc_primary1_update_fb_addr,
+               .update_fb_stride = lsdc_primary1_update_fb_stride,
+               .update_fb_format = lsdc_primary1_update_fb_format,
+       },
+};
+
+/*
+ * Update the location, format, enable and disable state of the cursor.
+ * On chips with two hardware cursors, cursor 0 is attached to CRTC-0 and
+ * cursor 1 is attached to CRTC-1. Compositing the primary plane and the
+ * cursor plane is done automatically by hardware; the cursor is always on
+ * top of the primary plane. In other words, the z-order is fixed in
+ * hardware and cannot be changed. On old DCs with only one hardware
+ * cursor, we make it shared by the two screens; this works in extended
+ * screen mode.
+ */
+
+/* cursor plane 0 (for pipe 0) related hardware ops */
+
+static void lsdc_cursor0_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       /* 40-bit width physical address bus */
+       lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
+       lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor0_update_position(struct lsdc_cursor *cursor, int x, int y)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       if (x < 0)
+               x = 0;
+
+       if (y < 0)
+               y = 0;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor0_update_cfg(struct lsdc_cursor *cursor,
+                                   enum lsdc_cursor_size cursor_size,
+                                   enum lsdc_cursor_format fmt)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+       u32 cfg;
+
+       cfg = CURSOR_ON_CRTC0 << CURSOR_LOCATION_SHIFT |
+             cursor_size << CURSOR_SIZE_SHIFT |
+             fmt << CURSOR_FORMAT_SHIFT;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
+}
+
+/* cursor plane 1 (for pipe 1) related hardware ops */
+
+static void lsdc_cursor1_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       /* 40-bit width physical address bus */
+       lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_HI_REG, (addr >> 32) & 0xFF);
+       lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor1_update_position(struct lsdc_cursor *cursor, int x, int y)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       if (x < 0)
+               x = 0;
+
+       if (y < 0)
+               y = 0;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR1_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor1_update_cfg(struct lsdc_cursor *cursor,
+                                   enum lsdc_cursor_size cursor_size,
+                                   enum lsdc_cursor_format fmt)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+       u32 cfg;
+
+       cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
+             cursor_size << CURSOR_SIZE_SHIFT |
+             fmt << CURSOR_FORMAT_SHIFT;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR1_CFG_REG, cfg);
+}
+
+/* Since LS7A2000/LS2K2000, the hardware cursors work normally (one per pipe) */
+
+static const struct lsdc_cursor_plane_ops ls7a2000_cursor_hw_ops[2] = {
+       {
+               .update_bo_addr = lsdc_cursor0_update_bo_addr,
+               .update_cfg = lsdc_cursor0_update_cfg,
+               .update_position = lsdc_cursor0_update_position,
+       },
+       {
+               .update_bo_addr = lsdc_cursor1_update_bo_addr,
+               .update_cfg = lsdc_cursor1_update_cfg,
+               .update_position = lsdc_cursor1_update_position,
+       },
+};
+
+/* Quirks for cursor 1, only for old loongson display controller */
+
+static void lsdc_cursor1_update_bo_addr_quirk(struct lsdc_cursor *cursor, u64 addr)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       /* 40-bit width physical address bus */
+       lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
+       lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor1_update_position_quirk(struct lsdc_cursor *cursor, int x, int y)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+
+       if (x < 0)
+               x = 0;
+
+       if (y < 0)
+               y = 0;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor1_update_cfg_quirk(struct lsdc_cursor *cursor,
+                                         enum lsdc_cursor_size cursor_size,
+                                         enum lsdc_cursor_format fmt)
+{
+       struct lsdc_device *ldev = cursor->ldev;
+       u32 cfg;
+
+       cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
+             cursor_size << CURSOR_SIZE_SHIFT |
+             fmt << CURSOR_FORMAT_SHIFT;
+
+       lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
+}
+
+/*
+ * The unforgiving LS7A1000/LS2K1000 have only one hardware cursor plane
+ */
+static const struct lsdc_cursor_plane_ops ls7a1000_cursor_hw_ops[2] = {
+       {
+               .update_bo_addr = lsdc_cursor0_update_bo_addr,
+               .update_cfg = lsdc_cursor0_update_cfg,
+               .update_position = lsdc_cursor0_update_position,
+       },
+       {
+               .update_bo_addr = lsdc_cursor1_update_bo_addr_quirk,
+               .update_cfg = lsdc_cursor1_update_cfg_quirk,
+               .update_position = lsdc_cursor1_update_position_quirk,
+       },
+};
+
+int lsdc_primary_plane_init(struct drm_device *ddev,
+                           struct drm_plane *plane,
+                           unsigned int index)
+{
+       struct lsdc_primary *primary = to_lsdc_primary(plane);
+       int ret;
+
+       ret = drm_universal_plane_init(ddev, plane, 1 << index,
+                                      &lsdc_plane_funcs,
+                                      lsdc_primary_formats,
+                                      ARRAY_SIZE(lsdc_primary_formats),
+                                      lsdc_fb_format_modifiers,
+                                      DRM_PLANE_TYPE_PRIMARY,
+                                      "ls-primary-plane-%u", index);
+       if (ret)
+               return ret;
+
+       drm_plane_helper_add(plane, &lsdc_primary_helper_funcs);
+
+       primary->ldev = to_lsdc(ddev);
+       primary->ops = &lsdc_primary_plane_hw_ops[index];
+
+       return 0;
+}
+
+int ls7a1000_cursor_plane_init(struct drm_device *ddev,
+                              struct drm_plane *plane,
+                              unsigned int index)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       int ret;
+
+       ret = drm_universal_plane_init(ddev, plane, 1 << index,
+                                      &lsdc_plane_funcs,
+                                      lsdc_cursor_formats,
+                                      ARRAY_SIZE(lsdc_cursor_formats),
+                                      lsdc_fb_format_modifiers,
+                                      DRM_PLANE_TYPE_CURSOR,
+                                      "ls-cursor-plane-%u", index);
+       if (ret)
+               return ret;
+
+       cursor->ldev = to_lsdc(ddev);
+       cursor->ops = &ls7a1000_cursor_hw_ops[index];
+
+       drm_plane_helper_add(plane, &ls7a1000_cursor_plane_helper_funcs);
+
+       return 0;
+}
+
+int ls7a2000_cursor_plane_init(struct drm_device *ddev,
+                              struct drm_plane *plane,
+                              unsigned int index)
+{
+       struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+       int ret;
+
+       ret = drm_universal_plane_init(ddev, plane, 1 << index,
+                                      &lsdc_plane_funcs,
+                                      lsdc_cursor_formats,
+                                      ARRAY_SIZE(lsdc_cursor_formats),
+                                      lsdc_fb_format_modifiers,
+                                      DRM_PLANE_TYPE_CURSOR,
+                                      "ls-cursor-plane-%u", index);
+       if (ret)
+               return ret;
+
+       cursor->ldev = to_lsdc(ddev);
+       cursor->ops = &ls7a2000_cursor_hw_ops[index];
+
+       drm_plane_helper_add(plane, &ls7a2000_cursor_plane_helper_funcs);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_probe.c b/drivers/gpu/drm/loongson/lsdc_probe.c
new file mode 100644 (file)
index 0000000..48ba69b
--- /dev/null
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include "lsdc_drv.h"
+#include "lsdc_probe.h"
+
+/*
+ * Processor ID (implementation) values for bits 15:8 of the PRID register.
+ */
+#define LOONGSON_CPU_IMP_MASK           0xff00
+#define LOONGSON_CPU_IMP_SHIFT          8
+
+#define LOONGARCH_CPU_IMP_LS2K1000      0xa0
+#define LOONGARCH_CPU_IMP_LS2K2000      0xb0
+#define LOONGARCH_CPU_IMP_LS3A5000      0xc0
+
+#define LOONGSON_CPU_MIPS_IMP_LS2K      0x61 /* Loongson 2K Mips series SoC */
+
+/*
+ * Particular Revision values for bits 7:0 of the PRID register.
+ */
+#define LOONGSON_CPU_REV_MASK           0x00ff
+
+#define LOONGARCH_CPUCFG_PRID_REG       0x0
+
+/*
+ * With information about the host CPU, we can achieve fine-grained control.
+ */
+
+unsigned int loongson_cpu_get_prid(u8 *imp, u8 *rev)
+{
+       unsigned int prid = 0;
+
+#if defined(__loongarch__)
+       __asm__ volatile("cpucfg %0, %1\n\t"
+                       : "=&r"(prid)
+                       : "r"(LOONGARCH_CPUCFG_PRID_REG)
+                       );
+#endif
+
+#if defined(__mips__)
+       __asm__ volatile("mfc0\t%0, $15\n\t"
+                       : "=r" (prid)
+                       );
+#endif
+
+       if (imp)
+               *imp = (prid & LOONGSON_CPU_IMP_MASK) >> LOONGSON_CPU_IMP_SHIFT;
+
+       if (rev)
+               *rev = prid & LOONGSON_CPU_REV_MASK;
+
+       return prid;
+}
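+
+/*
+ * Example usage (illustrative only):
+ *
+ *   u8 imp, rev;
+ *
+ *   loongson_cpu_get_prid(&imp, &rev);
+ *
+ *   if (imp == LOONGARCH_CPU_IMP_LS2K2000)
+ *           handle the LS2K2000 case;
+ */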
diff --git a/drivers/gpu/drm/loongson/lsdc_probe.h b/drivers/gpu/drm/loongson/lsdc_probe.h
new file mode 100644 (file)
index 0000000..8bb6de2
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_PROBE_H__
+#define __LSDC_PROBE_H__
+
+/* Helpers for chip detection */
+unsigned int loongson_cpu_get_prid(u8 *impl, u8 *rev);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_regs.h b/drivers/gpu/drm/loongson/lsdc_regs.h
new file mode 100644 (file)
index 0000000..e8ea286
--- /dev/null
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_REGS_H__
+#define __LSDC_REGS_H__
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * PIXEL PLL Reference clock
+ */
+#define LSDC_PLL_REF_CLK_KHZ            100000
+
+/*
+ * These PLL register offsets are relative to LSxxxxx_CONF_REG_BASE, where
+ * xxxxx = 7A1000, 7A2000, 2K2000, 2K1000, etc.
+ */
+
+/* LS7A1000 */
+
+#define LS7A1000_PIXPLL0_REG            0x04B0
+#define LS7A1000_PIXPLL1_REG            0x04C0
+
+/* The DC, GPU, Graphic Memory Controller share the single gfxpll */
+#define LS7A1000_PLL_GFX_REG            0x0490
+
+#define LS7A1000_CONF_REG_BASE          0x10010000
+
+/* LS7A2000 */
+
+#define LS7A2000_PIXPLL0_REG            0x04B0
+#define LS7A2000_PIXPLL1_REG            0x04C0
+
+/* The DC, GPU, Graphic Memory Controller share the single gfxpll */
+#define LS7A2000_PLL_GFX_REG            0x0490
+
+#define LS7A2000_CONF_REG_BASE          0x10010000
+
+/* For LSDC_CRTCx_CFG_REG */
+#define CFG_PIX_FMT_MASK                GENMASK(2, 0)
+
+enum lsdc_pixel_format {
+       LSDC_PF_NONE = 0,
+       LSDC_PF_XRGB444 = 1,    /* [12 bits] */
+       LSDC_PF_XRGB555 = 2,    /* [15 bits] */
+       LSDC_PF_XRGB565 = 3,    /* RGB [16 bits] */
+       LSDC_PF_XRGB8888 = 4,   /* XRGB [32 bits] */
+};
+
+/*
+ * Each CRTC has two usable sets of fb address registers; the FB_REG_IN_USING
+ * bit of LSDC_CRTCx_CFG_REG indicates which fb address register set the CRTC
+ * is currently using. CFG_PAGE_FLIP is used to trigger the switch, and the
+ * switch completes at the very next vblank. Trigger it again if you want to
+ * switch back.
+ *
+ * If FB0_ADDR_REG is in use, we write the address to FB0_ADDR_REG;
+ * if FB1_ADDR_REG is in use, we write the address to FB1_ADDR_REG.
+ */
+#define CFG_PAGE_FLIP                   BIT(7)
+#define CFG_OUTPUT_ENABLE               BIT(8)
+#define CFG_HW_CLONE                    BIT(9)
+/* Indicates which fb address register is currently in use; read only */
+#define FB_REG_IN_USING                 BIT(11)
+#define CFG_GAMMA_EN                    BIT(12)
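+
+/*
+ * A minimal sketch of how these bits are used (illustrative only,
+ * mirroring lsdc_primary0_update_fb_addr() in lsdc_plane.c): the new
+ * scanout address is written to the register pair currently selected
+ * by FB_REG_IN_USING:
+ *
+ *   u32 val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+ *
+ *   if (val & FB_REG_IN_USING) {
+ *           lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_LO_REG, lo);
+ *           lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_HI_REG, hi);
+ *   } else {
+ *           lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_LO_REG, lo);
+ *           lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_HI_REG, hi);
+ *   }
+ */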
+
+/* The DC gets a soft reset when this bit changes from "1" to "0"; active low */
+#define CFG_RESET_N                     BIT(20)
+/* If this bit is set, the CRTC has stopped working and is anchored. */
+#define CRTC_ANCHORED                   BIT(24)
+
+/*
+ * The DMA step of the DC in LS7A2000/LS2K2000 is configurable;
+ * setting these bits on the LS7A1000 platform has no effect.
+ */
+#define CFG_DMA_STEP_MASK              GENMASK(17, 16)
+#define CFG_DMA_STEP_SHIFT             16
+enum lsdc_dma_steps {
+       LSDC_DMA_STEP_256_BYTES = 0,
+       LSDC_DMA_STEP_128_BYTES = 1,
+       LSDC_DMA_STEP_64_BYTES = 2,
+       LSDC_DMA_STEP_32_BYTES = 3,
+};
+
+#define CFG_VALID_BITS_MASK             GENMASK(20, 0)
+
+/* For LSDC_CRTCx_HSYNC_REG */
+#define HSYNC_INV                       BIT(31)
+#define HSYNC_EN                        BIT(30)
+#define HSYNC_END_MASK                  GENMASK(28, 16)
+#define HSYNC_END_SHIFT                 16
+#define HSYNC_START_MASK                GENMASK(12, 0)
+#define HSYNC_START_SHIFT               0
+
+/* For LSDC_CRTCx_VSYNC_REG */
+#define VSYNC_INV                       BIT(31)
+#define VSYNC_EN                        BIT(30)
+#define VSYNC_END_MASK                  GENMASK(27, 16)
+#define VSYNC_END_SHIFT                 16
+#define VSYNC_START_MASK                GENMASK(11, 0)
+#define VSYNC_START_SHIFT               0
+
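For illustration, packing a horizontal sync pulse with the fields above might look like this (hs_start and hs_end, in pixels, are placeholders):

	u32 hsync = HSYNC_EN |
		    ((hs_end << HSYNC_END_SHIFT) & HSYNC_END_MASK) |
		    ((hs_start << HSYNC_START_SHIFT) & HSYNC_START_MASK);

	writel(hsync, mmio + LSDC_CRTC0_HSYNC_REG);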
+/*********** CRTC0 ***********/
+#define LSDC_CRTC0_CFG_REG              0x1240
+#define LSDC_CRTC0_FB0_ADDR_LO_REG      0x1260
+#define LSDC_CRTC0_FB0_ADDR_HI_REG      0x15A0
+#define LSDC_CRTC0_STRIDE_REG           0x1280
+#define LSDC_CRTC0_FB_ORIGIN_REG        0x1300
+#define LSDC_CRTC0_HDISPLAY_REG         0x1400
+#define LSDC_CRTC0_HSYNC_REG            0x1420
+#define LSDC_CRTC0_VDISPLAY_REG         0x1480
+#define LSDC_CRTC0_VSYNC_REG            0x14A0
+#define LSDC_CRTC0_GAMMA_INDEX_REG      0x14E0
+#define LSDC_CRTC0_GAMMA_DATA_REG       0x1500
+#define LSDC_CRTC0_FB1_ADDR_LO_REG      0x1580
+#define LSDC_CRTC0_FB1_ADDR_HI_REG      0x15C0
+
+/*********** CRTC1 ***********/
+#define LSDC_CRTC1_CFG_REG              0x1250
+#define LSDC_CRTC1_FB0_ADDR_LO_REG      0x1270
+#define LSDC_CRTC1_FB0_ADDR_HI_REG      0x15B0
+#define LSDC_CRTC1_STRIDE_REG           0x1290
+#define LSDC_CRTC1_FB_ORIGIN_REG        0x1310
+#define LSDC_CRTC1_HDISPLAY_REG         0x1410
+#define LSDC_CRTC1_HSYNC_REG            0x1430
+#define LSDC_CRTC1_VDISPLAY_REG         0x1490
+#define LSDC_CRTC1_VSYNC_REG            0x14B0
+#define LSDC_CRTC1_GAMMA_INDEX_REG      0x14F0
+#define LSDC_CRTC1_GAMMA_DATA_REG       0x1510
+#define LSDC_CRTC1_FB1_ADDR_LO_REG      0x1590
+#define LSDC_CRTC1_FB1_ADDR_HI_REG      0x15D0
+
+/* For LSDC_CRTCx_DVO_CONF_REG */
+#define PHY_CLOCK_POL                   BIT(9)
+#define PHY_CLOCK_EN                    BIT(8)
+#define PHY_DE_POL                      BIT(1)
+#define PHY_DATA_EN                     BIT(0)
+
+/*********** DVO0 ***********/
+#define LSDC_CRTC0_DVO_CONF_REG         0x13C0
+
+/*********** DVO1 ***********/
+#define LSDC_CRTC1_DVO_CONF_REG         0x13D0
+
+/*
+ * All of the DC variants have hardware that records the scan position
+ * of the CRTC: [31:16] is the current X position, [15:0] the current Y.
+ */
+#define LSDC_CRTC0_SCAN_POS_REG         0x14C0
+#define LSDC_CRTC1_SCAN_POS_REG         0x14D0
+
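Decoding the scan position is then a matter of splitting the word, e.g.:

	u32 pos = readl(mmio + LSDC_CRTC0_SCAN_POS_REG);
	u32 x = pos >> 16;		/* [31:16] current X */
	u32 y = pos & GENMASK(15, 0);	/* [15:0]  current Y */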
+/*
+ * LS7A2000 has a sync deviation register.
+ */
+#define SYNC_DEVIATION_EN               BIT(31)
+#define SYNC_DEVIATION_NUM              GENMASK(12, 0)
+#define LSDC_CRTC0_SYNC_DEVIATION_REG   0x1B80
+#define LSDC_CRTC1_SYNC_DEVIATION_REG   0x1B90
+
+/*
+ * Roughly, LSDC_CRTC1_XXX_REG - LSDC_CRTC0_XXX_REG = 0x10, but not all
+ * registers obey this rule; LSDC_CURSORx_XXX_REG in particular does
+ * not. This is the root cause why we can't untangle the code simply by
+ * manipulating the offset of the register accesses: the register
+ * layout is not uniform across units.
+ */
+#define CRTC_PIPE_OFFSET                0x10
+
+/*
+ * There is only one hardware cursor unit in LS7A1000 and LS2K1000;
+ * setting the CFG_HW_CLONE bit to 1 eliminates this limitation, which
+ * we rely on for the custom clone mode application. LS7A2000 has two
+ * hardware cursor units, which is good enough.
+ */
+#define CURSOR_FORMAT_MASK              GENMASK(1, 0)
+#define CURSOR_FORMAT_SHIFT             0
+enum lsdc_cursor_format {
+       CURSOR_FORMAT_DISABLE = 0,
+       CURSOR_FORMAT_MONOCHROME = 1,   /* masked */
+       CURSOR_FORMAT_ARGB8888 = 2,     /* A8R8G8B8 */
+};
+
+/*
+ * LS7A1000 and LS2K1000 support only 32x32; LS2K2000 and LS7A2000
+ * support 64x64. Setting this bit appears to do no harm on LS7A1000;
+ * it simply takes no effect there.
+ */
+#define CURSOR_SIZE_SHIFT               2
+enum lsdc_cursor_size {
+       CURSOR_SIZE_32X32 = 0,
+       CURSOR_SIZE_64X64 = 1,
+};
+
+#define CURSOR_LOCATION_SHIFT           4
+enum lsdc_cursor_location {
+       CURSOR_ON_CRTC0 = 0,
+       CURSOR_ON_CRTC1 = 1,
+};
+
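Composing a cursor configuration word from the fields above — a sketch for a 64x64 ARGB8888 cursor on CRTC0, using the register names defined just below:

	u32 cfg = (CURSOR_ON_CRTC0 << CURSOR_LOCATION_SHIFT) |
		  (CURSOR_SIZE_64X64 << CURSOR_SIZE_SHIFT) |
		  (CURSOR_FORMAT_ARGB8888 << CURSOR_FORMAT_SHIFT);

	writel(cfg, mmio + LSDC_CURSOR0_CFG_REG);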
+#define LSDC_CURSOR0_CFG_REG            0x1520
+#define LSDC_CURSOR0_ADDR_LO_REG        0x1530
+#define LSDC_CURSOR0_ADDR_HI_REG        0x15e0
+#define LSDC_CURSOR0_POSITION_REG       0x1540  /* [31:16] Y, [15:0] X */
+#define LSDC_CURSOR0_BG_COLOR_REG       0x1550  /* background color */
+#define LSDC_CURSOR0_FG_COLOR_REG       0x1560  /* foreground color */
+
+#define LSDC_CURSOR1_CFG_REG            0x1670
+#define LSDC_CURSOR1_ADDR_LO_REG        0x1680
+#define LSDC_CURSOR1_ADDR_HI_REG        0x16e0
+#define LSDC_CURSOR1_POSITION_REG       0x1690  /* [31:16] Y, [15:0] X */
+#define LSDC_CURSOR1_BG_COLOR_REG       0x16A0  /* background color */
+#define LSDC_CURSOR1_FG_COLOR_REG       0x16B0  /* foreground color */
+
+/*
+ * DC Interrupt Control Register, 32-bit, Address Offset: 0x1570
+ *
+ * Bits 15:0 indicate the interrupt status.
+ * Bits 31:16 enable or disable the interrupts corresponding to bits 15:0;
+ * write 1 to enable, write 0 to disable.
+ *
+ * RF: Read Finished
+ * IDBU: Internal Data Buffer Underflow
+ * IDBFU: Internal Data Buffer Fatal Underflow
+ * CBRF: Cursor Buffer Read Finished flag, unused.
+ * FBRF0: CRTC-0 finished reading from its framebuffer.
+ * FBRF1: CRTC-1 finished reading from its framebuffer.
+ *
+ * +-------+--------------------------+-------+--------+--------+-------+
+ * | 31:27 |         26:16            | 15:11 |   10   |   9    |   8   |
+ * +-------+--------------------------+-------+--------+--------+-------+
+ * |  N/A  | Interrupt Enable Control |  N/A  | IDBFU0 | IDBFU1 | IDBU0 |
+ * +-------+--------------------------+-------+--------+--------+-------+
+ *
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ * |   7   |   6   |   5   |  4   |   3    |   2    |   1    |   0    |
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ * | IDBU1 | FBRF0 | FBRF1 | CRRF | HSYNC0 | VSYNC0 | HSYNC1 | VSYNC1 |
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ *
+ * Unfortunately, CRTC0's interrupts are mixed with CRTC1's interrupts
+ * in a single register again.
+ */
+
+#define LSDC_INT_REG                    0x1570
+
+#define INT_CRTC0_VSYNC                 BIT(2)
+#define INT_CRTC0_HSYNC                 BIT(3)
+#define INT_CRTC0_RF                    BIT(6)
+#define INT_CRTC0_IDBU                  BIT(8)
+#define INT_CRTC0_IDBFU                 BIT(10)
+
+#define INT_CRTC1_VSYNC                 BIT(0)
+#define INT_CRTC1_HSYNC                 BIT(1)
+#define INT_CRTC1_RF                    BIT(5)
+#define INT_CRTC1_IDBU                  BIT(7)
+#define INT_CRTC1_IDBFU                 BIT(9)
+
+#define INT_CRTC0_VSYNC_EN              BIT(18)
+#define INT_CRTC0_HSYNC_EN              BIT(19)
+#define INT_CRTC0_RF_EN                 BIT(22)
+#define INT_CRTC0_IDBU_EN               BIT(24)
+#define INT_CRTC0_IDBFU_EN              BIT(26)
+
+#define INT_CRTC1_VSYNC_EN              BIT(16)
+#define INT_CRTC1_HSYNC_EN              BIT(17)
+#define INT_CRTC1_RF_EN                 BIT(21)
+#define INT_CRTC1_IDBU_EN               BIT(23)
+#define INT_CRTC1_IDBFU_EN              BIT(25)
+
+#define INT_STATUS_MASK                 GENMASK(15, 0)
+
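A sketch of the handler pattern this status/enable split implies; whether the hardware acks by writing the status bits back is an assumption:

	static irqreturn_t lsdc_irq_handler_sketch(int irq, void *arg)
	{
		void __iomem *mmio = arg;
		u32 val = readl(mmio + LSDC_INT_REG);

		/* ack: assumed write-back-to-clear, enable bits preserved */
		writel(val, mmio + LSDC_INT_REG);

		if (val & INT_CRTC0_VSYNC)
			; /* handle CRTC0 vblank */
		if (val & INT_CRTC1_VSYNC)
			; /* handle CRTC1 vblank */

		return (val & INT_STATUS_MASK) ? IRQ_HANDLED : IRQ_NONE;
	}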
+/*
+ * LS7A1000/LS7A2000 have 4 GPIOs which are used to emulate I2C. They
+ * are controlled by the LS7A_DC_GPIO_DAT_REG and LS7A_DC_GPIO_DIR_REG
+ * registers. Those GPIOs have no relationship with the GPIO hardware on
+ * the bridge chip itself. The offsets are relative to the DC register base.
+ *
+ * LS2K1000 doesn't have these registers; it uses hardware I2C or generic
+ * GPIO-emulated I2C from the Linux I2C subsystem.
+ *
+ * GPIO data register, address offset: 0x1650
+ *   +---------------+-----------+-----------+
+ *   | 7 | 6 | 5 | 4 |  3  |  2  |  1  |  0  |
+ *   +---------------+-----------+-----------+
+ *   |               |    DVO1   |    DVO0   |
+ *   +      N/A      +-----------+-----------+
+ *   |               | SCL | SDA | SCL | SDA |
+ *   +---------------+-----------+-----------+
+ */
+#define LS7A_DC_GPIO_DAT_REG            0x1650
+
+/*
+ *  GPIO Input/Output direction control register, address offset: 0x1660
+ */
+#define LS7A_DC_GPIO_DIR_REG            0x1660
+
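The usual open-drain trick for GPIO-emulated I2C applies; a sketch for the DVO0 SDA line (bit 0), assuming a 1 in the direction register means input:

	static void dvo0_sda_set(void __iomem *mmio, bool high)
	{
		u8 dir = readb(mmio + LS7A_DC_GPIO_DIR_REG);

		if (high)
			dir |= BIT(0);	/* input: the pull-up floats the line high */
		else
			dir &= ~BIT(0);	/* output: drive low (DAT bit 0 kept at 0) */

		writeb(dir, mmio + LS7A_DC_GPIO_DIR_REG);
	}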
+/*
+ *  LS7A2000 has two built-in HDMI encoders and one VGA encoder
+ */
+
+/*
+ * Number of continuous packets that may be present in the HDMI hblank
+ * and vblank zones; should be >= 48.
+ */
+#define LSDC_HDMI0_ZONE_REG             0x1700
+#define LSDC_HDMI1_ZONE_REG             0x1710
+
+#define HDMI_H_ZONE_IDLE_SHIFT          0
+#define HDMI_V_ZONE_IDLE_SHIFT          16
+
+/* HDMI Interface Control Reg */
+#define HDMI_INTERFACE_EN               BIT(0)
+#define HDMI_PACKET_EN                  BIT(1)
+#define HDMI_AUDIO_EN                   BIT(2)
+/*
+ * Preamble:
+ * Immediately preceding each video data period or data island period is the
+ * preamble. This is a sequence of eight identical control characters that
+ * indicate whether the upcoming data period is a video data period or is a
+ * data island. The values of CTL0, CTL1, CTL2, and CTL3 indicate the type of
+ * data period that follows.
+ */
+#define HDMI_VIDEO_PREAMBLE_MASK        GENMASK(7, 4)
+#define HDMI_VIDEO_PREAMBLE_SHIFT       4
+/* 1: hw i2c, 0: gpio-emulated i2c; this bit shouldn't have been put in LSDC_HDMIx_INTF_CTRL_REG */
+#define HW_I2C_EN                       BIT(8)
+#define HDMI_CTL_PERIOD_MODE            BIT(9)
+#define LSDC_HDMI0_INTF_CTRL_REG        0x1720
+#define LSDC_HDMI1_INTF_CTRL_REG        0x1730
+
+#define HDMI_PHY_EN                     BIT(0)
+#define HDMI_PHY_RESET_N                BIT(1)
+#define HDMI_PHY_TERM_L_EN              BIT(8)
+#define HDMI_PHY_TERM_H_EN              BIT(9)
+#define HDMI_PHY_TERM_DET_EN            BIT(10)
+#define HDMI_PHY_TERM_STATUS            BIT(11)
+#define LSDC_HDMI0_PHY_CTRL_REG         0x1800
+#define LSDC_HDMI1_PHY_CTRL_REG         0x1810
+
+/* High-level duration must be > 1us */
+#define HDMI_PLL_ENABLE                 BIT(0)
+#define HDMI_PLL_LOCKED                 BIT(16)
+/* Bypass the software-configured values; a default source is used instead */
+#define HDMI_PLL_BYPASS                 BIT(17)
+
+#define HDMI_PLL_IDF_SHIFT              1
+#define HDMI_PLL_IDF_MASK               GENMASK(5, 1)
+#define HDMI_PLL_LF_SHIFT               6
+#define HDMI_PLL_LF_MASK                GENMASK(12, 6)
+#define HDMI_PLL_ODF_SHIFT              13
+#define HDMI_PLL_ODF_MASK               GENMASK(15, 13)
+#define LSDC_HDMI0_PHY_PLL_REG          0x1820
+#define LSDC_HDMI1_PHY_PLL_REG          0x1830
+
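Packing the divider fields, assuming the conventional relationship fout = fref / IDF * LF / ODF (the formula and field semantics are assumptions, not documented here):

	u32 val = HDMI_PLL_ENABLE |
		  ((idf << HDMI_PLL_IDF_SHIFT) & HDMI_PLL_IDF_MASK) |
		  ((lf << HDMI_PLL_LF_SHIFT) & HDMI_PLL_LF_MASK) |
		  ((odf << HDMI_PLL_ODF_SHIFT) & HDMI_PLL_ODF_MASK);

	writel(val, mmio + LSDC_HDMI0_PHY_PLL_REG);
	/* then poll HDMI_PLL_LOCKED before bringing up the PHY */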
+/*
+ * LS7A2000/LS2K2000 have an HPD status register; once again, the
+ * status of both HDMI ports is located in a single register.
+ */
+#define LSDC_HDMI_HPD_STATUS_REG        0x1BA0
+#define HDMI0_HPD_FLAG                  BIT(0)
+#define HDMI1_HPD_FLAG                  BIT(1)
+
+#define LSDC_HDMI0_PHY_CAL_REG          0x18C0
+#define LSDC_HDMI1_PHY_CAL_REG          0x18D0
+
+/* AVI InfoFrame */
+#define LSDC_HDMI0_AVI_CONTENT0         0x18E0
+#define LSDC_HDMI1_AVI_CONTENT0         0x18D0
+#define LSDC_HDMI0_AVI_CONTENT1         0x1900
+#define LSDC_HDMI1_AVI_CONTENT1         0x1910
+#define LSDC_HDMI0_AVI_CONTENT2         0x1920
+#define LSDC_HDMI1_AVI_CONTENT2         0x1930
+#define LSDC_HDMI0_AVI_CONTENT3         0x1940
+#define LSDC_HDMI1_AVI_CONTENT3         0x1950
+
+/* 1: enable avi infoframe packet, 0: disable avi infoframe packet */
+#define AVI_PKT_ENABLE                  BIT(0)
+/* 1: send one every two frames, 0: send one each frame */
+#define AVI_PKT_SEND_FREQ               BIT(1)
+/*
+ * Write 1 to flush the AVI content0 ~ content3 registers into the
+ * packet to be sent; the hardware will clear this bit automatically.
+ */
+#define AVI_PKT_UPDATE                  BIT(2)
+
+#define LSDC_HDMI0_AVI_INFO_CRTL_REG    0x1960
+#define LSDC_HDMI1_AVI_INFO_CRTL_REG    0x1970
+
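Putting the AVI bits together, a hedged update sequence (avi[], holding the packed infoframe words, is a placeholder):

	writel(avi[0], mmio + LSDC_HDMI0_AVI_CONTENT0);
	writel(avi[1], mmio + LSDC_HDMI0_AVI_CONTENT1);
	writel(avi[2], mmio + LSDC_HDMI0_AVI_CONTENT2);
	writel(avi[3], mmio + LSDC_HDMI0_AVI_CONTENT3);

	/* latch the new content; hardware clears AVI_PKT_UPDATE by itself */
	writel(AVI_PKT_ENABLE | AVI_PKT_UPDATE,
	       mmio + LSDC_HDMI0_AVI_INFO_CRTL_REG);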
+/*
+ * LS7A2000 has hardware that counts the number of vblanks generated.
+ */
+#define LSDC_CRTC0_VSYNC_COUNTER_REG    0x1A00
+#define LSDC_CRTC1_VSYNC_COUNTER_REG    0x1A10
+
+/*
+ * LS7A2000 has audio hardware associated with its HDMI encoders.
+ */
+#define LSDC_HDMI0_AUDIO_PLL_LO_REG     0x1A20
+#define LSDC_HDMI1_AUDIO_PLL_LO_REG     0x1A30
+
+#define LSDC_HDMI0_AUDIO_PLL_HI_REG     0x1A40
+#define LSDC_HDMI1_AUDIO_PLL_HI_REG     0x1A50
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
new file mode 100644 (file)
index 0000000..bf79dc5
--- /dev/null
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_prime.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_ttm.h"
+
+const char *lsdc_mem_type_to_str(uint32_t mem_type)
+{
+       switch (mem_type) {
+       case TTM_PL_VRAM:
+               return "VRAM";
+       case TTM_PL_TT:
+               return "GTT";
+       case TTM_PL_SYSTEM:
+               return "SYSTEM";
+       default:
+               break;
+       }
+
+       return "Unknown";
+}
+
+const char *lsdc_domain_to_str(u32 domain)
+{
+       switch (domain) {
+       case LSDC_GEM_DOMAIN_VRAM:
+               return "VRAM";
+       case LSDC_GEM_DOMAIN_GTT:
+               return "GTT";
+       case LSDC_GEM_DOMAIN_SYSTEM:
+               return "SYSTEM";
+       default:
+               break;
+       }
+
+       return "Unknown";
+}
+
+static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
+{
+       u32 c = 0;
+       u32 pflags = 0;
+       u32 i;
+
+       if (lbo->tbo.base.size <= PAGE_SIZE)
+               pflags |= TTM_PL_FLAG_TOPDOWN;
+
+       lbo->placement.placement = lbo->placements;
+       lbo->placement.busy_placement = lbo->placements;
+
+       if (domain & LSDC_GEM_DOMAIN_VRAM) {
+               lbo->placements[c].mem_type = TTM_PL_VRAM;
+               lbo->placements[c++].flags = pflags;
+       }
+
+       if (domain & LSDC_GEM_DOMAIN_GTT) {
+               lbo->placements[c].mem_type = TTM_PL_TT;
+               lbo->placements[c++].flags = pflags;
+       }
+
+       if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
+               lbo->placements[c].mem_type = TTM_PL_SYSTEM;
+               lbo->placements[c++].flags = 0;
+       }
+
+       if (!c) {
+               lbo->placements[c].mem_type = TTM_PL_SYSTEM;
+               lbo->placements[c++].flags = 0;
+       }
+
+       lbo->placement.num_placement = c;
+       lbo->placement.num_busy_placement = c;
+
+       for (i = 0; i < c; ++i) {
+               lbo->placements[i].fpfn = 0;
+               lbo->placements[i].lpfn = 0;
+       }
+}
+
+static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
+{
+       ttm_tt_fini(tt);
+       kfree(tt);
+}
+
+static struct ttm_tt *
+lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
+{
+       struct ttm_tt *tt;
+       int ret;
+
+       tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+       if (!tt)
+               return NULL;
+
+       ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
+       if (ret < 0) {
+               kfree(tt);
+               return NULL;
+       }
+
+       return tt;
+}
+
+static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
+                               struct ttm_tt *ttm,
+                               struct ttm_operation_ctx *ctx)
+{
+       bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+       if (slave && ttm->sg) {
+               drm_prime_sg_to_dma_addr_array(ttm->sg,
+                                              ttm->dma_address,
+                                              ttm->num_pages);
+
+               return 0;
+       }
+
+       return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+}
+
+static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
+                                  struct ttm_tt *ttm)
+{
+       bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+       if (slave)
+               return;
+
+       return ttm_pool_free(&bdev->pool, ttm);
+}
+
+static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
+                               struct ttm_placement *tplacement)
+{
+       struct ttm_resource *resource = tbo->resource;
+       struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+       switch (resource->mem_type) {
+       case TTM_PL_VRAM:
+               lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
+               break;
+       case TTM_PL_TT:
+       default:
+               lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
+               break;
+       }
+
+       *tplacement = lbo->placement;
+}
+
+static int lsdc_bo_move(struct ttm_buffer_object *tbo,
+                       bool evict,
+                       struct ttm_operation_ctx *ctx,
+                       struct ttm_resource *new_mem,
+                       struct ttm_place *hop)
+{
+       struct drm_device *ddev = tbo->base.dev;
+       struct ttm_resource *old_mem = tbo->resource;
+       struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+       int ret;
+
+       if (unlikely(tbo->pin_count > 0)) {
+               drm_warn(ddev, "Can't move a pinned BO\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_bo_wait_ctx(tbo, ctx);
+       if (ret)
+               return ret;
+
+       if (!old_mem) {
+               drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
+                       lbo, lsdc_mem_type_to_str(new_mem->mem_type),
+                       lsdc_bo_size(lbo));
+               ttm_bo_move_null(tbo, new_mem);
+               return 0;
+       }
+
+       if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
+               ttm_bo_move_null(tbo, new_mem);
+               drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
+                       lbo, lsdc_bo_size(lbo));
+               return 0;
+       }
+
+       if (old_mem->mem_type == TTM_PL_SYSTEM &&
+           new_mem->mem_type == TTM_PL_TT) {
+               drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
+                       lbo, lsdc_bo_size(lbo));
+               ttm_bo_move_null(tbo, new_mem);
+               return 0;
+       }
+
+       if (old_mem->mem_type == TTM_PL_TT &&
+           new_mem->mem_type == TTM_PL_SYSTEM) {
+               drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
+                       lbo, lsdc_bo_size(lbo));
+               ttm_resource_free(tbo, &tbo->resource);
+               ttm_bo_assign_mem(tbo, new_mem);
+               return 0;
+       }
+
+       drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
+               lbo,
+               lsdc_mem_type_to_str(old_mem->mem_type),
+               lsdc_mem_type_to_str(new_mem->mem_type),
+               lsdc_bo_size(lbo));
+
+       return ttm_bo_move_memcpy(tbo, ctx, new_mem);
+}
+
+static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
+                                 struct ttm_resource *mem)
+{
+       struct lsdc_device *ldev = tdev_to_ldev(bdev);
+
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               break;
+       case TTM_PL_TT:
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
+               mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_write_combined;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct ttm_device_funcs lsdc_bo_driver = {
+       .ttm_tt_create = lsdc_ttm_tt_create,
+       .ttm_tt_populate = lsdc_ttm_tt_populate,
+       .ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
+       .ttm_tt_destroy = lsdc_ttm_tt_destroy,
+       .eviction_valuable = ttm_bo_eviction_valuable,
+       .evict_flags = lsdc_bo_evict_flags,
+       .move = lsdc_bo_move,
+       .io_mem_reserve = lsdc_bo_reserve_io_mem,
+};
+
+u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+       struct drm_device *ddev = tbo->base.dev;
+       struct ttm_resource *resource = tbo->resource;
+
+       if (unlikely(!tbo->pin_count)) {
+               drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
+               return 0;
+       }
+
+       if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
+               return 0;
+
+       return resource->start << PAGE_SHIFT;
+}
+
+size_t lsdc_bo_size(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+
+       return tbo->base.size;
+}
+
+int lsdc_bo_reserve(struct lsdc_bo *lbo)
+{
+       return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
+}
+
+void lsdc_bo_unreserve(struct lsdc_bo *lbo)
+{
+       return ttm_bo_unreserve(&lbo->tbo);
+}
+
+int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+       struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+       int ret;
+
+       if (tbo->pin_count)
+               goto bo_pinned;
+
+       if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
+               return -EINVAL;
+
+       if (domain)
+               lsdc_bo_set_placement(lbo, domain);
+
+       ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
+       if (unlikely(ret)) {
+               drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
+               return ret;
+       }
+
+       if (domain == LSDC_GEM_DOMAIN_VRAM)
+               ldev->vram_pinned_size += lsdc_bo_size(lbo);
+       else if (domain == LSDC_GEM_DOMAIN_GTT)
+               ldev->gtt_pinned_size += lsdc_bo_size(lbo);
+
+bo_pinned:
+       ttm_bo_pin(tbo);
+
+       if (gpu_addr)
+               *gpu_addr = lsdc_bo_gpu_offset(lbo);
+
+       return 0;
+}
+
+void lsdc_bo_unpin(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+       struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+
+       if (unlikely(!tbo->pin_count)) {
+               drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
+               return;
+       }
+
+       ttm_bo_unpin(tbo);
+
+       if (!tbo->pin_count) {
+               if (tbo->resource->mem_type == TTM_PL_VRAM)
+                       ldev->vram_pinned_size -= lsdc_bo_size(lbo);
+               else if (tbo->resource->mem_type == TTM_PL_TT)
+                       ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
+       }
+}
+
+void lsdc_bo_ref(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+
+       ttm_bo_get(tbo);
+}
+
+void lsdc_bo_unref(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+
+       ttm_bo_put(tbo);
+}
+
+int lsdc_bo_kmap(struct lsdc_bo *lbo)
+{
+       struct ttm_buffer_object *tbo = &lbo->tbo;
+       struct drm_gem_object *gem = &tbo->base;
+       struct drm_device *ddev = gem->dev;
+       long ret;
+       int err;
+
+       ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
+                                   MAX_SCHEDULE_TIMEOUT);
+       if (ret < 0) {
+               drm_warn(ddev, "waiting for fence failed: %ld\n", ret);
+               return ret;
+       }
+
+       if (lbo->kptr)
+               return 0;
+
+       err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
+       if (err) {
+               drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
+               return err;
+       }
+
+       lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);
+
+       return 0;
+}
+
+void lsdc_bo_kunmap(struct lsdc_bo *lbo)
+{
+       if (!lbo->kptr)
+               return;
+
+       lbo->kptr = NULL;
+       ttm_bo_kunmap(&lbo->kmap);
+}
+
+void lsdc_bo_clear(struct lsdc_bo *lbo)
+{
+       if (lsdc_bo_kmap(lbo))
+               return;         /* kptr is NULL, nothing to clear */
+
+       if (lbo->is_iomem)
+               memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
+       else
+               memset(lbo->kptr, 0, lbo->size);
+
+       lsdc_bo_kunmap(lbo);
+}
+
+int lsdc_bo_evict_vram(struct drm_device *ddev)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct ttm_device *bdev = &ldev->bdev;
+       struct ttm_resource_manager *man;
+
+       man = ttm_manager_type(bdev, TTM_PL_VRAM);
+       if (unlikely(!man))
+               return 0;
+
+       return ttm_resource_manager_evict_all(bdev, man);
+}
+
+static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
+{
+       struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+       struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+       mutex_lock(&ldev->gem.mutex);
+       list_del_init(&lbo->list);
+       mutex_unlock(&ldev->gem.mutex);
+
+       drm_gem_object_release(&tbo->base);
+
+       kfree(lbo);
+}
+
+struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
+                              u32 domain,
+                              size_t size,
+                              bool kernel,
+                              struct sg_table *sg,
+                              struct dma_resv *resv)
+{
+       struct lsdc_device *ldev = to_lsdc(ddev);
+       struct ttm_device *bdev = &ldev->bdev;
+       struct ttm_buffer_object *tbo;
+       struct lsdc_bo *lbo;
+       enum ttm_bo_type bo_type;
+       int ret;
+
+       lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
+       if (!lbo)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&lbo->list);
+
+       lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
+                                       LSDC_GEM_DOMAIN_GTT |
+                                       LSDC_GEM_DOMAIN_SYSTEM);
+
+       tbo = &lbo->tbo;
+
+       size = ALIGN(size, PAGE_SIZE);
+
+       ret = drm_gem_object_init(ddev, &tbo->base, size);
+       if (ret) {
+               kfree(lbo);
+               return ERR_PTR(ret);
+       }
+
+       tbo->bdev = bdev;
+
+       if (kernel)
+               bo_type = ttm_bo_type_kernel;
+       else if (sg)
+               bo_type = ttm_bo_type_sg;
+       else
+               bo_type = ttm_bo_type_device;
+
+       lsdc_bo_set_placement(lbo, domain);
+       lbo->size = size;
+
+       ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
+                                  false, sg, resv, lsdc_bo_destroy);
+       if (ret) {
+               kfree(lbo);
+               return ERR_PTR(ret);
+       }
+
+       return lbo;
+}
+
+struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
+                                            u32 domain,
+                                            size_t size)
+{
+       struct lsdc_bo *lbo;
+       int ret;
+
+       lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
+       if (IS_ERR(lbo))
+               return ERR_CAST(lbo);
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret)) {
+               lsdc_bo_unref(lbo);
+               return ERR_PTR(ret);
+       }
+
+       ret = lsdc_bo_pin(lbo, domain, NULL);
+       lsdc_bo_unreserve(lbo);
+       if (unlikely(ret)) {
+               lsdc_bo_unref(lbo);
+               return ERR_PTR(ret);
+       }
+
+       return lbo;
+}
+
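A hypothetical caller, pairing this with lsdc_bo_free_kernel_pinned() below; the size and domain are illustrative:

	/* e.g. back a 64x64 ARGB8888 cursor with pinned VRAM */
	struct lsdc_bo *cursor;

	cursor = lsdc_bo_create_kernel_pinned(ddev, LSDC_GEM_DOMAIN_VRAM,
					      64 * 64 * 4);
	if (IS_ERR(cursor))
		return PTR_ERR(cursor);

	/* ... use the BO ... */

	lsdc_bo_free_kernel_pinned(cursor);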
+void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
+{
+       int ret;
+
+       ret = lsdc_bo_reserve(lbo);
+       if (unlikely(ret))
+               return;
+
+       lsdc_bo_unpin(lbo);
+       lsdc_bo_unreserve(lbo);
+
+       lsdc_bo_unref(lbo);
+}
+
+static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
+{
+       struct lsdc_device *ldev = (struct lsdc_device *)data;
+
+       ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
+       ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);
+
+       ttm_device_fini(&ldev->bdev);
+
+       drm_dbg(ddev, "ttm finished\n");
+}
+
+int lsdc_ttm_init(struct lsdc_device *ldev)
+{
+       struct drm_device *ddev = &ldev->base;
+       unsigned long num_vram_pages;
+       unsigned long num_gtt_pages;
+       int ret;
+
+       ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
+                             ddev->anon_inode->i_mapping,
+                             ddev->vma_offset_manager, false, true);
+       if (ret)
+               return ret;
+
+       num_vram_pages = ldev->vram_size >> PAGE_SHIFT;
+
+       ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
+       if (unlikely(ret))
+               return ret;
+
+       drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);
+
+       /* 512M is more than enough for now */
+       ldev->gtt_size = 512 << 20;
+
+       num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;
+
+       ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
+       if (unlikely(ret))
+               return ret;
+
+       drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);
+
+       return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
+}
+
+void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
+{
+       struct ttm_device *bdev = &ldev->bdev;
+       struct drm_device *ddev = &ldev->base;
+       struct drm_minor *minor = ddev->primary;
+       struct dentry *root = minor->debugfs_root;
+       struct ttm_resource_manager *vram_man;
+       struct ttm_resource_manager *gtt_man;
+
+       vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
+       gtt_man = ttm_manager_type(bdev, TTM_PL_TT);
+
+       ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
+       ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.h b/drivers/gpu/drm/loongson/lsdc_ttm.h
new file mode 100644 (file)
index 0000000..843e147
--- /dev/null
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_TTM_H__
+#define __LSDC_TTM_H__
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+#include <linux/list.h>
+
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
+
+#define LSDC_GEM_DOMAIN_SYSTEM          0x1
+#define LSDC_GEM_DOMAIN_GTT             0x2
+#define LSDC_GEM_DOMAIN_VRAM            0x4
+
+struct lsdc_bo {
+       struct ttm_buffer_object tbo;
+
+       /* Protected by gem.mutex */
+       struct list_head list;
+
+       struct iosys_map map;
+
+       unsigned int vmap_count;
+       /* Cross-device driver sharing reference count */
+       unsigned int sharing_count;
+
+       struct ttm_bo_kmap_obj kmap;
+       void *kptr;
+       bool is_iomem;
+
+       size_t size;
+
+       u32 initial_domain;
+
+       struct ttm_placement placement;
+       struct ttm_place placements[4];
+};
+
+static inline struct ttm_buffer_object *to_ttm_bo(struct drm_gem_object *gem)
+{
+       return container_of(gem, struct ttm_buffer_object, base);
+}
+
+static inline struct lsdc_bo *to_lsdc_bo(struct ttm_buffer_object *tbo)
+{
+       return container_of(tbo, struct lsdc_bo, tbo);
+}
+
+static inline struct lsdc_bo *gem_to_lsdc_bo(struct drm_gem_object *gem)
+{
+       return container_of(gem, struct lsdc_bo, tbo.base);
+}
+
+const char *lsdc_mem_type_to_str(uint32_t mem_type);
+const char *lsdc_domain_to_str(u32 domain);
+
+struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
+                              u32 domain,
+                              size_t size,
+                              bool kernel,
+                              struct sg_table *sg,
+                              struct dma_resv *resv);
+
+struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
+                                            u32 domain,
+                                            size_t size);
+
+void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo);
+
+int lsdc_bo_reserve(struct lsdc_bo *lbo);
+void lsdc_bo_unreserve(struct lsdc_bo *lbo);
+
+int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr);
+void lsdc_bo_unpin(struct lsdc_bo *lbo);
+
+void lsdc_bo_ref(struct lsdc_bo *lbo);
+void lsdc_bo_unref(struct lsdc_bo *lbo);
+
+u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo);
+size_t lsdc_bo_size(struct lsdc_bo *lbo);
+
+int lsdc_bo_kmap(struct lsdc_bo *lbo);
+void lsdc_bo_kunmap(struct lsdc_bo *lbo);
+void lsdc_bo_clear(struct lsdc_bo *lbo);
+
+int lsdc_bo_evict_vram(struct drm_device *ddev);
+
+int lsdc_ttm_init(struct lsdc_device *ldev);
+void lsdc_ttm_debugfs_init(struct lsdc_device *ldev);
+
+#endif
index a8cd86c06c147867b1d76b99b731e594283eafad..a2572fb311f08277d5bad24069e3839bbaf3334c 100644 (file)
@@ -448,7 +448,7 @@ regulator_epod_off:
 
 }
 
-static int mcde_remove(struct platform_device *pdev)
+static void mcde_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
        struct mcde *mcde = to_mcde(drm);
@@ -457,8 +457,6 @@ static int mcde_remove(struct platform_device *pdev)
        clk_disable_unprepare(mcde->mcde_clk);
        regulator_disable(mcde->vana);
        regulator_disable(mcde->epod);
-
-       return 0;
 }
 
 static const struct of_device_id mcde_of_match[] = {
@@ -471,10 +469,10 @@ static const struct of_device_id mcde_of_match[] = {
 static struct platform_driver mcde_driver = {
        .driver = {
                .name           = "mcde",
-               .of_match_table = of_match_ptr(mcde_of_match),
+               .of_match_table = mcde_of_match,
        },
        .probe = mcde_probe,
-       .remove = mcde_remove,
+       .remove_new = mcde_remove,
 };
 
 static struct platform_driver *const component_drivers[] = {
index 9f9ac8699310d693af3129cf6f71210ceb98c96c..e2fad1a048b5423a595efca08c856e8ed2a29dd5 100644 (file)
@@ -1208,14 +1208,12 @@ static int mcde_dsi_probe(struct platform_device *pdev)
        return component_add(dev, &mcde_dsi_component_ops);
 }
 
-static int mcde_dsi_remove(struct platform_device *pdev)
+static void mcde_dsi_remove(struct platform_device *pdev)
 {
        struct mcde_dsi *d = platform_get_drvdata(pdev);
 
        component_del(&pdev->dev, &mcde_dsi_component_ops);
        mipi_dsi_host_unregister(&d->dsi_host);
-
-       return 0;
 }
 
 static const struct of_device_id mcde_dsi_of_match[] = {
@@ -1228,8 +1226,8 @@ static const struct of_device_id mcde_dsi_of_match[] = {
 struct platform_driver mcde_dsi_driver = {
        .driver = {
                .name           = "mcde-dsi",
-               .of_match_table = of_match_ptr(mcde_dsi_of_match),
+               .of_match_table = mcde_dsi_of_match,
        },
        .probe = mcde_dsi_probe,
-       .remove = mcde_dsi_remove,
+       .remove_new = mcde_dsi_remove,
 };
index 12e1469a93abed51f125db96a91169718862c3d4..4da9ac93b29e3c313c738b6422c80bbe4ba81850 100644 (file)
@@ -6,8 +6,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
index 886f1827a3a3ef3b065c97b0778edcd8213cd466..4234ff7485e884cea00c1069ba576cbb9bed9d48 100644 (file)
@@ -6,8 +6,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
index 78e44e6befd62de46750f3896a72568edc89214e..78ea99f1444faeb04feabb946e66bf6a0a3de51b 100644 (file)
@@ -6,8 +6,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
index c5237f4eb7fe1f00ba058b3065430dfc001c7df0..673f9a5738f28f8ef418e744c3919093e28e76a7 100644 (file)
@@ -6,8 +6,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 
index fd14a59bc9516e915e99c2e28c179c890a1e4ac5..e525a6b9e5b0bc257f6aea8e8dda8e4c47a22899 100644 (file)
@@ -5,8 +5,7 @@
 
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
index 5aaf4342cdbdaf487dbe38f54c71f303b9ee7283..2bffe424546667ec3d3d2ce931bc55507e46e392 100644 (file)
@@ -10,8 +10,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
index f2f6a5c01a6d2b99da765e07a4cf7dd1dba7ab75..6bf6367853fbae60ead277d51f107aa70314ee9f 100644 (file)
@@ -7,8 +7,9 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
index f0d851b53dffcee8047308f3ee729ad99eaf0ba6..faa907f2f443e36944f182b2b3b414ad44530d13 100644 (file)
@@ -8,8 +8,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
index df56fbb40ff49ece3bc1f0ad2e28b6718a7fd9cf..2f931e4e2b60091efb805ad8831d96f0c78f3ace 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/media-bus-format.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
index 8d44f3df116fa277cafd912c76d90b9bf96be56d..b6fa4ad2f94dc0b7dfb9d0b9d31259e95ed265dd 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/mailbox_controller.h>
+#include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
index ed96489af903daefdb122553595510c1dfdb0eab..93552d76b6e77822baa6ac3ebfb28b3959d588e9 100644 (file)
@@ -7,8 +7,9 @@
 #include <linux/component.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/dma-mapping.h>
 
@@ -559,11 +560,8 @@ static const struct drm_driver mtk_drm_driver = {
 
        .dumb_create = mtk_drm_gem_dumb_create,
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = mtk_drm_gem_prime_import,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
-       .gem_prime_mmap = drm_gem_prime_mmap,
        .fops = &mtk_drm_fops,
 
        .name = DRIVER_NAME,
index 73dc4da3ba3bd0fd27332071b40eadb652b9c7c3..db7ac666ec5e1193b5dbfcdfc771ce817e000896 100644 (file)
@@ -7,7 +7,7 @@
 #include <drm/drm_framebuffer.h>
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
index 5746f06220c12e8398da6f6f76ac41cc3264451f..c3adaeefd551a269c788e81ce89e19103912b0c9 100644 (file)
@@ -6,8 +6,7 @@
 #include <drm/drm_fourcc.h>
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
index 747b639ea0c471f0d2814e481f807466fb8d0098..cb674966e9aca787e679c77f9661fc1b786d9510 100644 (file)
@@ -516,11 +516,9 @@ static int meson_drv_probe(struct platform_device *pdev)
        return 0;
 };
 
-static int meson_drv_remove(struct platform_device *pdev)
+static void meson_drv_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &meson_drv_master_ops);
-
-       return 0;
 }
 
 static struct meson_drm_match_data meson_drm_gxbb_data = {
@@ -560,7 +558,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
 
 static struct platform_driver meson_drm_platform_driver = {
        .probe      = meson_drv_probe,
-       .remove     = meson_drv_remove,
+       .remove_new = meson_drv_remove,
        .shutdown   = meson_drv_shutdown,
        .driver     = {
                .name   = "meson-drm",
index b23009a3380fb38a70d74584a1f12e3beea4e7c6..3f9345c14f31c13b071f420533fe8a450d3e0f36 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/device.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regmap.h>
 
 struct drm_crtc;
index 3d046878ce6cb74f5a3662e795e091be9d7b4b75..5a9538bc0e26f83468a4e3fb751c0ee64643fbc6 100644 (file)
@@ -9,8 +9,9 @@
 #include <linux/component.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
@@ -379,8 +380,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
                         mode->clock > 340000 ? 40 : 10);
 
        if (drm_mode_is_420_only(display, mode) ||
-           (!is_hdmi2_sink &&
-            drm_mode_is_420_also(display, mode)))
+           (!is_hdmi2_sink && drm_mode_is_420_also(display, mode)) ||
+           dw_hdmi_bus_fmt_is_420(hdmi))
                mode_is_420 = true;
 
        /* Enable clocks */
@@ -852,11 +853,9 @@ static int meson_dw_hdmi_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &meson_dw_hdmi_ops);
 }
 
-static int meson_dw_hdmi_remove(struct platform_device *pdev)
+static void meson_dw_hdmi_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &meson_dw_hdmi_ops);
-
-       return 0;
 }
 
 static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
@@ -879,7 +878,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
 
 static struct platform_driver meson_dw_hdmi_platform_driver = {
        .probe          = meson_dw_hdmi_probe,
-       .remove         = meson_dw_hdmi_remove,
+       .remove_new     = meson_dw_hdmi_remove,
        .driver         = {
                .name           = DRIVER_NAME,
                .of_match_table = meson_dw_hdmi_of_table,
index 57447abf1a295fc42cf316d1f7b5f79d0c66a228..e5fe4e994f43b1b8cff6b658e34043981ae139e6 100644 (file)
@@ -7,9 +7,10 @@
 
 #include <linux/clk.h>
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/phy/phy.h>
 #include <linux/bitfield.h>
index 812e172dec63a7b140a58dd527cc11c543190077..3f93c70488cad1829bbe488d8bf8f7b3833859f1 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 
 #include <drm/drm_atomic_helper.h>
index 53231bfdf7e2471610825b0d111a1178e84ab26d..9913971fa5d20bc9d192695d8b2773d0244615ce 100644 (file)
@@ -9,8 +9,10 @@
 #include <linux/component.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
index 976f0ab2006b5a23533b7e9b42119db91b0ceb0f..abddf37f0ea119d23d109e7e1b962a6553135446 100644 (file)
@@ -20,7 +20,7 @@
 
 #include "mgag200_drv.h"
 
-int mgag200_modeset = -1;
+static int mgag200_modeset = -1;
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, mgag200_modeset, int, 0400);
 
index a78662bd62730c48dc209330e91191244d0c1d3e..6309a857ca312869e37f9e9021195efd9a01d4ff 100644 (file)
@@ -21,7 +21,7 @@ config DRM_MSM
        select DRM_BRIDGE
        select DRM_PANEL_BRIDGE
        select DRM_SCHED
-       select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+       select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
        select SHMEM
        select TMPFS
        select QCOM_SCM
index 5deb79924897afded45a9399e1e3e29e5e534921..b20ef6c8ea26d935338166adebb2c3685905635b 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/clk.h>
 #include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
 #include <soc/qcom/cmd-db.h>
index 1245c7aa49df844ce293207c6a74374eb0bee805..4a2e479723a852f10ff226231192b791a42ac691 100644 (file)
@@ -6,7 +6,7 @@
 
 #define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
 
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 #include <drm/display/drm_dp_helper.h>
 #include <drm/drm_edid.h>
index 3f6dfb4f9d5a6dabf33a78a5b3c43aa1ed3bbe45..4c6d73e24bb53e148dd1cd05922c8289228c39d5 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/of_irq.h>
 #include <linux/pinctrl/consumer.h>
index 3132105a2a433af412541b8f56574ec7e2264576..60509fb397100227e3c0585236c2fcad1784f7a4 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <linux/of_irq.h>
 #include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 #include <drm/drm_bridge_connector.h>
 #include <drm/drm_of.h>
index 9780107e1cc973febbe54d309faad9c0b2fed23f..3e00fb8190b2a12ce88d004d5e0a866f9cb855de 100644 (file)
@@ -3,7 +3,8 @@
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 
 #include "hdmi.h"
 
index 891eff8433a9cd691daa0b989da7fbfaf38cec8e..2a0e3529598b698456eb51762f3da480f04695ed 100644 (file)
@@ -1086,10 +1086,7 @@ static const struct drm_driver msm_driver = {
        .postclose          = msm_postclose,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-       .gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
 #endif
index e13a8cbd61c95004082c2298208cb9855e20458c..44c9e06f2dffa9ab1a96dd52f549aa2b86b17245 100644 (file)
@@ -282,7 +282,6 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
index b933a85420f6d54237e6fb9e18b6cb62d4b45f54..030bedac632d09fb88f9a7c8436f18136fd0463d 100644 (file)
@@ -25,9 +25,9 @@ module_param(fbdev, bool, 0600);
  * fbdev funcs, to implement legacy fbdev interface on top of drm driver
  */
 
-FB_GEN_DEFAULT_DEFERRED_SYS_OPS(msm_fbdev,
-                               drm_fb_helper_damage_range,
-                               drm_fb_helper_damage_area)
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
+                                  drm_fb_helper_damage_range,
+                                  drm_fb_helper_damage_area)
 
 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
@@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
                goto err_drm_fb_helper_unprepare;
        }
 
-       ret = msm_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
index 20cfd86d2b324ae44287b51c30fd59f7e90425fb..635744bc4765fc2c29e959ecc433840d48e8bb17 100644 (file)
@@ -1234,6 +1234,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
        list_add_tail(&msm_obj->node, &priv->objects);
        mutex_unlock(&priv->obj_lock);
 
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto fail;
+
        return obj;
 
 fail:
@@ -1290,6 +1294,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        list_add_tail(&msm_obj->node, &priv->objects);
        mutex_unlock(&priv->obj_lock);
 
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto fail;
+
        return obj;
 
 fail:
index c1d91863df055b619332b466128d7b3aba9fe6d2..5f68e31a3e4e1cbeed95bfde138711c0fc9c9759 100644 (file)
 #include "msm_drv.h"
 #include "msm_gem.h"
 
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
-       int ret;
-
-       /* Ensure the mmap offset is initialized.  We lazily initialize it,
-        * so if it has not been first mmap'd directly as a GEM object, the
-        * mmap offset will not be already initialized.
-        */
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret)
-               return ret;
-
-       return drm_gem_prime_mmap(obj, vma);
-}
-
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
index 05648c910c68efe2785c0251606eb17368d9d755..6bf6c4a0f55036e3c848582039550ddef4c61a59 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/irqchip.h>
 #include <linux/irqdesc.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
index c9d8cbb214079d8b969103b6376f1e03eb356146..18de2f17e2491cf72ca503720e1fc978a1db8568 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -285,7 +284,7 @@ err_free:
        return ret;
 }
 
-static int lcdif_remove(struct platform_device *pdev)
+static void lcdif_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
 
@@ -293,8 +292,6 @@ static int lcdif_remove(struct platform_device *pdev)
        drm_atomic_helper_shutdown(drm);
        lcdif_unload(drm);
        drm_dev_put(drm);
-
-       return 0;
 }
 
 static void lcdif_shutdown(struct platform_device *pdev)
@@ -362,7 +359,7 @@ static const struct dev_pm_ops lcdif_pm_ops = {
 
 static struct platform_driver lcdif_platform_driver = {
        .probe          = lcdif_probe,
-       .remove         = lcdif_remove,
+       .remove_new     = lcdif_remove,
        .shutdown       = lcdif_shutdown,
        .driver = {
                .name           = "imx-lcdif",
index 368b1fbd8305b3c77a5c867d99d41200f93b9ee7..625c1bfc41733b23e05de4556e3c9841094f7922 100644 (file)
@@ -377,7 +377,7 @@ err_free:
        return ret;
 }
 
-static int mxsfb_remove(struct platform_device *pdev)
+static void mxsfb_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
 
@@ -385,8 +385,6 @@ static int mxsfb_remove(struct platform_device *pdev)
        drm_atomic_helper_shutdown(drm);
        mxsfb_unload(drm);
        drm_dev_put(drm);
-
-       return 0;
 }
 
 static void mxsfb_shutdown(struct platform_device *pdev)
@@ -418,7 +416,7 @@ static const struct dev_pm_ops mxsfb_pm_ops = {
 
 static struct platform_driver mxsfb_platform_driver = {
        .probe          = mxsfb_probe,
-       .remove         = mxsfb_remove,
+       .remove_new     = mxsfb_remove,
        .shutdown       = mxsfb_shutdown,
        .driver = {
                .name           = "mxsfb",
index 3bcc9c0f20193e276438af51653b6146585228cf..7ed2516b6de0555a87f185f8890f648e602f3218 100644 (file)
@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
        writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
 }
 
+static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
+                                              struct drm_atomic_state *state)
+{
+       struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+
+       writel(0, mxsfb->base + LCDC_AS_CTRL);
+}
+
 static bool mxsfb_format_mod_supported(struct drm_plane *plane,
                                       uint32_t format,
                                       uint64_t modifier)
@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
 static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
        .atomic_check = mxsfb_plane_atomic_check,
        .atomic_update = mxsfb_plane_overlay_atomic_update,
+       .atomic_disable = mxsfb_plane_overlay_atomic_disable,
 };
 
 static const struct drm_plane_funcs mxsfb_plane_funcs = {
index 5e5617006da50cd29c52a1d370582dd235f4de91..cf6b3a80c0c8ae4968b69e600e89639bfd63db2f 100644 (file)
@@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
 nouveau-y += nouveau_vmm.o
+nouveau-y += nouveau_exec.o
+nouveau-y += nouveau_sched.o
+nouveau-y += nouveau_uvmm.o
 
 # DRM - modesetting
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
index a70bd65e1400760e46fef5097547df256b2f5d51..c52e8096cca415893269c0fed15c5da3c17ec0aa 100644 (file)
@@ -10,6 +10,8 @@ config DRM_NOUVEAU
        select DRM_KMS_HELPER
        select DRM_TTM
        select DRM_TTM_HELPER
+       select DRM_EXEC
+       select DRM_SCHED
        select I2C
        select I2C_ALGOBIT
        select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
index a6f2e681bde98a655c57eec251b3c7f28c977ce4..a34924523133c78f864c829abe3f9d8dcc9a22c4 100644 (file)
@@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
        PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
        PUSH_KICK(push);
 
-       ret = nouveau_fence_new(chan, false, pfence);
+       ret = nouveau_fence_new(pfence);
        if (ret)
                goto fail;
 
+       ret = nouveau_fence_emit(*pfence, chan);
+       if (ret)
+               goto fail_fence_unref;
+
        return 0;
+
+fail_fence_unref:
+       nouveau_fence_unref(pfence);
 fail:
        spin_lock_irqsave(&dev->event_lock, flags);
        list_del(&s->head);
index 78ee32da01c8adee5e4f8718b40a24d596c5b836..a95ee5dcc2e39474afcd583a42f093b4c6280c2d 100644 (file)
@@ -29,6 +29,7 @@
 #include <nvhw/class/cl507a.h>
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
 
 bool
 curs507a_space(struct nv50_wndw *wndw)
@@ -99,6 +100,7 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 {
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        struct nv50_head *head = nv50_head(asyw->state.crtc);
+       struct drm_framebuffer *fb = asyw->state.fb;
        int ret;
 
        ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
@@ -124,11 +126,30 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                return -EINVAL;
        }
 
+       if (asyw->image.pitch[0] != asyw->image.w * fb->format->cpp[0]) {
+               NV_ATOMIC(drm,
+                         "%s: invalid cursor image pitch: image must be packed (pitch = %d, width = %d)\n",
+                         wndw->plane.name, asyw->image.pitch[0], asyw->image.w);
+               return -EINVAL;
+       }
+
        ret = head->func->curs_layout(head, asyw, asyh);
-       if (ret)
+       if (ret) {
+               NV_ATOMIC(drm,
+                         "%s: invalid cursor image size: unsupported size %dx%d\n",
+                         wndw->plane.name, asyw->image.w, asyw->image.h);
+               return ret;
+       }
+
+       ret = head->func->curs_format(head, asyw, asyh);
+       if (ret) {
+               NV_ATOMIC(drm,
+                         "%s: invalid cursor image format 0x%X\n",
+                         wndw->plane.name, fb->format->format);
                return ret;
+       }
 
-       return head->func->curs_format(head, asyw, asyh);
+       return 0;
 }
 
 static const u32
index 42e1665ba11a3ff5b1dd88b7fdf355ce80c1be34..d18e24b6f1eba9ab9bcc614e2ecc49698adac555 100644 (file)
@@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
        struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
        struct nv50_mstc *mstc = msto->mstc;
        struct nv50_mstm *mstm = mstc->mstm;
-       struct drm_dp_mst_atomic_payload *payload;
+       struct drm_dp_mst_topology_state *old_mst_state;
+       struct drm_dp_mst_atomic_payload *payload, *old_payload;
 
        NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
 
+       old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
+
        payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+       old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
 
        // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
        if (msto->disabled) {
-               drm_dp_remove_payload(mgr, mst_state, payload, payload);
+               drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
 
                nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
        } else {
@@ -1124,7 +1128,7 @@ nv50_mstc_mode_valid(struct drm_connector *connector,
         * MSTB's max possible PBN
         */
 
-       return nv50_dp_mode_valid(connector, outp, mode, NULL);
+       return nv50_dp_mode_valid(outp, mode, NULL);
 }
 
 static int
index 9c7ff56831c5472bfb323005a7d39ba193698c82..a5a182b3c28de0360df43171f80c70d7fc67fc52 100644 (file)
@@ -3,7 +3,10 @@
 struct nvif_vmm_v0 {
        __u8  version;
        __u8  page_nr;
-       __u8  managed;
+#define NVIF_VMM_V0_TYPE_UNMANAGED                                         0x00
+#define NVIF_VMM_V0_TYPE_MANAGED                                           0x01
+#define NVIF_VMM_V0_TYPE_RAW                                               0x02
+       __u8  type;
        __u8  pad03[5];
        __u64 addr;
        __u64 size;
@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
 #define NVIF_VMM_V0_UNMAP                                                  0x04
 #define NVIF_VMM_V0_PFNMAP                                                 0x05
 #define NVIF_VMM_V0_PFNCLR                                                 0x06
+#define NVIF_VMM_V0_RAW                                                    0x07
 #define NVIF_VMM_V0_MTHD(i)                                         ((i) + 0x80)
 
 struct nvif_vmm_page_v0 {
@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
        __u64 addr;
 };
 
+struct nvif_vmm_raw_v0 {
+       __u8 version;
+#define NVIF_VMM_RAW_V0_GET    0x0
+#define NVIF_VMM_RAW_V0_PUT    0x1
+#define NVIF_VMM_RAW_V0_MAP    0x2
+#define NVIF_VMM_RAW_V0_UNMAP  0x3
+#define NVIF_VMM_RAW_V0_SPARSE 0x4
+       __u8  op;
+       __u8  sparse;
+       __u8  ref;
+       __u8  shift;
+       __u32 argc;
+       __u8  pad01[7];
+       __u64 addr;
+       __u64 size;
+       __u64 offset;
+       __u64 memory;
+       __u64 argv;
+};
+
 struct nvif_vmm_pfnmap_v0 {
        __u8  version;
        __u8  page;
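
For illustration, a raw-VMM method is issued by marshalling this struct through the generic NVIF method path; a sketch assuming nvif_object_mthd() is used, as for the other VMM methods (addr, size and shift are placeholders; the real wrappers live elsewhere in the tree):

    struct nvif_vmm_raw_v0 args = {
            .version = 0,
            .op      = NVIF_VMM_RAW_V0_GET, /* pre-allocate page tables */
            .addr    = addr,                /* start of the VA range */
            .size    = size,                /* length of the VA range */
            .shift   = shift,               /* page size, as a power of two */
    };
    int ret;

    ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW, &args, sizeof(args));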
index a2ee92201ace46a18b5ab94d008a9efd84ea970c..0ecedd0ee0a5ca8028f2ecd175c0aa640d1ec02a 100644 (file)
@@ -4,6 +4,12 @@
 struct nvif_mem;
 struct nvif_mmu;
 
+enum nvif_vmm_type {
+       UNMANAGED,
+       MANAGED,
+       RAW,
+};
+
 enum nvif_vmm_get {
        ADDR,
        PTES,
@@ -30,8 +36,9 @@ struct nvif_vmm {
        int page_nr;
 };
 
-int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
-                 u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
+                 enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
+                 struct nvif_vmm *);
 void nvif_vmm_dtor(struct nvif_vmm *);
 int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
                 u8 page, u8 align, u64 size, struct nvif_vma *);
@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
 int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
                 struct nvif_mem *, u64 offset);
 int nvif_vmm_unmap(struct nvif_vmm *, u64);
+
+int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+                    void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
+int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+                      u8 shift, bool sparse);
+int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
 #endif
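
Since the constructor now takes an explicit type instead of a bool, existing callers migrate from `managed` to the new enum; a sketch (mmu, oclass, addr, size, argv and argc are placeholders):

    struct nvif_vmm vmm = {};
    int ret;

    /* Previously: nvif_vmm_ctor(mmu, "uvmm", oclass, true, addr, size, ...). */
    ret = nvif_vmm_ctor(mmu, "uvmm", oclass, MANAGED, addr, size,
                        argv, argc, &vmm);
    if (ret)
            return ret;

    /* Constructing with RAW instead enables the nvif_vmm_raw_*() calls above. */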
index b67b9c1a6b4e5fd498b2af5166afaa98501ba032..738899fcf30b6a206f10b20b9be016e4952173e4 100644 (file)
@@ -3,7 +3,7 @@
 #define __NVKM_ENGINE_H__
 #define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
 #include <core/subdev.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
 struct nvkm_fb_tile;
 
 extern const struct nvkm_subdev_func nvkm_engine;
@@ -22,6 +22,7 @@ struct nvkm_engine_func {
        int (*init)(struct nvkm_engine *);
        int (*fini)(struct nvkm_engine *, bool suspend);
        int (*reset)(struct nvkm_engine *);
+       int (*nonstall)(struct nvkm_engine *);
        void (*intr)(struct nvkm_engine *);
        void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
        bool (*chsw_load)(struct nvkm_engine *);
@@ -32,8 +33,7 @@ struct nvkm_engine_func {
        } base;
 
        struct {
-               int (*cclass)(struct nvkm_fifo_chan *,
-                             const struct nvkm_oclass *,
+               int (*cclass)(struct nvkm_chan *, const struct nvkm_oclass *,
                              struct nvkm_object **);
                int (*sclass)(struct nvkm_oclass *, int index);
        } fifo;
index 4486d98628493c614034e17ba6f090fd197d7da6..3fd5c007a66353233a0f06310d7f13c119b03031 100644 (file)
@@ -49,9 +49,4 @@ nvkm_blob_dtor(struct nvkm_blob *blob)
        (p = container_of((h), typeof(*p), m), nvkm_list_find_next(p, (h), m, (c)))
 #define nvkm_list_foreach(p,h,m,c)                                                           \
        for (p = nvkm_list_find(p, (h), m, (c)); p; p = nvkm_list_find_next(p, (h), m, (c)))
-
-/*FIXME: remove after */
-#define nvkm_fifo_chan nvkm_chan
-#define nvkm_fifo_chan_func nvkm_chan_func
-#define nvkm_fifo_cgrp nvkm_cgrp
 #endif
index cd86d9198e4ac215511f807ec67fc81dae0fc0c2..b7bb8a29a729784651725b142157875f4be4a57e 100644 (file)
@@ -3,7 +3,7 @@
 #define __NVKM_FLCNEN_H__
 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
 
 enum nvkm_falcon_dmaidx {
        FALCON_DMAIDX_UCODE             = 0,
index 01a22a13b452047425a63914cad7f2f081c8d37b..1755b0df3cc1db62a8bb967bc88aebf52420b16c 100644 (file)
@@ -59,6 +59,7 @@ struct nvkm_fb {
        struct nvkm_memory *mmu_wr;
 };
 
+u64 nvkm_fb_vidmem_size(struct nvkm_device *);
 int nvkm_fb_mem_unlock(struct nvkm_fb *);
 
 void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
index 70e7887ef4b4b5e6515034728c4ece061a7957f0..2fd2f2433fc7d40068873380321f353d277cae76 100644 (file)
@@ -17,6 +17,7 @@ struct nvkm_vma {
        bool part:1; /* Region was split from an allocated region by map(). */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        bool mapped:1; /* Region contains valid pages. */
+       bool no_comp:1; /* Force no memory compression. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
        struct nvkm_tags *tags; /* Compression tag reference. */
 };
@@ -27,10 +28,26 @@ struct nvkm_vmm {
        const char *name;
        u32 debug;
        struct kref kref;
-       struct mutex mutex;
+
+       struct {
+               struct mutex vmm;
+               struct mutex ref;
+               struct mutex map;
+       } mutex;
 
        u64 start;
        u64 limit;
+       struct {
+               struct {
+                       u64 addr;
+                       u64 size;
+               } p;
+               struct {
+                       u64 addr;
+                       u64 size;
+               } n;
+               bool raw;
+       } managed;
 
        struct nvkm_vmm_pt *pd;
        struct list_head join;
@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
 
        const struct nvkm_vmm_page *page;
 
+       bool no_comp;
        struct nvkm_tags *tags;
        u64 next;
        u64 type;
index 82dab51d8aebc93482bec31f358a51e0186b2aee..30afbec9e3b1bdbf87fd05c1cb4b58739f2941c6 100644 (file)
@@ -35,6 +35,7 @@
 #include "nouveau_chan.h"
 #include "nouveau_abi16.h"
 #include "nouveau_vmm.h"
+#include "nouveau_sched.h"
 
 static struct nouveau_abi16 *
 nouveau_abi16(struct drm_file *file_priv)
@@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 {
        struct nouveau_abi16_ntfy *ntfy, *temp;
 
+       /* When a client exits without waiting for its queued up jobs to
+        * finish, it might happen that we fault the channel. This is due to
+        * drm_file_free() calling drm_gem_release() before the postclose()
+        * callback. Hence, we can't tear down this scheduler entity before
+        * uvmm mappings are unmapped. Currently, we can't detect this case.
+        *
+        * However, this should be rare and harmless, since the channel isn't
+        * needed anymore.
+        */
+       nouveau_sched_entity_fini(&chan->sched_entity);
+
        /* wait for all activity to stop before cleaning up */
        if (chan->chan)
                nouveau_channel_idle(chan->chan);
@@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
        if (!drm->channel)
                return nouveau_abi16_put(abi16, -ENODEV);
 
+       /* If uvmm wasn't initialized until now, disable it completely to prevent
+        * userspace from mixing up UAPIs.
+        *
+        * The client lock is already acquired by nouveau_abi16_get().
+        */
+       __nouveau_cli_disable_uvmm_noinit(cli);
+
        device = &abi16->device;
        engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
 
@@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
        if (ret)
                goto done;
 
+       ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
+                                       drm->sched_wq);
+       if (ret)
+               goto done;
+
        init->channel = chan->chan->chid;
 
        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
index 27eae85f33e619ff12a51cc000e5979705707b53..9f538486c10e37108ef0b4aaa41c1ad2542baaef 100644 (file)
@@ -26,6 +26,7 @@ struct nouveau_abi16_chan {
        struct nouveau_bo *ntfy;
        struct nouveau_vma *ntfy_vma;
        struct nvkm_mm  heap;
+       struct nouveau_sched_entity sched_entity;
 };
 
 struct nouveau_abi16 {
@@ -43,28 +44,6 @@ int  nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
 
-struct drm_nouveau_channel_alloc {
-       uint32_t     fb_ctxdma_handle;
-       uint32_t     tt_ctxdma_handle;
-
-       int          channel;
-       uint32_t     pushbuf_domains;
-
-       /* Notifier memory */
-       uint32_t     notifier_handle;
-
-       /* DRM-enforced subchannel assignments */
-       struct {
-               uint32_t handle;
-               uint32_t grclass;
-       } subchan[8];
-       uint32_t nr_subchan;
-};
-
-struct drm_nouveau_channel_free {
-       int channel;
-};
-
 struct drm_nouveau_grobj_alloc {
        int      channel;
        uint32_t handle;
@@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free {
        uint32_t handle;
 };
 
-#define NOUVEAU_GETPARAM_PCI_VENDOR      3
-#define NOUVEAU_GETPARAM_PCI_DEVICE      4
-#define NOUVEAU_GETPARAM_BUS_TYPE        5
-#define NOUVEAU_GETPARAM_FB_SIZE         8
-#define NOUVEAU_GETPARAM_AGP_SIZE        9
-#define NOUVEAU_GETPARAM_CHIPSET_ID      11
-#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
-#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
-#define NOUVEAU_GETPARAM_PTIMER_TIME     14
-#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
-#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
-struct drm_nouveau_getparam {
-       uint64_t param;
-       uint64_t value;
-};
-
 struct drm_nouveau_setparam {
        uint64_t param;
        uint64_t value;
 };
 
-#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
 #define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
 #define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
 #define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
 #define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
index c2ec91cc845d554ba908657f2705ca65dcd4df01..19cab37ac69c9931ea7af1dc659701af98000e58 100644 (file)
@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 
 struct nouveau_bo *
 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
-                u32 tile_mode, u32 tile_flags)
+                u32 tile_mode, u32 tile_flags, bool internal)
 {
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        struct nvif_mmu *mmu = &cli->mmu;
-       struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+       struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
        int i, pi = -1;
 
        if (!*size) {
@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return ERR_PTR(-ENOMEM);
+
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
                        nvbo->force_coherent = true;
        }
 
-       if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-               nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-               if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-                       kfree(nvbo);
-                       return ERR_PTR(-EINVAL);
+       nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+       if (!nouveau_cli_uvmm(cli) || internal) {
+               /* for BO noVM allocs, don't assign kinds */
+               if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+                       nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+                       if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+                               kfree(nvbo);
+                               return ERR_PTR(-EINVAL);
+                       }
+
+                       nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+               } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+                       nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+                       nvbo->comp = (tile_flags & 0x00030000) >> 16;
+                       if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+                               kfree(nvbo);
+                               return ERR_PTR(-EINVAL);
+                       }
+               } else {
+                       nvbo->zeta = (tile_flags & 0x00000007);
                }
+               nvbo->mode = tile_mode;
+
+               /* Determine the desirable target GPU page size for the buffer. */
+               for (i = 0; i < vmm->page_nr; i++) {
+                       /* Because we cannot currently allow VMM maps to fail
+                        * during buffer migration, we need to determine page
+                        * size for the buffer up-front, and pre-allocate its
+                        * page tables.
+                        *
+                        * Skip page sizes that can't support needed domains.
+                        */
+                       if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+                           (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+                               continue;
+                       if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+                           (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+                               continue;
 
-               nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-       } else
-       if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-               nvbo->comp = (tile_flags & 0x00030000) >> 16;
-               if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+                       /* Select this page size if it's the first that supports
+                        * the potential memory domains, or when it's compatible
+                        * with the requested compression settings.
+                        */
+                       if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+                               pi = i;
+
+                       /* Stop once the buffer is larger than the current page size. */
+                       if (*size >= 1ULL << vmm->page[i].shift)
+                               break;
+               }
+
+               if (WARN_ON(pi < 0)) {
                        kfree(nvbo);
                        return ERR_PTR(-EINVAL);
                }
-       } else {
-               nvbo->zeta = (tile_flags & 0x00000007);
-       }
-       nvbo->mode = tile_mode;
-       nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-
-       /* Determine the desirable target GPU page size for the buffer. */
-       for (i = 0; i < vmm->page_nr; i++) {
-               /* Because we cannot currently allow VMM maps to fail
-                * during buffer migration, we need to determine page
-                * size for the buffer up-front, and pre-allocate its
-                * page tables.
-                *
-                * Skip page sizes that can't support needed domains.
-                */
-               if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-                   (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
-                       continue;
-               if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
-                   (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
-                       continue;
 
-               /* Select this page size if it's the first that supports
-                * the potential memory domains, or when it's compatible
-                * with the requested compression settings.
-                */
-               if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
-                       pi = i;
-
-               /* Stop once the buffer is larger than the current page size. */
-               if (*size >= 1ULL << vmm->page[i].shift)
-                       break;
-       }
+               /* Disable compression if suitable settings couldn't be found. */
+               if (nvbo->comp && !vmm->page[pi].comp) {
+                       if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+                               nvbo->kind = mmu->kind[nvbo->kind];
+                       nvbo->comp = 0;
+               }
+               nvbo->page = vmm->page[pi].shift;
+       } else {
+               /* reject other tile flags when in VM mode. */
+               if (tile_mode)
+                       return ERR_PTR(-EINVAL);
+               if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+                       return ERR_PTR(-EINVAL);
 
-       if (WARN_ON(pi < 0)) {
-               kfree(nvbo);
-               return ERR_PTR(-EINVAL);
-       }
+               /* Determine the desirable target GPU page size for the buffer. */
+               for (i = 0; i < vmm->page_nr; i++) {
+                       /* Because we cannot currently allow VMM maps to fail
+                        * during buffer migration, we need to determine page
+                        * size for the buffer up-front, and pre-allocate its
+                        * page tables.
+                        *
+                        * Skip page sizes that can't support needed domains.
+                        */
+                       if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+                               continue;
+                       if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+                           (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+                               continue;
 
-       /* Disable compression if suitable settings couldn't be found. */
-       if (nvbo->comp && !vmm->page[pi].comp) {
-               if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
-                       nvbo->kind = mmu->kind[nvbo->kind];
-               nvbo->comp = 0;
+                       if (pi < 0)
+                               pi = i;
+                       /* Stop once the buffer is larger than the current page size. */
+                       if (*size >= 1ULL << vmm->page[i].shift)
+                               break;
+               }
+               if (WARN_ON(pi < 0)) {
+                       kfree(nvbo);
+                       return ERR_PTR(-EINVAL);
+               }
+               nvbo->page = vmm->page[pi].shift;
        }
-       nvbo->page = vmm->page[pi].shift;
 
        nouveau_bo_fixup_align(nvbo, align, size);
 
@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
 {
        int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
        int ret;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false,
+               .resv = robj,
+       };
 
        nouveau_bo_placement_set(nvbo, domain, 0);
        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
-       ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
-                                  &nvbo->placement, align >> PAGE_SHIFT, false,
+       ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
+                                  &nvbo->placement, align >> PAGE_SHIFT, &ctx,
                                   sg, robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
 
+       if (!robj)
+               ttm_bo_unreserve(&nvbo->bo);
+
        return 0;
 }
 
@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
        int ret;
 
        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
-                               tile_flags);
+                               tile_flags, true);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);
 
@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
        dma_resv_init(&nvbo->bo.base._resv);
        drm_vma_node_reset(&nvbo->bo.base.vma_node);
 
+       /* This must be called before ttm_bo_init_reserved(). Subsequent
+        * bo_move() callbacks might already iterate the GEM's GPUVA list.
+        */
+       drm_gem_gpuva_init(&nvbo->bo.base);
+
        ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
        if (ret)
                return ret;
@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
                mutex_lock(&cli->mutex);
        else
                mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
        ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
-       if (ret == 0) {
-               ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
-               if (ret == 0) {
-                       ret = nouveau_fence_new(chan, false, &fence);
-                       if (ret == 0) {
-                               /* TODO: figure out a better solution here
-                                *
-                                * wait on the fence here explicitly as going through
-                                * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
-                                *
-                                * Without this the operation can timeout and we'll fallback to a
-                                * software copy, which might take several minutes to finish.
-                                */
-                               nouveau_fence_wait(fence, false, false);
-                               ret = ttm_bo_move_accel_cleanup(bo,
-                                                               &fence->base,
-                                                               evict, false,
-                                                               new_reg);
-                               nouveau_fence_unref(&fence);
-                       }
-               }
+       if (ret)
+               goto out_unlock;
+
+       ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+       if (ret)
+               goto out_unlock;
+
+       ret = nouveau_fence_new(&fence);
+       if (ret)
+               goto out_unlock;
+
+       ret = nouveau_fence_emit(fence, chan);
+       if (ret) {
+               nouveau_fence_unref(&fence);
+               goto out_unlock;
        }
+
+       /* TODO: figure out a better solution here
+        *
+        * Wait on the fence here explicitly, as going through
+        * ttm_bo_move_accel_cleanup() somehow doesn't seem to do it.
+        *
+        * Without this the operation can time out and we'll fall back to a
+        * software copy, which might take several minutes to finish.
+        */
+       nouveau_fence_wait(fence, false, false);
+       ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+                                       new_reg);
+       nouveau_fence_unref(&fence);
+
+out_unlock:
        mutex_unlock(&cli->mutex);
        return ret;
 }
@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
                list_for_each_entry(vma, &nvbo->vma_list, head) {
                        nouveau_vma_map(vma, mem);
                }
+               nouveau_uvmm_bo_map_all(nvbo, mem);
        } else {
                list_for_each_entry(vma, &nvbo->vma_list, head) {
                        ret = dma_resv_wait_timeout(bo->base.resv,
@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
                        WARN_ON(ret <= 0);
                        nouveau_vma_unmap(vma);
                }
+               nouveau_uvmm_bo_unmap_all(nvbo);
        }
 
        if (new_reg)
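
One consequence of switching nouveau_bo_init() from ttm_bo_init_validate() to ttm_bo_init_reserved() is that a BO created against an externally provided reservation object stays reserved on return. A sketch of the pattern, mirroring the hunks above (bdev, bo, type, placement and align stand in for the real arguments):

    struct ttm_operation_ctx ctx = {
            .interruptible = false,
            .no_wait_gpu   = false,
            .resv          = robj,  /* share the caller's reservation object */
    };

    ret = ttm_bo_init_reserved(bdev, bo, type, placement, align, &ctx,
                               sg, robj, nouveau_bo_del_ttm);
    if (ret)
            return ret;     /* TTM already ran the destroy callback */

    if (!robj)
            ttm_bo_unreserve(bo);   /* only standalone BOs are unlocked here */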
index 774dd93ca76bd8158f63425f3e0fdfbdb2f85125..07f671cf895e0424c2a17836455089b733757680 100644 (file)
@@ -26,6 +26,7 @@ struct nouveau_bo {
        struct list_head entry;
        int pbbo_index;
        bool validate_mapped;
+       bool no_share;
 
        /* GPU address space is independent of CPU word size */
        uint64_t offset;
@@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
 struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
-                                   u32 domain, u32 tile_mode, u32 tile_flags);
+                                   u32 domain, u32 tile_mode, u32 tile_flags, bool internal);
 int  nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
                     struct sg_table *sg, struct dma_resv *robj);
 int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
index e648ecd0c1a03fb9ed614f2ce1f30f90c21b3dea..1fd5ccf41128adf9904ef5c8652aebd4fd26016b 100644 (file)
@@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
 int nouveau_vram_pushbuf;
 module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 
+void
+nouveau_channel_kill(struct nouveau_channel *chan)
+{
+       atomic_set(&chan->killed, 1);
+       if (chan->fence)
+               nouveau_fence_context_kill(chan->fence, -ENODEV);
+}
+
 static int
 nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
 {
@@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
        struct nouveau_cli *cli = (void *)chan->user.client;
 
        NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
-       atomic_set(&chan->killed, 1);
-       if (chan->fence)
-               nouveau_fence_context_kill(chan->fence, -ENODEV);
+
+       if (unlikely(!atomic_read(&chan->killed)))
+               nouveau_channel_kill(chan);
 
        return NVIF_EVENT_DROP;
 }
@@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan)
                struct nouveau_fence *fence = NULL;
                int ret;
 
-               ret = nouveau_fence_new(chan, false, &fence);
+               ret = nouveau_fence_new(&fence);
                if (!ret) {
-                       ret = nouveau_fence_wait(fence, false, false);
+                       ret = nouveau_fence_emit(fence, chan);
+                       if (!ret)
+                               ret = nouveau_fence_wait(fence, false, false);
                        nouveau_fence_unref(&fence);
                }
 
@@ -90,6 +100,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                if (cli)
                        nouveau_svmm_part(chan->vmm->svmm, chan->inst);
 
+               nvif_object_dtor(&chan->blit);
                nvif_object_dtor(&chan->nvsw);
                nvif_object_dtor(&chan->gart);
                nvif_object_dtor(&chan->vram);
@@ -148,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
        chan->device = device;
        chan->drm = drm;
-       chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+       chan->vmm = nouveau_cli_vmm(cli);
        atomic_set(&chan->killed, 0);
 
        /* allocate memory for dma push buffer */
index e06a8ffed31a8bc10e084d61dfc05b1ec90e0ba4..5de2ef4e98c2bb111d23462eef426734e0074c38 100644 (file)
@@ -53,6 +53,7 @@ struct nouveau_channel {
        u32 user_put;
 
        struct nvif_object user;
+       struct nvif_object blit;
 
        struct nvif_event kill;
        atomic_t killed;
@@ -65,6 +66,7 @@ int  nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv,
                         u32 vram, u32 gart, struct nouveau_channel **);
 void nouveau_channel_del(struct nouveau_channel **);
 int  nouveau_channel_idle(struct nouveau_channel *);
+void nouveau_channel_kill(struct nouveau_channel *);
 
 extern int nouveau_vram_pushbuf;
 
index f75c6f09dd2af5fca8431f1144eb3439511f81c0..68b4fb4bec63f86f200fa7cf989eafe5c6464868 100644 (file)
@@ -619,7 +619,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 
                nouveau_connector_set_encoder(connector, nv_encoder);
                conn_status = connector_status_connected;
-               drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
+
+               if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+                       drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
+
                goto out;
        } else {
                nouveau_connector_set_edid(nv_connector, NULL);
@@ -1079,7 +1082,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
        case DCB_OUTPUT_TV:
                return get_slave_funcs(encoder)->mode_valid(encoder, mode);
        case DCB_OUTPUT_DP:
-               return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
+               return nv50_dp_mode_valid(nv_encoder, mode, NULL);
        default:
                BUG();
                return MODE_BAD;
index 99d022a91afc565a9ee9e3bf584369e64ba81ec1..053f703f2f68d73a8bb7b2357735cfa77c2d9a63 100644 (file)
@@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
        return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
 }
 
+static void
+nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
+{
+       MA_STATE(mas, &uvmm->region_mt, 0, 0);
+       struct nouveau_uvma_region *reg;
+
+       seq_puts  (m, " VA regions  | start              | range              | end                \n");
+       seq_puts  (m, "----------------------------------------------------------------------------\n");
+       mas_for_each(&mas, reg, ULONG_MAX)
+               seq_printf(m, "             | 0x%016llx | 0x%016llx | 0x%016llx\n",
+                          reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
+}
+
+static int
+nouveau_debugfs_gpuva(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+       struct nouveau_cli *cli;
+
+       mutex_lock(&drm->clients_lock);
+       list_for_each_entry(cli, &drm->clients, head) {
+               struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+               if (!uvmm)
+                       continue;
+
+               nouveau_uvmm_lock(uvmm);
+               drm_debugfs_gpuva_info(m, &uvmm->umgr);
+               seq_puts(m, "\n");
+               nouveau_debugfs_gpuva_regions(m, uvmm);
+               nouveau_uvmm_unlock(uvmm);
+       }
+       mutex_unlock(&drm->clients_lock);
+
+       return 0;
+}
+
 static const struct file_operations nouveau_pstate_fops = {
        .owner = THIS_MODULE,
        .open = nouveau_debugfs_pstate_open,
@@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = {
 static struct drm_info_list nouveau_debugfs_list[] = {
        { "vbios.rom",  nouveau_debugfs_vbios_image, 0, NULL },
        { "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
+       DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
index ec3ffff487fcd349007ecd3b8f9da609461926d4..99977e5fe71612600fafd471b6aff025070bac63 100644 (file)
@@ -465,7 +465,8 @@ nouveau_display_hpd_work(struct work_struct *work)
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        u32 pending;
-       bool changed = false;
+       int changed = 0;
+       struct drm_connector *first_changed_connector = NULL;
 
        pm_runtime_get_sync(dev->dev);
 
@@ -509,7 +510,12 @@ nouveau_display_hpd_work(struct work_struct *work)
                if (old_epoch_counter == connector->epoch_counter)
                        continue;
 
-               changed = true;
+               changed++;
+               if (!first_changed_connector) {
+                       drm_connector_get(connector);
+                       first_changed_connector = connector;
+               }
+
                drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
                            connector->base.id, connector->name,
                            drm_get_connector_status_name(old_status),
@@ -520,9 +526,14 @@ nouveau_display_hpd_work(struct work_struct *work)
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);
 
-       if (changed)
+       if (changed == 1)
+               drm_kms_helper_connector_hotplug_event(first_changed_connector);
+       else if (changed > 0)
                drm_kms_helper_hotplug_event(dev);
 
+       if (first_changed_connector)
+               drm_connector_put(first_changed_connector);
+
        pm_runtime_mark_last_busy(drm->dev->dev);
 noop:
        pm_runtime_put_autosuspend(dev->dev);
index 789857faa04885842f91188b5a41dae840f04ef1..61e84562094a218dcf9790f35bc4dec196dc6556 100644 (file)
@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
                goto done;
        }
 
-       nouveau_fence_new(dmem->migrate.chan, false, &fence);
+       if (!nouveau_fence_new(&fence))
+               nouveau_fence_emit(fence, dmem->migrate.chan);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
                }
        }
 
-       nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+       if (!nouveau_fence_new(&fence))
+               nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
        migrate_device_pages(src_pfns, dst_pfns, npages);
        nouveau_dmem_fence_done(&fence);
        migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                addr += PAGE_SIZE;
        }
 
-       nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
+       if (!nouveau_fence_new(&fence))
+               nouveau_fence_emit(fence, drm->dmem->migrate.chan);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
index d49b4875fc3c934fe4d6cd87c0d3b1b3b997088f..6a4980b2d4d4e16e2ff6f8ee66bb52b150ac6c87 100644 (file)
@@ -267,8 +267,7 @@ nouveau_dp_irq(struct work_struct *work)
  *   yet)
  */
 enum drm_mode_status
-nv50_dp_mode_valid(struct drm_connector *connector,
-                  struct nouveau_encoder *outp,
+nv50_dp_mode_valid(struct nouveau_encoder *outp,
                   const struct drm_display_mode *mode,
                   unsigned *out_clock)
 {
index 7aac9384600ed43863ded2165441c731886c36de..4396f501b16a3f40e7048755d142a7d252b0f8a3 100644 (file)
@@ -68,6 +68,9 @@
 #include "nouveau_platform.h"
 #include "nouveau_svm.h"
 #include "nouveau_dmem.h"
+#include "nouveau_exec.h"
+#include "nouveau_uvmm.h"
+#include "nouveau_sched.h"
 
 DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
                        "DRM_UT_CORE",
@@ -196,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
        WARN_ON(!list_empty(&cli->worker));
 
        usif_client_fini(cli);
+       nouveau_uvmm_fini(&cli->uvmm);
+       nouveau_sched_entity_fini(&cli->sched_entity);
        nouveau_vmm_fini(&cli->svm);
        nouveau_vmm_fini(&cli->vmm);
        nvif_mmu_dtor(&cli->mmu);
@@ -301,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
        }
 
        cli->mem = &mems[ret];
+
+       ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
+                                       drm->sched_wq);
+       if (ret)
+               goto done;
+
        return 0;
 done:
        if (ret)
@@ -375,15 +386,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
                ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
                                       NVDRM_NVSW, nouveau_abi16_swclass(drm),
                                       NULL, 0, &drm->channel->nvsw);
+
+               if (ret == 0 && device->info.chipset >= 0x11) {
+                       ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
+                                              0x005f, 0x009f,
+                                              NULL, 0, &drm->channel->blit);
+               }
+
                if (ret == 0) {
                        struct nvif_push *push = drm->channel->chan.push;
-                       ret = PUSH_WAIT(push, 2);
-                       if (ret == 0)
+                       ret = PUSH_WAIT(push, 8);
+                       if (ret == 0) {
+                               if (device->info.chipset >= 0x11) {
+                                       PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
+                                       PUSH_NVSQ(push, NV09F, 0x0120, 0,
+                                                              0x0124, 1,
+                                                              0x0128, 2);
+                               }
                                PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
+                       }
                }
 
                if (ret) {
-                       NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+                       NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
                        nouveau_accel_gr_fini(drm);
                        return;
                }
@@ -554,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev)
        nvif_parent_ctor(&nouveau_parent, &drm->parent);
        drm->master.base.object.parent = &drm->parent;
 
-       ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+       ret = nouveau_sched_init(drm);
        if (ret)
                goto fail_alloc;
 
+       ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+       if (ret)
+               goto fail_sched;
+
        ret = nouveau_cli_init(drm, "DRM", &drm->client);
        if (ret)
                goto fail_master;
@@ -614,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev)
        }
 
        return 0;
-
 fail_dispinit:
        nouveau_display_destroy(dev);
 fail_dispctor:
@@ -627,6 +655,8 @@ fail_ttm:
        nouveau_cli_fini(&drm->client);
 fail_master:
        nouveau_cli_fini(&drm->master);
+fail_sched:
+       nouveau_sched_fini(drm);
 fail_alloc:
        nvif_parent_dtor(&drm->parent);
        kfree(drm);
@@ -678,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
        }
        mutex_unlock(&drm->clients_lock);
 
+       nouveau_sched_fini(drm);
+
        nouveau_cli_fini(&drm->client);
        nouveau_cli_fini(&drm->master);
        nvif_parent_dtor(&drm->parent);
@@ -1179,6 +1211,9 @@ nouveau_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
 };
 
 long
@@ -1226,6 +1261,8 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver_stub = {
        .driver_features = DRIVER_GEM |
+                          DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
+                          DRIVER_GEM_GPUVA |
                           DRIVER_MODESET |
                           DRIVER_RENDER,
        .open = nouveau_drm_open,
@@ -1240,10 +1277,7 @@ driver_stub = {
        .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
        .fops = &nouveau_driver_fops,
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
        .dumb_create = nouveau_display_dumb_create,
        .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
index b5de312a523ffc8a4ed7db198decc2fcb5f15ca6..1fe17ff95f5eec4c49e73a7123c91112c2a3fd22 100644 (file)
@@ -10,8 +10,8 @@
 #define DRIVER_DATE            "20120801"
 
 #define DRIVER_MAJOR           1
-#define DRIVER_MINOR           3
-#define DRIVER_PATCHLEVEL      1
+#define DRIVER_MINOR           4
+#define DRIVER_PATCHLEVEL      0
 
 /*
  * 1.1.1:
@@ -63,7 +63,9 @@ struct platform_device;
 
 #include "nouveau_fence.h"
 #include "nouveau_bios.h"
+#include "nouveau_sched.h"
 #include "nouveau_vmm.h"
+#include "nouveau_uvmm.h"
 
 struct nouveau_drm_tile {
        struct nouveau_fence *fence;
@@ -91,6 +93,10 @@ struct nouveau_cli {
        struct nvif_mmu mmu;
        struct nouveau_vmm vmm;
        struct nouveau_vmm svm;
+       struct nouveau_uvmm uvmm;
+
+       struct nouveau_sched_entity sched_entity;
+
        const struct nvif_mclass *mem;
 
        struct list_head head;
@@ -112,6 +118,59 @@ struct nouveau_cli_work {
        struct dma_fence_cb cb;
 };
 
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm(struct nouveau_cli *cli)
+{
+       if (!cli || !cli->uvmm.vmm.cli)
+               return NULL;
+
+       return &cli->uvmm;
+}
+
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm_locked(struct nouveau_cli *cli)
+{
+       struct nouveau_uvmm *uvmm;
+
+       mutex_lock(&cli->mutex);
+       uvmm = nouveau_cli_uvmm(cli);
+       mutex_unlock(&cli->mutex);
+
+       return uvmm;
+}
+
+static inline struct nouveau_vmm *
+nouveau_cli_vmm(struct nouveau_cli *cli)
+{
+       struct nouveau_uvmm *uvmm;
+
+       uvmm = nouveau_cli_uvmm(cli);
+       if (uvmm)
+               return &uvmm->vmm;
+
+       if (cli->svm.cli)
+               return &cli->svm;
+
+       return &cli->vmm;
+}
+
+static inline void
+__nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+       if (!uvmm)
+               cli->uvmm.disabled = true;
+}
+
+static inline void
+nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+       mutex_lock(&cli->mutex);
+       __nouveau_cli_disable_uvmm_noinit(cli);
+       mutex_unlock(&cli->mutex);
+}
+
 void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
                            struct nouveau_cli_work *);
 
@@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv)
        return fpriv ? fpriv->driver_priv : NULL;
 }
 
+static inline void
+u_free(void *addr)
+{
+       kvfree(addr);
+}
+
+static inline void *
+u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
+{
+       void *mem;
+       void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+       size *= nmemb;
+
+       mem = kvmalloc(size, GFP_KERNEL);
+       if (!mem)
+               return ERR_PTR(-ENOMEM);
+
+       if (copy_from_user(mem, userptr, size)) {
+               u_free(mem);
+               return ERR_PTR(-EFAULT);
+       }
+
+       return mem;
+}
+
 #include <nvif/object.h>
 #include <nvif/parent.h>
 
@@ -222,6 +307,10 @@ struct nouveau_drm {
                struct mutex lock;
                bool component_registered;
        } audio;
+
+       struct drm_gpu_scheduler sched;
+       struct workqueue_struct *sched_wq;
+
 };
 
 static inline struct nouveau_drm *
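
A minimal usage sketch for the u_memcpya()/u_free() pair added above, copying a user-supplied sync array the way the EXEC path does later in this series (req and its fields are illustrative here):

    struct drm_nouveau_sync *syncs;

    syncs = u_memcpya(req->wait_ptr, req->wait_count, sizeof(*syncs));
    if (IS_ERR(syncs))
            return PTR_ERR(syncs);

    /* ... validate and consume the copied array ... */

    u_free(syncs);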
index 70c1ad6c4d9dbac8858e459db81ada48325cdc48..bcba1a14cfab6d713985b9e12064f71424b138d0 100644 (file)
@@ -143,8 +143,7 @@ enum nouveau_dp_status {
 int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *);
 bool nouveau_dp_link_check(struct nouveau_connector *);
 void nouveau_dp_irq(struct work_struct *);
-enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
-                                       struct nouveau_encoder *,
+enum drm_mode_status nv50_dp_mode_valid(struct nouveau_encoder *,
                                        const struct drm_display_mode *,
                                        unsigned *clock);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
new file mode 100644 (file)
index 0000000..0f927ad
--- /dev/null
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_chan.h"
+#include "nouveau_sched.h"
+#include "nouveau_uvmm.h"
+
+/**
+ * DOC: Overview
+ *
+ * Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
+ * DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
+ *
+ * To use the UAPI, a user client must first initialize the VA space using the
+ * DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space should be
+ * managed by the kernel and which by the UMD.
+ *
+ * The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
+ * userspace-manageable portion of the VA space. It provides operations to map
+ * and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
+ * backed by a GEM object and the kernel will ignore GEM handles provided
+ * alongside a sparse mapping.
+ *
+ * Userspace may request memory backed mappings either within or outside of the
+ * bounds (but not crossing those bounds) of a previously mapped sparse
+ * mapping. Subsequently requested memory backed mappings within a sparse
+ * mapping will take precedence over the corresponding range of the sparse
+ * mapping. If such memory backed mappings are unmapped, the kernel will make
+ * sure that the corresponding sparse mapping will take their place again.
+ * Requests to unmap a sparse mapping that still contains memory backed mappings
+ * will result in those memory backed mappings being unmapped first.
+ *
+ * Unmap requests are not bound to the range of existing mappings and can even
+ * overlap the bounds of sparse mappings. For such a request, the kernel will
+ * make sure to unmap all memory backed mappings within the given range,
+ * splitting up memory backed mappings which are only partially contained
+ * within the given range. Unmap requests with the sparse flag set must match
+ * the range of a previously mapped sparse mapping exactly though.
+ *
+ * While the kernel generally permits arbitrary sequences and ranges of memory
+ * backed mappings being mapped and unmapped, either within a single or multiple
+ * VM_BIND ioctl calls, there are some restrictions for sparse mappings.
+ *
+ * The kernel does not permit userspace to:
+ *   - unmap non-existent sparse mappings
+ *   - unmap a sparse mapping and map a new sparse mapping overlapping the range
+ *     of the previously unmapped sparse mapping within the same VM_BIND ioctl
+ *   - unmap a sparse mapping and map new memory backed mappings overlapping the
+ *     range of the previously unmapped sparse mapping within the same VM_BIND
+ *     ioctl
+ *
+ * When using the VM_BIND ioctl to request the kernel to map memory to a given
+ * virtual address in the GPU's VA space there is no guarantee that the actual
+ * mappings are created in the GPU's MMU. If the given memory is swapped out
+ * at the time the bind operation is executed, the kernel will stash the mapping
+ * details into its internal allocator and create the actual MMU mappings once
+ * the memory is swapped back in. While this is transparent for userspace, it is
+ * guaranteed that all the backing memory is swapped back in and all the memory
+ * mappings, as requested by userspace previously, are actually mapped once the
+ * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
+ *
+ * A VM_BIND job can be executed either synchronously or asynchronously. If
+ * executed asynchronously, userspace may provide a list of syncobjs this job
+ * will wait for and/or a list of syncobjs the kernel will signal once the
+ * VM_BIND job finishes execution. If executed synchronously, the ioctl will
+ * block until the bind job is finished. For synchronous jobs, the kernel will
+ * not permit any syncobjs to be submitted.
+ *
+ * To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
+ * jobs are always executed asynchronously, and, like VM_BIND jobs, provide
+ * the option to synchronize them with syncobjs.
+ *
+ * Besides that, EXEC jobs can be scheduled to execute on a specified channel.
+ *
+ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
+ * an up-to-date view of the VA space. However, the actual mappings might still
+ * be pending. Hence, EXEC jobs require the fences of the corresponding
+ * VM_BIND jobs they depend on to be attached to them.
+ */
+
+static int
+nouveau_exec_job_submit(struct nouveau_job *job)
+{
+       struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+       struct nouveau_cli *cli = job->cli;
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+       struct drm_exec *exec = &job->exec;
+       struct drm_gem_object *obj;
+       unsigned long index;
+       int ret;
+
+       ret = nouveau_fence_new(&exec_job->fence);
+       if (ret)
+               return ret;
+
+       nouveau_uvmm_lock(uvmm);
+       drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+                           DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_until_all_locked(exec) {
+               struct drm_gpuva *va;
+
+               drm_gpuva_for_each_va(va, &uvmm->umgr) {
+                       if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+                               continue;
+
+                       ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
+                       drm_exec_retry_on_contention(exec);
+                       if (ret)
+                               goto err_uvmm_unlock;
+               }
+       }
+       nouveau_uvmm_unlock(uvmm);
+
+       drm_exec_for_each_locked_object(exec, index, obj) {
+               struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+               ret = nouveau_bo_validate(nvbo, true, false);
+               if (ret)
+                       goto err_exec_fini;
+       }
+
+       return 0;
+
+err_uvmm_unlock:
+       nouveau_uvmm_unlock(uvmm);
+err_exec_fini:
+       drm_exec_fini(exec);
+       return ret;
+
+}
+
+static void
+nouveau_exec_job_armed_submit(struct nouveau_job *job)
+{
+       struct drm_exec *exec = &job->exec;
+       struct drm_gem_object *obj;
+       unsigned long index;
+
+       drm_exec_for_each_locked_object(exec, index, obj)
+               dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+       drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_exec_job_run(struct nouveau_job *job)
+{
+       struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+       struct nouveau_channel *chan = exec_job->chan;
+       struct nouveau_fence *fence = exec_job->fence;
+       int i, ret;
+
+       ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+       if (ret) {
+               NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       for (i = 0; i < exec_job->push.count; i++) {
+               nv50_dma_push(chan, exec_job->push.s[i].va,
+                             exec_job->push.s[i].va_len);
+       }
+
+       ret = nouveau_fence_emit(fence, chan);
+       if (ret) {
+               NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
+               WIND_RING(chan);
+               return ERR_PTR(ret);
+       }
+
+       exec_job->fence = NULL;
+
+       return &fence->base;
+}
+
+static void
+nouveau_exec_job_free(struct nouveau_job *job)
+{
+       struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+
+       nouveau_job_free(job);
+
+       nouveau_fence_unref(&exec_job->fence);
+       kfree(exec_job->push.s);
+       kfree(exec_job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_exec_job_timeout(struct nouveau_job *job)
+{
+       struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+       struct nouveau_channel *chan = exec_job->chan;
+
+       if (unlikely(!atomic_read(&chan->killed)))
+               nouveau_channel_kill(chan);
+
+       NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
+                 chan->chid);
+
+       nouveau_sched_entity_fini(job->entity);
+
+       return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static struct nouveau_job_ops nouveau_exec_job_ops = {
+       .submit = nouveau_exec_job_submit,
+       .armed_submit = nouveau_exec_job_armed_submit,
+       .run = nouveau_exec_job_run,
+       .free = nouveau_exec_job_free,
+       .timeout = nouveau_exec_job_timeout,
+};
+
+int
+nouveau_exec_job_init(struct nouveau_exec_job **pjob,
+                     struct nouveau_exec_job_args *__args)
+{
+       struct nouveau_exec_job *job;
+       struct nouveau_job_args args = {};
+       int ret;
+
+       job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
+       if (!job)
+               return -ENOMEM;
+
+       job->push.count = __args->push.count;
+       if (__args->push.count) {
+               job->push.s = kmemdup(__args->push.s,
+                                     sizeof(*__args->push.s) *
+                                     __args->push.count,
+                                     GFP_KERNEL);
+               if (!job->push.s) {
+                       ret = -ENOMEM;
+                       goto err_free_job;
+               }
+       }
+
+       job->chan = __args->chan;
+
+       args.sched_entity = __args->sched_entity;
+       args.file_priv = __args->file_priv;
+
+       args.in_sync.count = __args->in_sync.count;
+       args.in_sync.s = __args->in_sync.s;
+
+       args.out_sync.count = __args->out_sync.count;
+       args.out_sync.s = __args->out_sync.s;
+
+       args.ops = &nouveau_exec_job_ops;
+       args.resv_usage = DMA_RESV_USAGE_WRITE;
+
+       ret = nouveau_job_init(&job->base, &args);
+       if (ret)
+               goto err_free_pushs;
+
+       return 0;
+
+err_free_pushs:
+       kfree(job->push.s);
+err_free_job:
+       kfree(job);
+       *pjob = NULL;
+
+       return ret;
+}
+
+static int
+nouveau_exec(struct nouveau_exec_job_args *args)
+{
+       struct nouveau_exec_job *job;
+       int ret;
+
+       ret = nouveau_exec_job_init(&job, args);
+       if (ret)
+               return ret;
+
+       ret = nouveau_job_submit(&job->base);
+       if (ret)
+               goto err_job_fini;
+
+       return 0;
+
+err_job_fini:
+       nouveau_job_fini(&job->base);
+       return ret;
+}
+
+static int
+nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
+                  struct drm_nouveau_exec *req)
+{
+       struct drm_nouveau_sync **s;
+       u32 inc = req->wait_count;
+       u64 ins = req->wait_ptr;
+       u32 outc = req->sig_count;
+       u64 outs = req->sig_ptr;
+       u32 pushc = req->push_count;
+       u64 pushs = req->push_ptr;
+       int ret;
+
+       if (pushc) {
+               args->push.count = pushc;
+               args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
+               if (IS_ERR(args->push.s))
+                       return PTR_ERR(args->push.s);
+       }
+
+       if (inc) {
+               s = &args->in_sync.s;
+
+               args->in_sync.count = inc;
+               *s = u_memcpya(ins, inc, sizeof(**s));
+               if (IS_ERR(*s)) {
+                       ret = PTR_ERR(*s);
+                       goto err_free_pushs;
+               }
+       }
+
+       if (outc) {
+               s = &args->out_sync.s;
+
+               args->out_sync.count = outc;
+               *s = u_memcpya(outs, outc, sizeof(**s));
+               if (IS_ERR(*s)) {
+                       ret = PTR_ERR(*s);
+                       goto err_free_ins;
+               }
+       }
+
+       return 0;
+
+err_free_ins:
+       u_free(args->in_sync.s);
+err_free_pushs:
+       u_free(args->push.s);
+       return ret;
+}
+
+static void
+nouveau_exec_ufree(struct nouveau_exec_job_args *args)
+{
+       u_free(args->push.s);
+       u_free(args->in_sync.s);
+       u_free(args->out_sync.s);
+}
+
+int
+nouveau_exec_ioctl_exec(struct drm_device *dev,
+                       void *data,
+                       struct drm_file *file_priv)
+{
+       struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
+       struct nouveau_cli *cli = nouveau_cli(file_priv);
+       struct nouveau_abi16_chan *chan16;
+       struct nouveau_channel *chan = NULL;
+       struct nouveau_exec_job_args args = {};
+       struct drm_nouveau_exec *req = data;
+       int ret = 0;
+
+       if (unlikely(!abi16))
+               return -ENOMEM;
+
+       /* abi16 locks already */
+       if (unlikely(!nouveau_cli_uvmm(cli)))
+               return nouveau_abi16_put(abi16, -ENOSYS);
+
+       list_for_each_entry(chan16, &abi16->channels, head) {
+               if (chan16->chan->chid == req->channel) {
+                       chan = chan16->chan;
+                       break;
+               }
+       }
+
+       if (!chan)
+               return nouveau_abi16_put(abi16, -ENOENT);
+
+       if (unlikely(atomic_read(&chan->killed)))
+               return nouveau_abi16_put(abi16, -ENODEV);
+
+       if (!chan->dma.ib_max)
+               return nouveau_abi16_put(abi16, -ENOSYS);
+
+       if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
+               NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
+                        req->push_count, NOUVEAU_GEM_MAX_PUSH);
+               return nouveau_abi16_put(abi16, -EINVAL);
+       }
+
+       ret = nouveau_exec_ucopy(&args, req);
+       if (ret)
+               goto out;
+
+       args.sched_entity = &chan16->sched_entity;
+       args.file_priv = file_priv;
+       args.chan = chan;
+
+       ret = nouveau_exec(&args);
+       if (ret)
+               goto out_free_args;
+
+out_free_args:
+       nouveau_exec_ufree(&args);
+out:
+       return nouveau_abi16_put(abi16, ret);
+}
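+
+/* Userspace usage sketch (illustrative; struct and field names are taken
+ * from nouveau_exec_ucopy() above, while push_va, push_len and chid are
+ * placeholders). A client fills struct drm_nouveau_exec with the target
+ * channel, an array of drm_nouveau_exec_push entries and optional wait/sig
+ * syncobjs, then issues the exec ioctl this file backs:
+ *
+ *     struct drm_nouveau_exec_push push = {
+ *             .va = push_va,
+ *             .va_len = push_len,
+ *     };
+ *     struct drm_nouveau_exec req = {
+ *             .channel = chid,
+ *             .push_count = 1,
+ *             .push_ptr = (__u64)(uintptr_t)&push,
+ *     };
+ *
+ * and submits req through the exec ioctl defined in the new uapi header
+ * (not shown in this hunk).
+ */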
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
new file mode 100644 (file)
index 0000000..778cacd
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_EXEC_H__
+#define __NOUVEAU_EXEC_H__
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_sched.h"
+
+struct nouveau_exec_job_args {
+       struct drm_file *file_priv;
+       struct nouveau_sched_entity *sched_entity;
+
+       struct drm_exec exec;
+       struct nouveau_channel *chan;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } in_sync;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } out_sync;
+
+       struct {
+               struct drm_nouveau_exec_push *s;
+               u32 count;
+       } push;
+};
+
+struct nouveau_exec_job {
+       struct nouveau_job base;
+       struct nouveau_fence *fence;
+       struct nouveau_channel *chan;
+
+       struct {
+               struct drm_nouveau_exec_push *s;
+               u32 count;
+       } push;
+};
+
+#define to_nouveau_exec_job(job)               \
+               container_of((job), struct nouveau_exec_job, base)
+
+int nouveau_exec_job_init(struct nouveau_exec_job **job,
+                         struct nouveau_exec_job_args *args);
+
+int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+
+#endif
index ee5e9d40c166ff7cc84764543c3ec89fbcc09480..77c739a55b1959a85bec13252375fe6e94744df7 100644 (file)
@@ -96,6 +96,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
                if (nouveau_fence_signal(fence))
                        nvif_event_block(&fctx->event);
        }
+       fctx->killed = 1;
        spin_unlock_irqrestore(&fctx->lock, flags);
 }
 
@@ -210,6 +211,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
        struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
        int ret;
 
+       if (unlikely(!chan->fence))
+               return -ENODEV;
+
        fence->channel  = chan;
        fence->timeout  = jiffies + (15 * HZ);
 
@@ -226,6 +230,12 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
                dma_fence_get(&fence->base);
                spin_lock_irq(&fctx->lock);
 
+               if (unlikely(fctx->killed)) {
+                       spin_unlock_irq(&fctx->lock);
+                       dma_fence_put(&fence->base);
+                       return -ENODEV;
+               }
+
                if (nouveau_fence_update(chan, fctx))
                        nvif_event_block(&fctx->event);
 
@@ -396,25 +406,16 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
 }
 
 int
-nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
-                 struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_fence **pfence)
 {
        struct nouveau_fence *fence;
-       int ret = 0;
-
-       if (unlikely(!chan->fence))
-               return -ENODEV;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
 
-       ret = nouveau_fence_emit(fence, chan);
-       if (ret)
-               nouveau_fence_unref(&fence);
-
        *pfence = fence;
-       return ret;
+       return 0;
 }
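+
+/* Allocation and emission are now split: callers pair nouveau_fence_new()
+ * with nouveau_fence_emit() and drop the reference via nouveau_fence_unref()
+ * should emission fail, as the nouveau_gem_ioctl_pushbuf() hunk below does.
+ */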
 
 static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
index 0ca2bc85adf617a9c428fcac614148d2b8f712f1..2c72d96ef17d12a92ae3001293b12388c4d9d0b3 100644 (file)
@@ -17,8 +17,7 @@ struct nouveau_fence {
        unsigned long timeout;
 };
 
-int  nouveau_fence_new(struct nouveau_channel *, bool sysmem,
-                      struct nouveau_fence **);
+int  nouveau_fence_new(struct nouveau_fence **);
 void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
@@ -45,7 +44,7 @@ struct nouveau_fence_chan {
        char name[32];
 
        struct nvif_event event;
-       int notify_ref, dead;
+       int notify_ref, dead, killed;
 };
 
 struct nouveau_fence_priv {
index ab9062e5097770be99ff6fefc7f6faddc9085738..f39360870c70851bb47d83c6250174b85a660bbe 100644 (file)
@@ -103,13 +103,17 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
-       struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+       struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;
        int ret;
 
        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;
 
+       if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+               return -EPERM;
+
        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;
@@ -120,7 +124,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
                goto out;
        }
 
-       ret = nouveau_vma_new(nvbo, vmm, &vma);
+       /* only create a VMA on binding */
+       if (!nouveau_cli_uvmm(cli))
+               ret = nouveau_vma_new(nvbo, vmm, &vma);
+       else
+               ret = 0;
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 out:
@@ -180,13 +188,16 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
-       struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : & cli->vmm;
+       struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;
        int ret;
 
        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return;
 
+       if (nouveau_cli_uvmm(cli))
+               return;
+
        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;
@@ -209,6 +220,7 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
        .free = nouveau_gem_object_del,
        .open = nouveau_gem_object_open,
        .close = nouveau_gem_object_close,
+       .export = nouveau_gem_prime_export,
        .pin = nouveau_gem_prime_pin,
        .unpin = nouveau_gem_prime_unpin,
        .get_sg_table = nouveau_gem_prime_get_sg_table,
@@ -224,18 +236,28 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                struct nouveau_bo **pnvbo)
 {
        struct nouveau_drm *drm = cli->drm;
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+       struct dma_resv *resv = NULL;
        struct nouveau_bo *nvbo;
        int ret;
 
+       if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
+               if (unlikely(!uvmm))
+                       return -EINVAL;
+
+               resv = &uvmm->resv;
+       }
+
        if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
                domain |= NOUVEAU_GEM_DOMAIN_CPU;
 
        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
-                               tile_flags);
+                               tile_flags, false);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);
 
        nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+       nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
 
        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
@@ -246,7 +268,14 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                return ret;
        }
 
-       ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
+       if (resv)
+               dma_resv_lock(resv, NULL);
+
+       ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
+
+       if (resv)
+               dma_resv_unlock(resv);
+
        if (ret)
                return ret;
 
@@ -269,7 +298,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
-       struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+       struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;
 
        if (is_power_of_2(nvbo->valid_domains))
@@ -279,13 +308,15 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->offset;
-       if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+       if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
+           !nouveau_cli_uvmm(cli)) {
                vma = nouveau_vma_find(nvbo, vmm);
                if (!vma)
                        return -EINVAL;
 
                rep->offset = vma->addr;
-       }
+       } else {
+               rep->offset = 0;
+       }
 
        rep->size = nvbo->bo.base.size;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
@@ -310,6 +341,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;
 
+       /* If uvmm wasn't initialized until now, disable it completely to
+        * prevent userspace from mixing up UAPIs.
+        */
+       nouveau_cli_disable_uvmm_noinit(cli);
+
        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
@@ -613,32 +649,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
        return 0;
 }
 
-static inline void
-u_free(void *addr)
-{
-       kvfree(addr);
-}
-
-static inline void *
-u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
-{
-       void *mem;
-       void __user *userptr = (void __force __user *)(uintptr_t)user;
-
-       size *= nmemb;
-
-       mem = kvmalloc(size, GFP_KERNEL);
-       if (!mem)
-               return ERR_PTR(-ENOMEM);
-
-       if (copy_from_user(mem, userptr, size)) {
-               u_free(mem);
-               return ERR_PTR(-EFAULT);
-       }
-
-       return mem;
-}
-
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
@@ -747,6 +757,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
        if (unlikely(!abi16))
                return -ENOMEM;
 
+       if (unlikely(nouveau_cli_uvmm(cli)))
+               return -ENOSYS;
+
        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
@@ -899,8 +912,11 @@ revalidate:
                }
        }
 
-       ret = nouveau_fence_new(chan, false, &fence);
+       ret = nouveau_fence_new(&fence);
+       if (!ret)
+               ret = nouveau_fence_emit(fence, chan);
        if (ret) {
+               nouveau_fence_unref(&fence);
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
index 3b919c7c931cb001303d420b6e62759faa904934..10814d446435e9c7baefece5c958e5430569c964 100644 (file)
@@ -37,5 +37,6 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
        struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
-
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+                                        int flags);
 #endif
index 76c86d8bb01e139c0c91565580080fc696ad3214..5365a3d3a17f7c5190916b3c2552a0dfc373f4df 100644 (file)
@@ -35,4 +35,9 @@ int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+int nouveau_mem_map_fixed(struct nouveau_mem *mem,
+                         struct nvif_vmm *vmm,
+                         u8 kind, u64 addr,
+                         u64 offset, u64 range);
 #endif
index f42c2b1b0363cacd8b06b9da3daa21e793cc6d11..1b2ff0c40fc1c90c451b2e5fe241d99a88a613e1 100644 (file)
@@ -50,7 +50,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
        dma_resv_lock(robj, NULL);
        nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
-                               NOUVEAU_GEM_DOMAIN_GART, 0, 0);
+                               NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
        if (IS_ERR(nvbo)) {
                obj = ERR_CAST(nvbo);
                goto unlock;
@@ -102,3 +102,14 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
 
        nouveau_bo_unpin(nvbo);
 }
+
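+/* BOs allocated with NOUVEAU_GEM_DOMAIN_NO_SHARE use the per-client VM's
+ * reservation object (see nouveau_gem_new()) and hence must never leave the
+ * client's VM through dma-buf export.
+ */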
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+                                        int flags)
+{
+       struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+
+       if (nvbo->no_share)
+               return ERR_PTR(-EPERM);
+
+       return drm_gem_prime_export(gobj, flags);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
new file mode 100644 (file)
index 0000000..3424a1b
--- /dev/null
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/slab.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_syncobj.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_sched.h"
+
+/* FIXME
+ *
+ * We want to make sure that jobs currently executing can't be deferred by
+ * other jobs competing for the hardware. Otherwise we might end up with job
+ * timeouts just because of too many clients submitting too many jobs. We don't
+ * want jobs to time out because of system load, but only because a job itself
+ * is too bulky.
+ *
+ * For now, allow for up to 16 concurrent jobs in flight until we know how many
+ * rings the hardware can process in parallel.
+ */
+#define NOUVEAU_SCHED_HW_SUBMISSIONS           16
+#define NOUVEAU_SCHED_JOB_TIMEOUT_MS           10000
+
+int
+nouveau_job_init(struct nouveau_job *job,
+                struct nouveau_job_args *args)
+{
+       struct nouveau_sched_entity *entity = args->sched_entity;
+       int ret;
+
+       job->file_priv = args->file_priv;
+       job->cli = nouveau_cli(args->file_priv);
+       job->entity = entity;
+
+       job->sync = args->sync;
+       job->resv_usage = args->resv_usage;
+
+       job->ops = args->ops;
+
+       job->in_sync.count = args->in_sync.count;
+       if (job->in_sync.count) {
+               if (job->sync)
+                       return -EINVAL;
+
+               job->in_sync.data = kmemdup(args->in_sync.s,
+                                           sizeof(*args->in_sync.s) *
+                                           args->in_sync.count,
+                                           GFP_KERNEL);
+               if (!job->in_sync.data)
+                       return -ENOMEM;
+       }
+
+       job->out_sync.count = args->out_sync.count;
+       if (job->out_sync.count) {
+               if (job->sync) {
+                       ret = -EINVAL;
+                       goto err_free_in_sync;
+               }
+
+               job->out_sync.data = kmemdup(args->out_sync.s,
+                                            sizeof(*args->out_sync.s) *
+                                            args->out_sync.count,
+                                            GFP_KERNEL);
+               if (!job->out_sync.data) {
+                       ret = -ENOMEM;
+                       goto err_free_in_sync;
+               }
+
+               job->out_sync.objs = kcalloc(job->out_sync.count,
+                                            sizeof(*job->out_sync.objs),
+                                            GFP_KERNEL);
+               if (!job->out_sync.objs) {
+                       ret = -ENOMEM;
+                       goto err_free_out_sync;
+               }
+
+               job->out_sync.chains = kcalloc(job->out_sync.count,
+                                              sizeof(*job->out_sync.chains),
+                                              GFP_KERNEL);
+               if (!job->out_sync.chains) {
+                       ret = -ENOMEM;
+                       goto err_free_objs;
+               }
+       }
+
+       ret = drm_sched_job_init(&job->base, &entity->base, NULL);
+       if (ret)
+               goto err_free_chains;
+
+       job->state = NOUVEAU_JOB_INITIALIZED;
+
+       return 0;
+
+err_free_chains:
+       kfree(job->out_sync.chains);
+err_free_objs:
+       kfree(job->out_sync.objs);
+err_free_out_sync:
+       kfree(job->out_sync.data);
+err_free_in_sync:
+       kfree(job->in_sync.data);
+       return ret;
+}
+
+void
+nouveau_job_free(struct nouveau_job *job)
+{
+       kfree(job->in_sync.data);
+       kfree(job->out_sync.data);
+       kfree(job->out_sync.objs);
+       kfree(job->out_sync.chains);
+}
+
+void nouveau_job_fini(struct nouveau_job *job)
+{
+       dma_fence_put(job->done_fence);
+       drm_sched_job_cleanup(&job->base);
+       job->ops->free(job);
+}
+
+static int
+sync_find_fence(struct nouveau_job *job,
+               struct drm_nouveau_sync *sync,
+               struct dma_fence **fence)
+{
+       u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+       u64 point = 0;
+       int ret;
+
+       if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+           stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+               return -EOPNOTSUPP;
+
+       if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+               point = sync->timeline_value;
+
+       ret = drm_syncobj_find_fence(job->file_priv,
+                                    sync->handle, point,
+                                    0 /* flags */, fence);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int
+nouveau_job_add_deps(struct nouveau_job *job)
+{
+       struct dma_fence *in_fence = NULL;
+       int ret, i;
+
+       for (i = 0; i < job->in_sync.count; i++) {
+               struct drm_nouveau_sync *sync = &job->in_sync.data[i];
+
+               ret = sync_find_fence(job, sync, &in_fence);
+               if (ret) {
+                       NV_PRINTK(warn, job->cli,
+                                 "Failed to find syncobj (-> in): handle=%d\n",
+                                 sync->handle);
+                       return ret;
+               }
+
+               ret = drm_sched_job_add_dependency(&job->base, in_fence);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void
+nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
+{
+       int i;
+
+       for (i = 0; i < job->out_sync.count; i++) {
+               struct drm_syncobj *obj = job->out_sync.objs[i];
+               struct dma_fence_chain *chain = job->out_sync.chains[i];
+
+               if (obj)
+                       drm_syncobj_put(obj);
+
+               if (chain)
+                       dma_fence_chain_free(chain);
+       }
+}
+
+static int
+nouveau_job_fence_attach_prepare(struct nouveau_job *job)
+{
+       int i, ret;
+
+       for (i = 0; i < job->out_sync.count; i++) {
+               struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+               struct drm_syncobj **pobj = &job->out_sync.objs[i];
+               struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+               u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+               if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+                   stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+                       ret = -EINVAL;
+                       goto err_sync_cleanup;
+               }
+
+               *pobj = drm_syncobj_find(job->file_priv, sync->handle);
+               if (!*pobj) {
+                       NV_PRINTK(warn, job->cli,
+                                 "Failed to find syncobj (-> out): handle=%d\n",
+                                 sync->handle);
+                       ret = -ENOENT;
+                       goto err_sync_cleanup;
+               }
+
+               if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+                       *pchain = dma_fence_chain_alloc();
+                       if (!*pchain) {
+                               ret = -ENOMEM;
+                               goto err_sync_cleanup;
+                       }
+               }
+       }
+
+       return 0;
+
+err_sync_cleanup:
+       nouveau_job_fence_attach_cleanup(job);
+       return ret;
+}
+
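+/* Syncobjs and fence chains were already looked up and allocated by
+ * nouveau_job_fence_attach_prepare(), such that attaching the fences after
+ * drm_sched_job_arm(), where failure can no longer be unwound, cannot fail.
+ */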
+static void
+nouveau_job_fence_attach(struct nouveau_job *job)
+{
+       struct dma_fence *fence = job->done_fence;
+       int i;
+
+       for (i = 0; i < job->out_sync.count; i++) {
+               struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+               struct drm_syncobj **pobj = &job->out_sync.objs[i];
+               struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+               u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+               if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+                       drm_syncobj_add_point(*pobj, *pchain, fence,
+                                             sync->timeline_value);
+               } else {
+                       drm_syncobj_replace_fence(*pobj, fence);
+               }
+
+               drm_syncobj_put(*pobj);
+               *pobj = NULL;
+               *pchain = NULL;
+       }
+}
+
+int
+nouveau_job_submit(struct nouveau_job *job)
+{
+       struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+       struct dma_fence *done_fence = NULL;
+       int ret;
+
+       ret = nouveau_job_add_deps(job);
+       if (ret)
+               goto err;
+
+       ret = nouveau_job_fence_attach_prepare(job);
+       if (ret)
+               goto err;
+
+       /* Make sure the job appears on the sched_entity's queue in the same
+        * order as it was submitted.
+        */
+       mutex_lock(&entity->mutex);
+
+       /* Guarantee we won't fail after the submit() callback returned
+        * successfully.
+        */
+       if (job->ops->submit) {
+               ret = job->ops->submit(job);
+               if (ret)
+                       goto err_cleanup;
+       }
+
+       drm_sched_job_arm(&job->base);
+       job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+       if (job->sync)
+               done_fence = dma_fence_get(job->done_fence);
+
+       if (job->ops->armed_submit)
+               job->ops->armed_submit(job);
+
+       nouveau_job_fence_attach(job);
+
+       /* Set job state before pushing the job to the scheduler,
+        * such that we do not overwrite the job state set in run().
+        */
+       job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
+
+       drm_sched_entity_push_job(&job->base);
+
+       mutex_unlock(&entity->mutex);
+
+       if (done_fence) {
+               dma_fence_wait(done_fence, true);
+               dma_fence_put(done_fence);
+       }
+
+       return 0;
+
+err_cleanup:
+       mutex_unlock(&entity->mutex);
+       nouveau_job_fence_attach_cleanup(job);
+err:
+       job->state = NOUVEAU_JOB_SUBMIT_FAILED;
+       return ret;
+}
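+
+/* Typical driver-side usage (a sketch; it mirrors nouveau_exec() in
+ * nouveau_exec.c):
+ *
+ *     ret = nouveau_job_init(job, &args);
+ *     if (ret)
+ *             return ret;
+ *
+ *     ret = nouveau_job_submit(job);
+ *     if (ret)
+ *             nouveau_job_fini(job);
+ *
+ * On success the scheduler's free_job callback runs nouveau_job_fini() once
+ * the job is complete.
+ */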
+
+bool
+nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+                          struct work_struct *work)
+{
+       return queue_work(entity->sched_wq, work);
+}
+
+static struct dma_fence *
+nouveau_job_run(struct nouveau_job *job)
+{
+       struct dma_fence *fence;
+
+       fence = job->ops->run(job);
+       if (IS_ERR(fence))
+               job->state = NOUVEAU_JOB_RUN_FAILED;
+       else
+               job->state = NOUVEAU_JOB_RUN_SUCCESS;
+
+       return fence;
+}
+
+static struct dma_fence *
+nouveau_sched_run_job(struct drm_sched_job *sched_job)
+{
+       struct nouveau_job *job = to_nouveau_job(sched_job);
+
+       return nouveau_job_run(job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+       struct nouveau_job *job = to_nouveau_job(sched_job);
+
+       NV_PRINTK(warn, job->cli, "Job timed out.\n");
+
+       if (job->ops->timeout)
+               return job->ops->timeout(job);
+
+       return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static void
+nouveau_sched_free_job(struct drm_sched_job *sched_job)
+{
+       struct nouveau_job *job = to_nouveau_job(sched_job);
+
+       nouveau_job_fini(job);
+}
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+                             struct drm_gpu_scheduler *sched,
+                             struct workqueue_struct *sched_wq)
+{
+       mutex_init(&entity->mutex);
+       spin_lock_init(&entity->job.list.lock);
+       INIT_LIST_HEAD(&entity->job.list.head);
+       init_waitqueue_head(&entity->job.wq);
+
+       entity->sched_wq = sched_wq;
+       return drm_sched_entity_init(&entity->base,
+                                    DRM_SCHED_PRIORITY_NORMAL,
+                                    &sched, 1, NULL);
+}
+
+void
+nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
+{
+       drm_sched_entity_destroy(&entity->base);
+}
+
+static const struct drm_sched_backend_ops nouveau_sched_ops = {
+       .run_job = nouveau_sched_run_job,
+       .timedout_job = nouveau_sched_timedout_job,
+       .free_job = nouveau_sched_free_job,
+};
+
+int nouveau_sched_init(struct nouveau_drm *drm)
+{
+       struct drm_gpu_scheduler *sched = &drm->sched;
+       long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+
+       drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
+       if (!drm->sched_wq)
+               return -ENOMEM;
+
+       return drm_sched_init(sched, &nouveau_sched_ops,
+                             NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
+                             NULL, NULL, "nouveau_sched", drm->dev->dev);
+}
+
+void nouveau_sched_fini(struct nouveau_drm *drm)
+{
+       destroy_workqueue(drm->sched_wq);
+       drm_sched_fini(&drm->sched);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
new file mode 100644 (file)
index 0000000..27ac197
--- /dev/null
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef NOUVEAU_SCHED_H
+#define NOUVEAU_SCHED_H
+
+#include <linux/types.h>
+
+#include <drm/drm_exec.h>
+#include <drm/gpu_scheduler.h>
+
+#include "nouveau_drv.h"
+
+#define to_nouveau_job(sched_job)              \
+               container_of((sched_job), struct nouveau_job, base)
+
+struct nouveau_job_ops;
+
+enum nouveau_job_state {
+       NOUVEAU_JOB_UNINITIALIZED = 0,
+       NOUVEAU_JOB_INITIALIZED,
+       NOUVEAU_JOB_SUBMIT_SUCCESS,
+       NOUVEAU_JOB_SUBMIT_FAILED,
+       NOUVEAU_JOB_RUN_SUCCESS,
+       NOUVEAU_JOB_RUN_FAILED,
+};
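+
+/* State transitions, as driven by nouveau_job_init(), nouveau_job_submit()
+ * and the scheduler's run_job callback:
+ *
+ *     UNINITIALIZED -> INITIALIZED -> SUBMIT_SUCCESS -> RUN_{SUCCESS,FAILED}
+ *                                 \-> SUBMIT_FAILED
+ */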
+
+struct nouveau_job_args {
+       struct drm_file *file_priv;
+       struct nouveau_sched_entity *sched_entity;
+
+       enum dma_resv_usage resv_usage;
+       bool sync;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } in_sync;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } out_sync;
+
+       struct nouveau_job_ops *ops;
+};
+
+struct nouveau_job {
+       struct drm_sched_job base;
+
+       enum nouveau_job_state state;
+
+       struct nouveau_sched_entity *entity;
+
+       struct drm_file *file_priv;
+       struct nouveau_cli *cli;
+
+       struct drm_exec exec;
+       enum dma_resv_usage resv_usage;
+       struct dma_fence *done_fence;
+
+       bool sync;
+
+       struct {
+               struct drm_nouveau_sync *data;
+               u32 count;
+       } in_sync;
+
+       struct {
+               struct drm_nouveau_sync *data;
+               struct drm_syncobj **objs;
+               struct dma_fence_chain **chains;
+               u32 count;
+       } out_sync;
+
+       struct nouveau_job_ops {
+               /* If .submit() returns without any error, it is guaranteed that
+                * armed_submit() is called.
+                */
+               int (*submit)(struct nouveau_job *);
+               void (*armed_submit)(struct nouveau_job *);
+               struct dma_fence *(*run)(struct nouveau_job *);
+               void (*free)(struct nouveau_job *);
+               enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
+       } *ops;
+};
+
+int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
+                           u32 inc, u64 ins,
+                           u32 outc, u64 outs);
+
+int nouveau_job_init(struct nouveau_job *job,
+                    struct nouveau_job_args *args);
+void nouveau_job_free(struct nouveau_job *job);
+
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_fini(struct nouveau_job *job);
+
+#define to_nouveau_sched_entity(entity)                \
+               container_of((entity), struct nouveau_sched_entity, base)
+
+struct nouveau_sched_entity {
+       struct drm_sched_entity base;
+       struct mutex mutex;
+
+       struct workqueue_struct *sched_wq;
+
+       struct {
+               struct {
+                       struct list_head head;
+                       spinlock_t lock;
+               } list;
+               struct wait_queue_head wq;
+       } job;
+};
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+                             struct drm_gpu_scheduler *sched,
+                             struct workqueue_struct *sched_wq);
+void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
+
+bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+                               struct work_struct *work);
+
+int nouveau_sched_init(struct nouveau_drm *drm);
+void nouveau_sched_fini(struct nouveau_drm *drm);
+
+#endif
index a74ba8d84ba7f889e2873a008fc26618d0f26572..186351ecf72fd7e28b451203d0ae0e9d66842e93 100644 (file)
@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
-                           cli->vmm.vmm.object.oclass, true,
+                           cli->vmm.vmm.object.oclass, MANAGED,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
new file mode 100644 (file)
index 0000000..3a1e853
--- /dev/null
@@ -0,0 +1,1916 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Locking:
+ *
+ * The uvmm mutex protects any operations on the GPU VA space provided by the
+ * DRM GPU VA manager.
+ *
+ * The GEM's dma_resv lock protects the GEM's GPUVA list, hence link/unlink of
+ * a mapping to its backing GEM must be performed under this lock.
+ *
+ * Actual map/unmap operations within the fence signalling critical path are
+ * protected by installing DMA fences to the corresponding GEM's DMA
+ * reservation, such that BO moves, which themselves walk the GEM's GPUVA list
+ * in order to map/unmap its entries, can't run concurrently.
+ *
+ * Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate
+ * protection, since there are no accesses other than from BO move callbacks
+ * and from the fence signalling critical path, which are already protected by
+ * the corresponding GEM's DMA reservation fence.
+ */
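+
+/* A rough sketch of the resulting locking pattern (helper names as used
+ * elsewhere in this series):
+ *
+ *     nouveau_uvmm_lock(uvmm);
+ *     ... insert/remove regions and mappings in the GPU VA space ...
+ *     nouveau_uvmm_unlock(uvmm);
+ *
+ *     dma_resv_lock(obj->resv, NULL);
+ *     ... link/unlink a mapping to/from the GEM's GPUVA list ...
+ *     dma_resv_unlock(obj->resv);
+ */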
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_uvmm.h"
+
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/class.h>
+#include <nvif/if000c.h>
+#include <nvif/if900d.h>
+
+#define NOUVEAU_VA_SPACE_BITS          47 /* FIXME */
+#define NOUVEAU_VA_SPACE_START         0x0
+#define NOUVEAU_VA_SPACE_END           (1ULL << NOUVEAU_VA_SPACE_BITS)
+
+#define list_last_op(_ops) list_last_entry(_ops, struct bind_job_op, entry)
+#define list_prev_op(_op) list_prev_entry(_op, entry)
+#define list_for_each_op(_op, _ops) list_for_each_entry(_op, _ops, entry)
+#define list_for_each_op_from_reverse(_op, _ops) \
+       list_for_each_entry_from_reverse(_op, _ops, entry)
+#define list_for_each_op_safe(_op, _n, _ops) list_for_each_entry_safe(_op, _n, _ops, entry)
+
+enum vm_bind_op {
+       OP_MAP = DRM_NOUVEAU_VM_BIND_OP_MAP,
+       OP_UNMAP = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
+       OP_MAP_SPARSE,
+       OP_UNMAP_SPARSE,
+};
+
+struct nouveau_uvma_prealloc {
+       struct nouveau_uvma *map;
+       struct nouveau_uvma *prev;
+       struct nouveau_uvma *next;
+};
+
+struct bind_job_op {
+       struct list_head entry;
+
+       enum vm_bind_op op;
+       u32 flags;
+
+       struct {
+               u64 addr;
+               u64 range;
+       } va;
+
+       struct {
+               u32 handle;
+               u64 offset;
+               struct drm_gem_object *obj;
+       } gem;
+
+       struct nouveau_uvma_region *reg;
+       struct nouveau_uvma_prealloc new;
+       struct drm_gpuva_ops *ops;
+};
+
+struct uvmm_map_args {
+       struct nouveau_uvma_region *region;
+       u64 addr;
+       u64 range;
+       u8 kind;
+};
+
+static int
+nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
+                           u64 addr, u64 range)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+       return nvif_vmm_raw_sparse(vmm, addr, range, true);
+}
+
+static int
+nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
+                             u64 addr, u64 range)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+       return nvif_vmm_raw_sparse(vmm, addr, range, false);
+}
+
+static int
+nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
+                    u64 addr, u64 range)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+       return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
+                    u64 addr, u64 range)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+       return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
+                      u64 addr, u64 range, bool sparse)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+       return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+}
+
+static int
+nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
+                    u64 addr, u64 range,
+                    u64 bo_offset, u8 kind,
+                    struct nouveau_mem *mem)
+{
+       struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+       union {
+               struct gf100_vmm_map_v0 gf100;
+       } args;
+       u32 argc = 0;
+
+       switch (vmm->object.oclass) {
+       case NVIF_CLASS_VMM_GF100:
+       case NVIF_CLASS_VMM_GM200:
+       case NVIF_CLASS_VMM_GP100:
+               args.gf100.version = 0;
+               if (mem->mem.type & NVIF_MEM_VRAM)
+                       args.gf100.vol = 0;
+               else
+                       args.gf100.vol = 1;
+               args.gf100.ro = 0;
+               args.gf100.priv = 0;
+               args.gf100.kind = kind;
+               argc = sizeof(args.gf100);
+               break;
+       default:
+               WARN_ON(1);
+               return -ENOSYS;
+       }
+
+       return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+                               &args, argc,
+                               &mem->mem, bo_offset);
+}
+
+static int
+nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg)
+{
+       u64 addr = reg->va.addr;
+       u64 range = reg->va.range;
+
+       return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
+}
+
+static int
+nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
+{
+       u64 addr = uvma->va.va.addr;
+       u64 range = uvma->va.va.range;
+
+       return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+}
+
+static int
+nouveau_uvma_map(struct nouveau_uvma *uvma,
+                struct nouveau_mem *mem)
+{
+       u64 addr = uvma->va.va.addr;
+       u64 offset = uvma->va.gem.offset;
+       u64 range = uvma->va.va.range;
+
+       return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
+                                   offset, uvma->kind, mem);
+}
+
+static int
+nouveau_uvma_unmap(struct nouveau_uvma *uvma)
+{
+       u64 addr = uvma->va.va.addr;
+       u64 range = uvma->va.va.range;
+       bool sparse = !!uvma->region;
+
+       if (drm_gpuva_invalidated(&uvma->va))
+               return 0;
+
+       return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
+static int
+nouveau_uvma_alloc(struct nouveau_uvma **puvma)
+{
+       *puvma = kzalloc(sizeof(**puvma), GFP_KERNEL);
+       if (!*puvma)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void
+nouveau_uvma_free(struct nouveau_uvma *uvma)
+{
+       kfree(uvma);
+}
+
+static void
+nouveau_uvma_gem_get(struct nouveau_uvma *uvma)
+{
+       drm_gem_object_get(uvma->va.gem.obj);
+}
+
+static void
+nouveau_uvma_gem_put(struct nouveau_uvma *uvma)
+{
+       drm_gem_object_put(uvma->va.gem.obj);
+}
+
+static int
+nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg)
+{
+       *preg = kzalloc(sizeof(**preg), GFP_KERNEL);
+       if (!*preg)
+               return -ENOMEM;
+
+       kref_init(&(*preg)->kref);
+
+       return 0;
+}
+
+static void
+nouveau_uvma_region_free(struct kref *kref)
+{
+       struct nouveau_uvma_region *reg =
+               container_of(kref, struct nouveau_uvma_region, kref);
+
+       kfree(reg);
+}
+
+static void
+nouveau_uvma_region_get(struct nouveau_uvma_region *reg)
+{
+       kref_get(&reg->kref);
+}
+
+static void
+nouveau_uvma_region_put(struct nouveau_uvma_region *reg)
+{
+       kref_put(&reg->kref, nouveau_uvma_region_free);
+}
+
+static int
+__nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+                            struct nouveau_uvma_region *reg)
+{
+       u64 addr = reg->va.addr;
+       u64 range = reg->va.range;
+       u64 last = addr + range - 1;
+       MA_STATE(mas, &uvmm->region_mt, addr, addr);
+
+       if (unlikely(mas_walk(&mas)))
+               return -EEXIST;
+
+       if (unlikely(mas.last < last))
+               return -EEXIST;
+
+       mas.index = addr;
+       mas.last = last;
+
+       mas_store_gfp(&mas, reg, GFP_KERNEL);
+
+       reg->uvmm = uvmm;
+
+       return 0;
+}
+
+static int
+nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+                          struct nouveau_uvma_region *reg,
+                          u64 addr, u64 range)
+{
+       int ret;
+
+       reg->uvmm = uvmm;
+       reg->va.addr = addr;
+       reg->va.range = range;
+
+       ret = __nouveau_uvma_region_insert(uvmm, reg);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void
+nouveau_uvma_region_remove(struct nouveau_uvma_region *reg)
+{
+       struct nouveau_uvmm *uvmm = reg->uvmm;
+       MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
+
+       mas_erase(&mas);
+}
+
+static int
+nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
+                          u64 addr, u64 range)
+{
+       struct nouveau_uvma_region *reg;
+       int ret;
+
+       if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+               return -ENOSPC;
+
+       ret = nouveau_uvma_region_alloc(&reg);
+       if (ret)
+               return ret;
+
+       ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
+       if (ret)
+               goto err_free_region;
+
+       ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
+       if (ret)
+               goto err_region_remove;
+
+       return 0;
+
+err_region_remove:
+       nouveau_uvma_region_remove(reg);
+err_free_region:
+       nouveau_uvma_region_put(reg);
+       return ret;
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
+                              u64 addr, u64 range)
+{
+       MA_STATE(mas, &uvmm->region_mt, addr, 0);
+
+       return mas_find(&mas, addr + range - 1);
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
+                        u64 addr, u64 range)
+{
+       struct nouveau_uvma_region *reg;
+
+       reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+       if (!reg)
+               return NULL;
+
+       if (reg->va.addr != addr ||
+           reg->va.range != range)
+               return NULL;
+
+       return reg;
+}
+
+static bool
+nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
+{
+       struct nouveau_uvmm *uvmm = reg->uvmm;
+
+       return drm_gpuva_interval_empty(&uvmm->umgr,
+                                       reg->va.addr,
+                                       reg->va.range);
+}
+
+static int
+__nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg)
+{
+       struct nouveau_uvmm *uvmm = reg->uvmm;
+       u64 addr = reg->va.addr;
+       u64 range = reg->va.range;
+
+       if (!nouveau_uvma_region_empty(reg))
+               return -EBUSY;
+
+       nouveau_uvma_region_remove(reg);
+       nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
+       nouveau_uvma_region_put(reg);
+
+       return 0;
+}
+
+static int
+nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
+                           u64 addr, u64 range)
+{
+       struct nouveau_uvma_region *reg;
+
+       reg = nouveau_uvma_region_find(uvmm, addr, range);
+       if (!reg)
+               return -ENOENT;
+
+       return __nouveau_uvma_region_destroy(reg);
+}
+
+static void
+nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg)
+{
+       init_completion(&reg->complete);
+       reg->dirty = true;
+}
+
+static void
+nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
+{
+       complete_all(&reg->complete);
+}
+
+static void
+op_map_prepare_unwind(struct nouveau_uvma *uvma)
+{
+       nouveau_uvma_gem_put(uvma);
+       drm_gpuva_remove(&uvma->va);
+       nouveau_uvma_free(uvma);
+}
+
+static void
+op_unmap_prepare_unwind(struct drm_gpuva *va)
+{
+       drm_gpuva_insert(va->mgr, va);
+}
+
+static void
+nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
+                              struct nouveau_uvma_prealloc *new,
+                              struct drm_gpuva_ops *ops,
+                              struct drm_gpuva_op *last,
+                              struct uvmm_map_args *args)
+{
+       struct drm_gpuva_op *op = last;
+       u64 vmm_get_start = args ? args->addr : 0;
+       u64 vmm_get_end = args ? args->addr + args->range : 0;
+
+       /* Unwind GPUVA space. */
+       drm_gpuva_for_each_op_from_reverse(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP:
+                       op_map_prepare_unwind(new->map);
+                       break;
+               case DRM_GPUVA_OP_REMAP: {
+                       struct drm_gpuva_op_remap *r = &op->remap;
+
+                       if (r->next)
+                               op_map_prepare_unwind(new->next);
+
+                       if (r->prev)
+                               op_map_prepare_unwind(new->prev);
+
+                       op_unmap_prepare_unwind(r->unmap->va);
+                       break;
+               }
+               case DRM_GPUVA_OP_UNMAP:
+                       op_unmap_prepare_unwind(op->unmap.va);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /* Unmap operations don't allocate page tables, hence skip the
+        * following page table unwind.
+        */
+       if (!args)
+               return;
+
+       drm_gpuva_for_each_op(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP: {
+                       u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+                       if (vmm_get_range)
+                               nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+                                                    vmm_get_range);
+                       break;
+               }
+               case DRM_GPUVA_OP_REMAP: {
+                       struct drm_gpuva_op_remap *r = &op->remap;
+                       struct drm_gpuva *va = r->unmap->va;
+                       u64 ustart = va->va.addr;
+                       u64 urange = va->va.range;
+                       u64 uend = ustart + urange;
+
+                       if (r->prev)
+                               vmm_get_start = uend;
+
+                       if (r->next)
+                               vmm_get_end = ustart;
+
+                       if (r->prev && r->next)
+                               vmm_get_start = vmm_get_end = 0;
+
+                       break;
+               }
+               case DRM_GPUVA_OP_UNMAP: {
+                       struct drm_gpuva_op_unmap *u = &op->unmap;
+                       struct drm_gpuva *va = u->va;
+                       u64 ustart = va->va.addr;
+                       u64 urange = va->va.range;
+                       u64 uend = ustart + urange;
+
+                       /* Nothing to do for mappings we merge with. */
+                       if (uend == vmm_get_start ||
+                           ustart == vmm_get_end)
+                               break;
+
+                       if (ustart > vmm_get_start) {
+                               u64 vmm_get_range = ustart - vmm_get_start;
+
+                               nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+                                                    vmm_get_range);
+                       }
+                       vmm_get_start = uend;
+                       break;
+               }
+               default:
+                       break;
+               }
+
+               if (op == last)
+                       break;
+       }
+}
+
+static void
+nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
+                                  struct nouveau_uvma_prealloc *new,
+                                  struct drm_gpuva_ops *ops,
+                                  u64 addr, u64 range)
+{
+       struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+       struct uvmm_map_args args = {
+               .addr = addr,
+               .range = range,
+       };
+
+       nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
+}
+
+static void
+nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
+                                    struct nouveau_uvma_prealloc *new,
+                                    struct drm_gpuva_ops *ops)
+{
+       struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+
+       nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
+}
+
+static int
+op_map_prepare(struct nouveau_uvmm *uvmm,
+              struct nouveau_uvma **puvma,
+              struct drm_gpuva_op_map *op,
+              struct uvmm_map_args *args)
+{
+       struct nouveau_uvma *uvma;
+       int ret;
+
+       ret = nouveau_uvma_alloc(&uvma);
+       if (ret)
+               return ret;
+
+       uvma->region = args->region;
+       uvma->kind = args->kind;
+
+       drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+
+       /* Keep a reference until this uvma is destroyed. */
+       nouveau_uvma_gem_get(uvma);
+
+       *puvma = uvma;
+       return 0;
+}
+
+static void
+op_unmap_prepare(struct drm_gpuva_op_unmap *u)
+{
+       drm_gpuva_unmap(u);
+}
+
+static int
+nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
+                       struct nouveau_uvma_prealloc *new,
+                       struct drm_gpuva_ops *ops,
+                       struct uvmm_map_args *args)
+{
+       struct drm_gpuva_op *op;
+       u64 vmm_get_start = args ? args->addr : 0;
+       u64 vmm_get_end = args ? args->addr + args->range : 0;
+       int ret;
+
+       drm_gpuva_for_each_op(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP: {
+                       u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+                       ret = op_map_prepare(uvmm, &new->map, &op->map, args);
+                       if (ret)
+                               goto unwind;
+
+                       if (args && vmm_get_range) {
+                               ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+                                                          vmm_get_range);
+                               if (ret) {
+                                       op_map_prepare_unwind(new->map);
+                                       goto unwind;
+                               }
+                       }
+                       break;
+               }
+               case DRM_GPUVA_OP_REMAP: {
+                       struct drm_gpuva_op_remap *r = &op->remap;
+                       struct drm_gpuva *va = r->unmap->va;
+                       struct uvmm_map_args remap_args = {
+                               .kind = uvma_from_va(va)->kind,
+                       };
+                       u64 ustart = va->va.addr;
+                       u64 urange = va->va.range;
+                       u64 uend = ustart + urange;
+
+                       op_unmap_prepare(r->unmap);
+
+                       if (r->prev) {
+                               ret = op_map_prepare(uvmm, &new->prev, r->prev,
+                                                    &remap_args);
+                               if (ret)
+                                       goto unwind;
+
+                               if (args)
+                                       vmm_get_start = uend;
+                       }
+
+                       if (r->next) {
+                               ret = op_map_prepare(uvmm, &new->next, r->next,
+                                                    &remap_args);
+                               if (ret) {
+                                       if (r->prev)
+                                               op_map_prepare_unwind(new->prev);
+                                       goto unwind;
+                               }
+
+                               if (args)
+                                       vmm_get_end = ustart;
+                       }
+
+                       if (args && (r->prev && r->next))
+                               vmm_get_start = vmm_get_end = 0;
+
+                       break;
+               }
+               case DRM_GPUVA_OP_UNMAP: {
+                       struct drm_gpuva_op_unmap *u = &op->unmap;
+                       struct drm_gpuva *va = u->va;
+                       u64 ustart = va->va.addr;
+                       u64 urange = va->va.range;
+                       u64 uend = ustart + urange;
+
+                       op_unmap_prepare(u);
+
+                       if (!args)
+                               break;
+
+                       /* Nothing to do for mappings we merge with. */
+                       if (uend == vmm_get_start ||
+                           ustart == vmm_get_end)
+                               break;
+
+                       if (ustart > vmm_get_start) {
+                               u64 vmm_get_range = ustart - vmm_get_start;
+
+                               ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+                                                          vmm_get_range);
+                               if (ret) {
+                                       op_unmap_prepare_unwind(va);
+                                       goto unwind;
+                               }
+                       }
+                       vmm_get_start = uend;
+
+                       break;
+               }
+               default:
+                       ret = -EINVAL;
+                       goto unwind;
+               }
+       }
+
+       return 0;
+
+unwind:
+       if (op != drm_gpuva_first_op(ops))
+               nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
+                                              drm_gpuva_prev_op(op),
+                                              args);
+       return ret;
+}
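+
+/* Worked example (illustrative): mapping [0x0000, 0x8000) while
+ * [0x2000, 0x4000) is already mapped yields an UNMAP op for the old mapping
+ * followed by a MAP op for the new one. The UNMAP op takes a page table
+ * reference for [0x0000, 0x2000) and advances vmm_get_start to 0x4000; the
+ * final MAP op then takes the reference for the remaining [0x4000, 0x8000),
+ * since [0x2000, 0x4000) already holds references from the old mapping.
+ */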
+
+static int
+nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
+                           struct nouveau_uvma_prealloc *new,
+                           struct nouveau_uvma_region *region,
+                           struct drm_gpuva_ops *ops,
+                           u64 addr, u64 range, u8 kind)
+{
+       struct uvmm_map_args args = {
+               .region = region,
+               .addr = addr,
+               .range = range,
+               .kind = kind,
+       };
+
+       return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
+}
+
+static int
+nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
+                             struct nouveau_uvma_prealloc *new,
+                             struct drm_gpuva_ops *ops)
+{
+       return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
+}
+
+static struct drm_gem_object *
+op_gem_obj(struct drm_gpuva_op *op)
+{
+       switch (op->op) {
+       case DRM_GPUVA_OP_MAP:
+               return op->map.gem.obj;
+       case DRM_GPUVA_OP_REMAP:
+               /* Actually, we're looking for the GEMs backing remap.prev and
+                * remap.next, but since this is a remap they're identical to
+                * the GEM backing the unmapped GPUVA.
+                */
+               return op->remap.unmap->va->gem.obj;
+       case DRM_GPUVA_OP_UNMAP:
+               return op->unmap.va->gem.obj;
+       default:
+               WARN(1, "Unknown operation.\n");
+               return NULL;
+       }
+}
+
+static void
+op_map(struct nouveau_uvma *uvma)
+{
+       struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj);
+
+       nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource));
+}
+
+static void
+op_unmap(struct drm_gpuva_op_unmap *u)
+{
+       struct drm_gpuva *va = u->va;
+       struct nouveau_uvma *uvma = uvma_from_va(va);
+
+       /* nouveau_uvma_unmap() does not unmap if the backing BO is evicted. */
+       if (!u->keep)
+               nouveau_uvma_unmap(uvma);
+}
+
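+/* Only unmap if the GPUVA is still valid; invalidated GPUVAs have
+ * already been unmapped when their backing BO was evicted.
+ */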
+static void
+op_unmap_range(struct drm_gpuva_op_unmap *u,
+              u64 addr, u64 range)
+{
+       struct nouveau_uvma *uvma = uvma_from_va(u->va);
+       bool sparse = !!uvma->region;
+
+       if (!drm_gpuva_invalidated(u->va))
+               nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
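+/* For a remap, unmap only the hole between remap.prev and remap.next;
+ * those two mappings keep the remainder of the original GPUVA mapped.
+ */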
+static void
+op_remap(struct drm_gpuva_op_remap *r,
+        struct nouveau_uvma_prealloc *new)
+{
+       struct drm_gpuva_op_unmap *u = r->unmap;
+       struct nouveau_uvma *uvma = uvma_from_va(u->va);
+       u64 addr = uvma->va.va.addr;
+       u64 range = uvma->va.va.range;
+
+       if (r->prev)
+               addr = r->prev->va.addr + r->prev->va.range;
+
+       if (r->next)
+               range = r->next->va.addr - addr;
+
+       op_unmap_range(u, addr, range);
+}
+
+static int
+nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
+               struct nouveau_uvma_prealloc *new,
+               struct drm_gpuva_ops *ops)
+{
+       struct drm_gpuva_op *op;
+
+       drm_gpuva_for_each_op(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP:
+                       op_map(new->map);
+                       break;
+               case DRM_GPUVA_OP_REMAP:
+                       op_remap(&op->remap, new);
+                       break;
+               case DRM_GPUVA_OP_UNMAP:
+                       op_unmap(&op->unmap);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
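+/* Map and unmap run the same state machine; separate wrappers are kept
+ * to mirror the corresponding prepare and cleanup helpers.
+ */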
+static int
+nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
+                   struct nouveau_uvma_prealloc *new,
+                   struct drm_gpuva_ops *ops)
+{
+       return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
+static int
+nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
+                     struct nouveau_uvma_prealloc *new,
+                     struct drm_gpuva_ops *ops)
+{
+       return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
+static void
+nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
+                       struct nouveau_uvma_prealloc *new,
+                       struct drm_gpuva_ops *ops, bool unmap)
+{
+       struct drm_gpuva_op *op;
+
+       drm_gpuva_for_each_op(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP:
+                       break;
+               case DRM_GPUVA_OP_REMAP: {
+                       struct drm_gpuva_op_remap *r = &op->remap;
+                       struct drm_gpuva_op_map *p = r->prev;
+                       struct drm_gpuva_op_map *n = r->next;
+                       struct drm_gpuva *va = r->unmap->va;
+                       struct nouveau_uvma *uvma = uvma_from_va(va);
+
+                       if (unmap) {
+                               u64 addr = va->va.addr;
+                               u64 end = addr + va->va.range;
+
+                               if (p)
+                                       addr = p->va.addr + p->va.range;
+
+                               if (n)
+                                       end = n->va.addr;
+
+                               nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+                       }
+
+                       nouveau_uvma_gem_put(uvma);
+                       nouveau_uvma_free(uvma);
+                       break;
+               }
+               case DRM_GPUVA_OP_UNMAP: {
+                       struct drm_gpuva_op_unmap *u = &op->unmap;
+                       struct drm_gpuva *va = u->va;
+                       struct nouveau_uvma *uvma = uvma_from_va(va);
+
+                       if (unmap)
+                               nouveau_uvma_vmm_put(uvma);
+
+                       nouveau_uvma_gem_put(uvma);
+                       nouveau_uvma_free(uvma);
+                       break;
+               }
+               default:
+                       break;
+               }
+       }
+}
+
+static void
+nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
+                           struct nouveau_uvma_prealloc *new,
+                           struct drm_gpuva_ops *ops)
+{
+       nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
+}
+
+static void
+nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
+                             struct nouveau_uvma_prealloc *new,
+                             struct drm_gpuva_ops *ops)
+{
+       nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
+}
+
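+/* Validate a VA range requested from userspace: it must be page
+ * aligned, must not be empty or wrap around, must lie within the
+ * supported VA space and must not overlap the kernel managed area.
+ */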
+static int
+nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
+{
+       u64 end = addr + range;
+       u64 kernel_managed_end = uvmm->kernel_managed_addr +
+                                uvmm->kernel_managed_size;
+
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (range & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (end <= addr)
+               return -EINVAL;
+
+       if (addr < NOUVEAU_VA_SPACE_START ||
+           end > NOUVEAU_VA_SPACE_END)
+               return -EINVAL;
+
+       if (addr < kernel_managed_end &&
+           end > uvmm->kernel_managed_addr)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob)
+{
+       *pjob = kzalloc(sizeof(**pjob), GFP_KERNEL);
+       if (!*pjob)
+               return -ENOMEM;
+
+       kref_init(&(*pjob)->kref);
+
+       return 0;
+}
+
+static void
+nouveau_uvmm_bind_job_free(struct kref *kref)
+{
+       struct nouveau_uvmm_bind_job *job =
+               container_of(kref, struct nouveau_uvmm_bind_job, kref);
+
+       nouveau_job_free(&job->base);
+       kfree(job);
+}
+
+static void
+nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job)
+{
+       kref_get(&job->kref);
+}
+
+static void
+nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job)
+{
+       kref_put(&job->kref, nouveau_uvmm_bind_job_free);
+}
+
+static int
+bind_validate_op(struct nouveau_job *job,
+                struct bind_job_op *op)
+{
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+       struct drm_gem_object *obj = op->gem.obj;
+
+       if (op->op == OP_MAP) {
+               if (op->gem.offset & ~PAGE_MASK)
+                       return -EINVAL;
+
+               if (obj->size <= op->gem.offset)
+                       return -EINVAL;
+
+               if (op->va.range > (obj->size - op->gem.offset))
+                       return -EINVAL;
+       }
+
+       return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
+}
+
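+/* A new sparse mapping must not overlap any unmap queued on the same
+ * entity: take a reference on the conflicting bind job, drop the list
+ * lock, wait for the job to complete and rescan from the beginning.
+ */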
+static void
+bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
+{
+       struct nouveau_uvmm_bind_job *bind_job;
+       struct nouveau_sched_entity *entity = job->entity;
+       struct bind_job_op *op;
+       u64 end = addr + range;
+
+again:
+       spin_lock(&entity->job.list.lock);
+       list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+               list_for_each_op(op, &bind_job->ops) {
+                       if (op->op == OP_UNMAP) {
+                               u64 op_addr = op->va.addr;
+                               u64 op_end = op_addr + op->va.range;
+
+                               if (!(end <= op_addr || addr >= op_end)) {
+                                       nouveau_uvmm_bind_job_get(bind_job);
+                                       spin_unlock(&entity->job.list.lock);
+                                       wait_for_completion(&bind_job->complete);
+                                       nouveau_uvmm_bind_job_put(bind_job);
+                                       goto again;
+                               }
+                       }
+               }
+       }
+       spin_unlock(&entity->job.list.lock);
+}
+
+static int
+bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
+                        bool sparse)
+{
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+       struct nouveau_uvma_region *reg;
+       u64 reg_addr, reg_end;
+       u64 end = addr + range;
+
+again:
+       nouveau_uvmm_lock(uvmm);
+       reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+       if (!reg) {
+               nouveau_uvmm_unlock(uvmm);
+               return 0;
+       }
+
+       /* Generally, job submits are serialized, hence only
+        * dirty regions can be modified concurrently.
+        */
+       if (reg->dirty) {
+               nouveau_uvma_region_get(reg);
+               nouveau_uvmm_unlock(uvmm);
+               wait_for_completion(&reg->complete);
+               nouveau_uvma_region_put(reg);
+               goto again;
+       }
+       nouveau_uvmm_unlock(uvmm);
+
+       if (sparse)
+               return -ENOSPC;
+
+       reg_addr = reg->va.addr;
+       reg_end = reg_addr + reg->va.range;
+
+       /* Make sure the mapping is either outside of a
+        * region or fully enclosed by a region.
+        */
+       if (reg_addr > addr || reg_end < end)
+               return -ENOSPC;
+
+       return 0;
+}
+
+static int
+bind_validate_region(struct nouveau_job *job)
+{
+       struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+       struct bind_job_op *op;
+       int ret;
+
+       list_for_each_op(op, &bind_job->ops) {
+               u64 op_addr = op->va.addr;
+               u64 op_range = op->va.range;
+               bool sparse = false;
+
+               switch (op->op) {
+               case OP_MAP_SPARSE:
+                       sparse = true;
+                       bind_validate_map_sparse(job, op_addr, op_range);
+                       fallthrough;
+               case OP_MAP:
+                       ret = bind_validate_map_common(job, op_addr, op_range,
+                                                      sparse);
+                       if (ret)
+                               return ret;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
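+/* Link the new map/prev/next GPUVAs to their GEM object and unlink the
+ * GPUVAs being replaced or removed; called with the dma_resv locks of
+ * the affected GEMs held.
+ */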
+static void
+bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+{
+       struct drm_gpuva_op *op;
+
+       drm_gpuva_for_each_op(op, ops) {
+               switch (op->op) {
+               case DRM_GPUVA_OP_MAP:
+                       drm_gpuva_link(&new->map->va);
+                       break;
+               case DRM_GPUVA_OP_REMAP:
+                       if (op->remap.prev)
+                               drm_gpuva_link(&new->prev->va);
+                       if (op->remap.next)
+                               drm_gpuva_link(&new->next->va);
+                       drm_gpuva_unlink(op->remap.unmap->va);
+                       break;
+               case DRM_GPUVA_OP_UNMAP:
+                       drm_gpuva_unlink(op->unmap.va);
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+{
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+       struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+       struct nouveau_sched_entity *entity = job->entity;
+       struct drm_exec *exec = &job->exec;
+       struct bind_job_op *op;
+       int ret;
+
+       list_for_each_op(op, &bind_job->ops) {
+               if (op->op == OP_MAP) {
+                       op->gem.obj = drm_gem_object_lookup(job->file_priv,
+                                                           op->gem.handle);
+                       if (!op->gem.obj)
+                               return -ENOENT;
+               }
+
+               ret = bind_validate_op(job, op);
+               if (ret)
+                       return ret;
+       }
+
+       /* If a sparse region or mapping overlaps a dirty region, we need to
+        * wait for the region to complete the unbind process. This is due to
+        * how page table management is currently implemented. A future
+        * implementation might change this.
+        */
+       ret = bind_validate_region(job);
+       if (ret)
+               return ret;
+
+       /* Once we start modifying the GPU VA space we need to keep holding the
+        * uvmm lock until we can't fail anymore. This is because the set of GPU
+        * VA space changes must appear atomically and we need to be able to
+        * unwind all GPU VA space changes on failure.
+        */
+       nouveau_uvmm_lock(uvmm);
+       list_for_each_op(op, &bind_job->ops) {
+               switch (op->op) {
+               case OP_MAP_SPARSE:
+                       ret = nouveau_uvma_region_create(uvmm,
+                                                        op->va.addr,
+                                                        op->va.range);
+                       if (ret)
+                               goto unwind_continue;
+
+                       break;
+               case OP_UNMAP_SPARSE:
+                       op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
+                                                          op->va.range);
+                       if (!op->reg || op->reg->dirty) {
+                               ret = -ENOENT;
+                               goto unwind_continue;
+                       }
+
+                       op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+                                                               op->va.addr,
+                                                               op->va.range);
+                       if (IS_ERR(op->ops)) {
+                               ret = PTR_ERR(op->ops);
+                               goto unwind_continue;
+                       }
+
+                       ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+                                                           op->ops);
+                       if (ret) {
+                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               op->ops = NULL;
+                               op->reg = NULL;
+                               goto unwind_continue;
+                       }
+
+                       nouveau_uvma_region_dirty(op->reg);
+
+                       break;
+               case OP_MAP: {
+                       struct nouveau_uvma_region *reg;
+
+                       reg = nouveau_uvma_region_find_first(uvmm,
+                                                            op->va.addr,
+                                                            op->va.range);
+                       if (reg) {
+                               u64 reg_addr = reg->va.addr;
+                               u64 reg_end = reg_addr + reg->va.range;
+                               u64 op_addr = op->va.addr;
+                               u64 op_end = op_addr + op->va.range;
+
+                               if (unlikely(reg->dirty)) {
+                                       ret = -EINVAL;
+                                       goto unwind_continue;
+                               }
+
+                               /* Make sure the mapping is either outside of a
+                                * region or fully enclosed by a region.
+                                */
+                               if (reg_addr > op_addr || reg_end < op_end) {
+                                       ret = -ENOSPC;
+                                       goto unwind_continue;
+                               }
+                       }
+
+                       op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
+                                                             op->va.addr,
+                                                             op->va.range,
+                                                             op->gem.obj,
+                                                             op->gem.offset);
+                       if (IS_ERR(op->ops)) {
+                               ret = PTR_ERR(op->ops);
+                               goto unwind_continue;
+                       }
+
+                       ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
+                                                         reg, op->ops,
+                                                         op->va.addr,
+                                                         op->va.range,
+                                                         op->flags & 0xff);
+                       if (ret) {
+                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               op->ops = NULL;
+                               goto unwind_continue;
+                       }
+
+                       break;
+               }
+               case OP_UNMAP:
+                       op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+                                                               op->va.addr,
+                                                               op->va.range);
+                       if (IS_ERR(op->ops)) {
+                               ret = PTR_ERR(op->ops);
+                               goto unwind_continue;
+                       }
+
+                       ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+                                                           op->ops);
+                       if (ret) {
+                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               op->ops = NULL;
+                               goto unwind_continue;
+                       }
+
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto unwind_continue;
+               }
+       }
+
+       drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+                           DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_until_all_locked(exec) {
+               list_for_each_op(op, &bind_job->ops) {
+                       struct drm_gpuva_op *va_op;
+
+                       if (IS_ERR_OR_NULL(op->ops))
+                               continue;
+
+                       drm_gpuva_for_each_op(va_op, op->ops) {
+                               struct drm_gem_object *obj = op_gem_obj(va_op);
+
+                               if (unlikely(!obj))
+                                       continue;
+
+                               ret = drm_exec_prepare_obj(exec, obj, 1);
+                               drm_exec_retry_on_contention(exec);
+                               if (ret) {
+                                       op = list_last_op(&bind_job->ops);
+                                       goto unwind;
+                               }
+                       }
+               }
+       }
+
+       list_for_each_op(op, &bind_job->ops) {
+               struct drm_gpuva_op *va_op;
+
+               if (IS_ERR_OR_NULL(op->ops))
+                       continue;
+
+               drm_gpuva_for_each_op(va_op, op->ops) {
+                       struct drm_gem_object *obj = op_gem_obj(va_op);
+
+                       if (unlikely(!obj))
+                               continue;
+
+                       /* Don't validate GEMs backing mappings we're about to
+                        * unmap; it's not worth the effort.
+                        */
+                       if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+                               continue;
+
+                       ret = nouveau_bo_validate(nouveau_gem_object(obj),
+                                                 true, false);
+                       if (ret) {
+                               op = list_last_op(&bind_job->ops);
+                               goto unwind;
+                       }
+               }
+       }
+
+       /* Link and unlink GPUVAs while holding the dma_resv lock.
+        *
+        * As long as we validate() all GEMs and add fences to all GEMs' DMA
+        * reservations backing map and remap operations, we can be sure there
+        * won't be any concurrent (in)validations during job execution, hence
+        * we're safe to check drm_gpuva_invalidated() within the fence
+        * signalling critical path without holding a separate lock.
+        *
+        * GPUVAs about to be unmapped are safe as well, since they're unlinked
+        * already.
+        *
+        * GEMs from map and remap operations must be validated before linking
+        * their corresponding mappings to prevent the actual PT update from
+        * happening right away in validate() rather than asynchronously as
+        * intended.
+        *
+        * Note that after linking and unlinking the GPUVAs in this loop this
+        * function cannot fail anymore, hence there is no need for an unwind
+        * path.
+        */
+       list_for_each_op(op, &bind_job->ops) {
+               switch (op->op) {
+               case OP_UNMAP_SPARSE:
+               case OP_MAP:
+               case OP_UNMAP:
+                       bind_link_gpuvas(op->ops, &op->new);
+                       break;
+               default:
+                       break;
+               }
+       }
+       nouveau_uvmm_unlock(uvmm);
+
+       spin_lock(&entity->job.list.lock);
+       list_add(&bind_job->entry, &entity->job.list.head);
+       spin_unlock(&entity->job.list.lock);
+
+       return 0;
+
+unwind_continue:
+       op = list_prev_op(op);
+unwind:
+       list_for_each_op_from_reverse(op, &bind_job->ops) {
+               switch (op->op) {
+               case OP_MAP_SPARSE:
+                       nouveau_uvma_region_destroy(uvmm, op->va.addr,
+                                                   op->va.range);
+                       break;
+               case OP_UNMAP_SPARSE:
+                       __nouveau_uvma_region_insert(uvmm, op->reg);
+                       nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+                                                            op->ops);
+                       break;
+               case OP_MAP:
+                       nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
+                                                          op->ops,
+                                                          op->va.addr,
+                                                          op->va.range);
+                       break;
+               case OP_UNMAP:
+                       nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+                                                            op->ops);
+                       break;
+               }
+
+               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+               op->ops = NULL;
+               op->reg = NULL;
+       }
+
+       nouveau_uvmm_unlock(uvmm);
+       drm_exec_fini(exec);
+       return ret;
+}
+
+static void
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+{
+       struct drm_exec *exec = &job->exec;
+       struct drm_gem_object *obj;
+       unsigned long index;
+
+       drm_exec_for_each_locked_object(exec, index, obj)
+               dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+       drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_uvmm_bind_job_run(struct nouveau_job *job)
+{
+       struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+       struct bind_job_op *op;
+       int ret = 0;
+
+       list_for_each_op(op, &bind_job->ops) {
+               switch (op->op) {
+               case OP_MAP_SPARSE:
+                       /* noop */
+                       break;
+               case OP_MAP:
+                       ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
+                       if (ret)
+                               goto out;
+                       break;
+               case OP_UNMAP_SPARSE:
+                       fallthrough;
+               case OP_UNMAP:
+                       ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
+                       if (ret)
+                               goto out;
+                       break;
+               }
+       }
+
+out:
+       if (ret)
+               NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret);
+       return ERR_PTR(ret);
+}
+
+static void
+nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+{
+       struct nouveau_uvmm_bind_job *bind_job =
+               container_of(work, struct nouveau_uvmm_bind_job, work);
+       struct nouveau_job *job = &bind_job->base;
+       struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+       struct nouveau_sched_entity *entity = job->entity;
+       struct bind_job_op *op, *next;
+
+       list_for_each_op(op, &bind_job->ops) {
+               struct drm_gem_object *obj = op->gem.obj;
+
+               /* When nouveau_uvmm_bind_job_submit() fails, op->ops and op->reg
+                * will be NULL, hence skip the cleanup.
+                */
+               switch (op->op) {
+               case OP_MAP_SPARSE:
+                       /* noop */
+                       break;
+               case OP_UNMAP_SPARSE:
+                       if (!IS_ERR_OR_NULL(op->ops))
+                               nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+                                                             op->ops);
+
+                       if (op->reg) {
+                               nouveau_uvma_region_sparse_unref(op->reg);
+                               nouveau_uvmm_lock(uvmm);
+                               nouveau_uvma_region_remove(op->reg);
+                               nouveau_uvmm_unlock(uvmm);
+                               nouveau_uvma_region_complete(op->reg);
+                               nouveau_uvma_region_put(op->reg);
+                       }
+
+                       break;
+               case OP_MAP:
+                       if (!IS_ERR_OR_NULL(op->ops))
+                               nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
+                                                           op->ops);
+                       break;
+               case OP_UNMAP:
+                       if (!IS_ERR_OR_NULL(op->ops))
+                               nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+                                                             op->ops);
+                       break;
+               }
+
+               if (!IS_ERR_OR_NULL(op->ops))
+                       drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+
+               if (obj)
+                       drm_gem_object_put(obj);
+       }
+
+       spin_lock(&entity->job.list.lock);
+       list_del(&bind_job->entry);
+       spin_unlock(&entity->job.list.lock);
+
+       complete_all(&bind_job->complete);
+       wake_up(&entity->job.wq);
+
+       /* Remove and free ops after removing the bind job from the job list to
+        * avoid races against bind_validate_map_sparse().
+        */
+       list_for_each_op_safe(op, next, &bind_job->ops) {
+               list_del(&op->entry);
+               kfree(op);
+       }
+
+       nouveau_uvmm_bind_job_put(bind_job);
+}
+
+static void
+nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
+{
+       struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+       struct nouveau_sched_entity *entity = job->entity;
+
+       nouveau_sched_entity_qwork(entity, &bind_job->work);
+}
+
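+/* VM_BIND job callbacks; freeing is deferred to the scheduler entity's
+ * workqueue, see nouveau_uvmm_bind_job_free_work_fn().
+ */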
+static struct nouveau_job_ops nouveau_bind_job_ops = {
+       .submit = nouveau_uvmm_bind_job_submit,
+       .armed_submit = nouveau_uvmm_bind_job_armed_submit,
+       .run = nouveau_uvmm_bind_job_run,
+       .free = nouveau_uvmm_bind_job_free_qwork,
+};
+
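+/* Translate a userspace bind op into its internal representation; MAP
+ * and UNMAP are split into sparse variants based on
+ * DRM_NOUVEAU_VM_BIND_SPARSE.
+ */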
+static int
+bind_job_op_from_uop(struct bind_job_op **pop,
+                    struct drm_nouveau_vm_bind_op *uop)
+{
+       struct bind_job_op *op;
+
+       op = *pop = kzalloc(sizeof(*op), GFP_KERNEL);
+       if (!op)
+               return -ENOMEM;
+
+       switch (uop->op) {
+       case OP_MAP:
+               op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+                        OP_MAP_SPARSE : OP_MAP;
+               break;
+       case OP_UNMAP:
+               op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+                        OP_UNMAP_SPARSE : OP_UNMAP;
+               break;
+       default:
+               op->op = uop->op;
+               break;
+       }
+
+       op->flags = uop->flags;
+       op->va.addr = uop->addr;
+       op->va.range = uop->range;
+       op->gem.handle = uop->handle;
+       op->gem.offset = uop->bo_offset;
+
+       return 0;
+}
+
+static void
+bind_job_ops_free(struct list_head *ops)
+{
+       struct bind_job_op *op, *next;
+
+       list_for_each_op_safe(op, next, ops) {
+               list_del(&op->entry);
+               kfree(op);
+       }
+}
+
+static int
+nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
+                          struct nouveau_uvmm_bind_job_args *__args)
+{
+       struct nouveau_uvmm_bind_job *job;
+       struct nouveau_job_args args = {};
+       struct bind_job_op *op;
+       int i, ret;
+
+       ret = nouveau_uvmm_bind_job_alloc(&job);
+       if (ret)
+               return ret;
+
+       INIT_LIST_HEAD(&job->ops);
+       INIT_LIST_HEAD(&job->entry);
+
+       for (i = 0; i < __args->op.count; i++) {
+               ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
+               if (ret)
+                       goto err_free;
+
+               list_add_tail(&op->entry, &job->ops);
+       }
+
+       init_completion(&job->complete);
+       INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
+
+       args.sched_entity = __args->sched_entity;
+       args.file_priv = __args->file_priv;
+
+       args.in_sync.count = __args->in_sync.count;
+       args.in_sync.s = __args->in_sync.s;
+
+       args.out_sync.count = __args->out_sync.count;
+       args.out_sync.s = __args->out_sync.s;
+
+       args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC);
+       args.ops = &nouveau_bind_job_ops;
+       args.resv_usage = DMA_RESV_USAGE_BOOKKEEP;
+
+       ret = nouveau_job_init(&job->base, &args);
+       if (ret)
+               goto err_free;
+
+       *pjob = job;
+       return 0;
+
+err_free:
+       bind_job_ops_free(&job->ops);
+       kfree(job);
+       *pjob = NULL;
+
+       return ret;
+}
+
+int
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+                          void *data,
+                          struct drm_file *file_priv)
+{
+       struct nouveau_cli *cli = nouveau_cli(file_priv);
+       struct drm_nouveau_vm_init *init = data;
+
+       return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
+                                init->kernel_managed_size);
+}
+
+static int
+nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
+{
+       struct nouveau_uvmm_bind_job *job;
+       int ret;
+
+       ret = nouveau_uvmm_bind_job_init(&job, args);
+       if (ret)
+               return ret;
+
+       ret = nouveau_job_submit(&job->base);
+       if (ret)
+               goto err_job_fini;
+
+       return 0;
+
+err_job_fini:
+       nouveau_job_fini(&job->base);
+       return ret;
+}
+
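+/* Copy the userspace arrays (bind ops, wait syncs, signal syncs) into
+ * kernel memory; on failure, everything successfully copied so far is
+ * freed again.
+ */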
+static int
+nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args,
+                          struct drm_nouveau_vm_bind *req)
+{
+       struct drm_nouveau_sync **s;
+       u32 inc = req->wait_count;
+       u64 ins = req->wait_ptr;
+       u32 outc = req->sig_count;
+       u64 outs = req->sig_ptr;
+       u32 opc = req->op_count;
+       u64 ops = req->op_ptr;
+       int ret;
+
+       args->flags = req->flags;
+
+       if (opc) {
+               args->op.count = opc;
+               args->op.s = u_memcpya(ops, opc,
+                                      sizeof(*args->op.s));
+               if (IS_ERR(args->op.s))
+                       return PTR_ERR(args->op.s);
+       }
+
+       if (inc) {
+               s = &args->in_sync.s;
+
+               args->in_sync.count = inc;
+               *s = u_memcpya(ins, inc, sizeof(**s));
+               if (IS_ERR(*s)) {
+                       ret = PTR_ERR(*s);
+                       goto err_free_ops;
+               }
+       }
+
+       if (outc) {
+               s = &args->out_sync.s;
+
+               args->out_sync.count = outc;
+               *s = u_memcpya(outs, outc, sizeof(**s));
+               if (IS_ERR(*s)) {
+                       ret = PTR_ERR(*s);
+                       goto err_free_ins;
+               }
+       }
+
+       return 0;
+
+err_free_ins:
+       u_free(args->in_sync.s);
+err_free_ops:
+       u_free(args->op.s);
+       return ret;
+}
+
+static void
+nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args)
+{
+       u_free(args->op.s);
+       u_free(args->in_sync.s);
+       u_free(args->out_sync.s);
+}
+
+int
+nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
+                          void *data,
+                          struct drm_file *file_priv)
+{
+       struct nouveau_cli *cli = nouveau_cli(file_priv);
+       struct nouveau_uvmm_bind_job_args args = {};
+       struct drm_nouveau_vm_bind *req = data;
+       int ret = 0;
+
+       if (unlikely(!nouveau_cli_uvmm_locked(cli)))
+               return -ENOSYS;
+
+       ret = nouveau_uvmm_vm_bind_ucopy(&args, req);
+       if (ret)
+               return ret;
+
+       args.sched_entity = &cli->sched_entity;
+       args.file_priv = file_priv;
+
+       ret = nouveau_uvmm_vm_bind(&args);
+
+       nouveau_uvmm_vm_bind_ufree(&args);
+       return ret;
+}
+
+void
+nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
+{
+       struct drm_gem_object *obj = &nvbo->bo.base;
+       struct drm_gpuva *va;
+
+       dma_resv_assert_held(obj->resv);
+
+       drm_gem_for_each_gpuva(va, obj) {
+               struct nouveau_uvma *uvma = uvma_from_va(va);
+
+               nouveau_uvma_map(uvma, mem);
+               drm_gpuva_invalidate(va, false);
+       }
+}
+
+void
+nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
+{
+       struct drm_gem_object *obj = &nvbo->bo.base;
+       struct drm_gpuva *va;
+
+       dma_resv_assert_held(obj->resv);
+
+       drm_gem_for_each_gpuva(va, obj) {
+               struct nouveau_uvma *uvma = uvma_from_va(va);
+
+               nouveau_uvma_unmap(uvma);
+               drm_gpuva_invalidate(va, true);
+       }
+}
+
+int
+nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+                 u64 kernel_managed_addr, u64 kernel_managed_size)
+{
+       int ret;
+       u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
+
+       mutex_init(&uvmm->mutex);
+       dma_resv_init(&uvmm->resv);
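+       /* The region maple tree is protected by uvmm->mutex rather than an
+        * internal lock.
+        */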
+       mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+       mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+
+       mutex_lock(&cli->mutex);
+
+       if (unlikely(cli->uvmm.disabled)) {
+               ret = -ENOSYS;
+               goto out_unlock;
+       }
+
+       if (kernel_managed_end <= kernel_managed_addr) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       uvmm->kernel_managed_addr = kernel_managed_addr;
+       uvmm->kernel_managed_size = kernel_managed_size;
+
+       drm_gpuva_manager_init(&uvmm->umgr, cli->name,
+                              NOUVEAU_VA_SPACE_START,
+                              NOUVEAU_VA_SPACE_END,
+                              kernel_managed_addr, kernel_managed_size,
+                              NULL);
+
+       ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
+                           cli->vmm.vmm.object.oclass, RAW,
+                           kernel_managed_addr, kernel_managed_size,
+                           NULL, 0, &cli->uvmm.vmm.vmm);
+       if (ret)
+               goto out_free_gpuva_mgr;
+
+       cli->uvmm.vmm.cli = cli;
+       mutex_unlock(&cli->mutex);
+
+       return 0;
+
+out_free_gpuva_mgr:
+       drm_gpuva_manager_destroy(&uvmm->umgr);
+out_unlock:
+       mutex_unlock(&cli->mutex);
+       return ret;
+}
+
+void
+nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
+{
+       MA_STATE(mas, &uvmm->region_mt, 0, 0);
+       struct nouveau_uvma_region *reg;
+       struct nouveau_cli *cli = uvmm->vmm.cli;
+       struct nouveau_sched_entity *entity = &cli->sched_entity;
+       struct drm_gpuva *va, *next;
+
+       if (!cli)
+               return;
+
+       rmb(); /* for list_empty to work without lock */
+       wait_event(entity->job.wq, list_empty(&entity->job.list.head));
+
+       nouveau_uvmm_lock(uvmm);
+       drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+               struct nouveau_uvma *uvma = uvma_from_va(va);
+               struct drm_gem_object *obj = va->gem.obj;
+
+               if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+                       continue;
+
+               drm_gpuva_remove(va);
+
+               dma_resv_lock(obj->resv, NULL);
+               drm_gpuva_unlink(va);
+               dma_resv_unlock(obj->resv);
+
+               nouveau_uvma_unmap(uvma);
+               nouveau_uvma_vmm_put(uvma);
+
+               nouveau_uvma_gem_put(uvma);
+               nouveau_uvma_free(uvma);
+       }
+
+       mas_for_each(&mas, reg, ULONG_MAX) {
+               mas_erase(&mas);
+               nouveau_uvma_region_sparse_unref(reg);
+               nouveau_uvma_region_put(reg);
+       }
+
+       WARN(!mtree_empty(&uvmm->region_mt),
+            "nouveau_uvma_region tree not empty, potentially leaking memory.");
+       __mt_destroy(&uvmm->region_mt);
+       nouveau_uvmm_unlock(uvmm);
+
+       mutex_lock(&cli->mutex);
+       nouveau_vmm_fini(&uvmm->vmm);
+       drm_gpuva_manager_destroy(&uvmm->umgr);
+       mutex_unlock(&cli->mutex);
+
+       dma_resv_fini(&uvmm->resv);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
new file mode 100644 (file)
index 0000000..fc7f6fd
--- /dev/null
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_UVMM_H__
+#define __NOUVEAU_UVMM_H__
+
+#include <drm/drm_gpuva_mgr.h>
+
+#include "nouveau_drv.h"
+
+struct nouveau_uvmm {
+       struct nouveau_vmm vmm;
+       struct drm_gpuva_manager umgr;
+       struct maple_tree region_mt;
+       struct mutex mutex;
+       struct dma_resv resv;
+
+       u64 kernel_managed_addr;
+       u64 kernel_managed_size;
+
+       bool disabled;
+};
+
+struct nouveau_uvma_region {
+       struct nouveau_uvmm *uvmm;
+
+       struct {
+               u64 addr;
+               u64 range;
+       } va;
+
+       struct kref kref;
+
+       struct completion complete;
+       bool dirty;
+};
+
+struct nouveau_uvma {
+       struct drm_gpuva va;
+
+       struct nouveau_uvma_region *region;
+       u8 kind;
+};
+
+#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
+#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
+
+#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
+
+struct nouveau_uvmm_bind_job {
+       struct nouveau_job base;
+
+       struct kref kref;
+       struct list_head entry;
+       struct work_struct work;
+       struct completion complete;
+
+       /* struct bind_job_op */
+       struct list_head ops;
+};
+
+struct nouveau_uvmm_bind_job_args {
+       struct drm_file *file_priv;
+       struct nouveau_sched_entity *sched_entity;
+
+       unsigned int flags;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } in_sync;
+
+       struct {
+               struct drm_nouveau_sync *s;
+               u32 count;
+       } out_sync;
+
+       struct {
+               struct drm_nouveau_vm_bind_op *s;
+               u32 count;
+       } op;
+};
+
+#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
+
+int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+                     u64 kernel_managed_addr, u64 kernel_managed_size);
+void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
+
+void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem);
+void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo);
+
+int nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
+
+int nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
+
+static inline void nouveau_uvmm_lock(struct nouveau_uvmm *uvmm)
+{
+       mutex_lock(&uvmm->mutex);
+}
+
+static inline void nouveau_uvmm_unlock(struct nouveau_uvmm *uvmm)
+{
+       mutex_unlock(&uvmm->mutex);
+}
+
+#endif
index 67d6619fcd5ed98dab997859b0f7d758942f9045..a6602c0126715635d6328c2fb295d4195b7dd873 100644 (file)
@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
 int
 nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
 {
-       int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
-                               0, NULL, 0, &vmm->vmm);
+       int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
+                               PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
        if (ret)
                return ret;
 
index 3709cbbc19a1edfc2937ddcb74aee9e7fdc86814..c9dd3cff49a002e45ccd4a4d08eee1ea5568c7cc 100644 (file)
@@ -27,6 +27,9 @@
 void
 nvif_mmu_dtor(struct nvif_mmu *mmu)
 {
+       if (!nvif_object_constructed(&mmu->object))
+               return;
+
        kfree(mmu->kind);
        kfree(mmu->type);
        kfree(mmu->heap);
index 6053d6dc2184775e48ee5ea9dd02dd6dfe8a38e2..99296f03371ae06998f165d69cbb9b0096e26a2d 100644 (file)
@@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
        return ret;
 }
 
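+/* All raw VMM operations go through the single NVIF_VMM_V0_RAW method;
+ * each helper fills struct nvif_vmm_raw_v0 with the respective opcode.
+ */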
+int
+nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
+                u8 shift)
+{
+       struct nvif_vmm_raw_v0 args = {
+               .version = 0,
+               .op = NVIF_VMM_RAW_V0_GET,
+               .addr = addr,
+               .size = size,
+               .shift = shift,
+       };
+
+       return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+                               &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
+{
+       struct nvif_vmm_raw_v0 args = {
+               .version = 0,
+               .op = NVIF_VMM_RAW_V0_PUT,
+               .addr = addr,
+               .size = size,
+               .shift = shift,
+       };
+
+       return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+                               &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+                void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
+{
+       struct nvif_vmm_raw_v0 args = {
+               .version = 0,
+               .op = NVIF_VMM_RAW_V0_MAP,
+               .addr = addr,
+               .size = size,
+               .shift = shift,
+               .memory = nvif_handle(&mem->object),
+               .offset = offset,
+               .argv = (u64)(uintptr_t)argv,
+               .argc = argc,
+       };
+
+       return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+                               &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+                  u8 shift, bool sparse)
+{
+       struct nvif_vmm_raw_v0 args = {
+               .version = 0,
+               .op = NVIF_VMM_RAW_V0_UNMAP,
+               .addr = addr,
+               .size = size,
+               .shift = shift,
+               .sparse = sparse,
+       };
+
+       return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+                               &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+       struct nvif_vmm_raw_v0 args = {
+               .version = 0,
+               .op = NVIF_VMM_RAW_V0_SPARSE,
+               .addr = addr,
+               .size = size,
+               .ref = ref,
+       };
+
+       return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+                               &args, sizeof(args));
+}
+
 void
 nvif_vmm_dtor(struct nvif_vmm *vmm)
 {
@@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
 }
 
 int
-nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
-             u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+             enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
+             struct nvif_vmm *vmm)
 {
        struct nvif_vmm_v0 *args;
        u32 argn = sizeof(*args) + argc;
@@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
        if (!(args = kmalloc(argn, GFP_KERNEL)))
                return -ENOMEM;
        args->version = 0;
-       args->managed = managed;
        args->addr = addr;
        args->size = size;
+
+       switch (type) {
+       case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
+       case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
+       case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
        memcpy(args->data, argv, argc);
 
        ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
index e20b7ca218c3dfdd8f44fd58b6a8eb4f244bde5b..36a747f0039e384e1daa65299edea67da018bdb1 100644 (file)
@@ -212,8 +212,8 @@ nvkm_intr(int irq, void *arg)
                list_for_each_entry(intr, &device->intr.intr, head) {
                        for (leaf = 0; leaf < intr->leaves; leaf++) {
                                if (intr->stat[leaf]) {
-                                       nvkm_warn(intr->subdev, "intr%d: %08x\n",
-                                                 leaf, intr->stat[leaf]);
+                                       nvkm_debug(intr->subdev, "intr%d: %08x\n",
+                                                  leaf, intr->stat[leaf]);
                                        nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
                                }
                        }
index 301a5e5b5f7f9ca94b2a1817f05107cc9cdc490c..7c554c14e8841da1bb0374f25d9a47512c6f3765 100644 (file)
@@ -185,7 +185,7 @@ nvkm_object_fini(struct nvkm_object *object, bool suspend)
 
        nvif_debug(object, "%s children...\n", action);
        time = ktime_to_us(ktime_get());
-       list_for_each_entry(child, &object->tree, head) {
+       list_for_each_entry_reverse(child, &object->tree, head) {
                ret = nvkm_object_fini(child, suspend);
                if (ret && suspend)
                        goto fail_child;
index 6648ed62daa6898d04f4ff58c52474a51dccf1dc..315a69f7fdd128d3d57a3de27c97b03bdb9f8630 100644 (file)
@@ -35,6 +35,15 @@ ga100_ce_intr(struct nvkm_inth *inth)
        return IRQ_NONE;
 }
 
+int
+ga100_ce_nonstall(struct nvkm_engine *engine)
+{
+       struct nvkm_subdev *subdev = &engine->subdev;
+       struct nvkm_device *device = subdev->device;
+
+       return nvkm_rd32(device, 0x104424 + (subdev->inst * 0x80)) & 0x00000fff;
+}
+
 int
 ga100_ce_fini(struct nvkm_engine *engine, bool suspend)
 {
@@ -67,6 +76,7 @@ ga100_ce = {
        .oneinit = ga100_ce_oneinit,
        .init = ga100_ce_init,
        .fini = ga100_ce_fini,
+       .nonstall = ga100_ce_nonstall,
        .cclass = &gv100_ce_cclass,
        .sclass = {
                { -1, -1, AMPERE_DMA_COPY_A },
index 9f3448ad625f36017c66a42750cf0423bbbc29b1..461b73c7e2e0f351506597714acc85cf2f643d5a 100644 (file)
@@ -28,6 +28,7 @@ ga102_ce = {
        .oneinit = ga100_ce_oneinit,
        .init = ga100_ce_init,
        .fini = ga100_ce_fini,
+       .nonstall = ga100_ce_nonstall,
        .cclass = &gv100_ce_cclass,
        .sclass = {
                { -1, -1, AMPERE_DMA_COPY_A },
index c4c046916fa6e720369d54be4c4aa89e20177498..0be72c463b21a2c9f42a6b5a1aec62a4ed9cc456 100644 (file)
@@ -3,7 +3,7 @@
 #define __NVKM_CE_PRIV_H__
 #include <engine/ce.h>
 
-void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
 
@@ -12,4 +12,5 @@ extern const struct nvkm_object_func gv100_ce_cclass;
 int ga100_ce_oneinit(struct nvkm_engine *);
 int ga100_ce_init(struct nvkm_engine *);
 int ga100_ce_fini(struct nvkm_engine *, bool);
+int ga100_ce_nonstall(struct nvkm_engine *);
 #endif
index 40c8ea43c42f2bb6888ebbb45fbe4f50eb1d94f5..b8ac66b4a2c4b491e6291b82b7bd383bbd6cbc59 100644 (file)
@@ -26,6 +26,8 @@
 #include "head.h"
 #include "ior.h"
 
+#include <drm/display/drm_dp.h>
+
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 #include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
        return outp->dp.rates != 0;
 }
 
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+       struct nvkm_i2c_aux *aux = outp->dp.aux;
+       u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+       int ret;
+
+       ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Prior to DP1.3 the bit represented by
+        * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+        * If it is set, DP_DPCD_REV at 0000h could be at a value less than
+        * the true capability of the panel. The only way to check is to
+        * then compare 0000h and 2200h.
+        */
+       if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+             DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+               return 0;
+
+       ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
+       if (ret < 0)
+               return ret;
+
+       if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+               OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+                        outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
+               return 0;
+       }
+
+       if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
+               return 0;
+
+       memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+       return 0;
+}
+
 void
 nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
 {
@@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
                        memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
                }
 
-               if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
+               if (!nvkm_dp_read_dpcd_caps(outp)) {
                        const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
                        const u8 *rate;
                        int rate_max;
index a4853c4e5ee3a8257564ac818f258e035617b88c..67ef889a0c5f4728d62d80be74357b13881acebc 100644 (file)
@@ -295,6 +295,7 @@ g94_sor = {
        .clock = nv50_sor_clock,
        .war_2 = g94_sor_war_2,
        .war_3 = g94_sor_war_3,
+       .hdmi = &g84_sor_hdmi,
        .dp = &g94_sor_dp,
 };
 
index a2c7c6f83dcdbda80b5474482974cb51721403b7..506ffbe7b84216fd7045a759e68648c9c8c6f3a5 100644 (file)
@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 siz
        pack_hdmi_infoframe(&avi, data, size);
 
        nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
-       if (size)
+       if (!size)
                return;
 
        nvkm_wr32(device, 0x61c528 + soff, avi.header);
index 5ea9a2ff0663bd3b17c09d8077ed690977297146..5db37247dc29b2f1f9062981b2bf535fe2632c69 100644 (file)
@@ -283,11 +283,21 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
        }
 
        /* Initialise non-stall intr handling. */
-       if (fifo->func->nonstall_ctor) {
-               ret = fifo->func->nonstall_ctor(fifo);
-               if (ret) {
-                       nvkm_error(subdev, "nonstall %d\n", ret);
+       if (fifo->func->nonstall) {
+               if (fifo->func->nonstall_ctor) {
+                       ret = fifo->func->nonstall_ctor(fifo);
+                       if (ret < 0) {
+                               nvkm_error(subdev, "nonstall %d\n", ret);
+                               return ret;
+                       }
+               } else {
+                       ret = 1;
                }
+
+               ret = nvkm_event_init(fifo->func->nonstall, &fifo->engine.subdev, 1, ret,
+                                     &fifo->nonstall.event);
+               if (ret)
+                       return ret;
        }
 
        /* Allocate USERD + BAR1 polling area. */
@@ -358,7 +368,6 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
               enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
 {
        struct nvkm_fifo *fifo;
-       int ret;
 
        if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
@@ -374,16 +383,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
        spin_lock_init(&fifo->lock);
        mutex_init(&fifo->mutex);
 
-       ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
-       if (ret)
-               return ret;
-
-       if (func->nonstall) {
-               ret = nvkm_event_init(func->nonstall, &fifo->engine.subdev, 1, 1,
-                                     &fifo->nonstall.event);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
+       return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
 }
index 12a5d99d5e772123419bcdb2b070141880666807..c56d2a839efbaffdb6c0a01843973863b56c30de 100644 (file)
@@ -32,9 +32,6 @@
 
 #include <nvif/class.h>
 
-/*TODO: allocate? */
-#define GA100_FIFO_NONSTALL_VECTOR 0
-
 static u32
 ga100_chan_doorbell_handle(struct nvkm_chan *chan)
 {
@@ -83,7 +80,7 @@ ga100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
        nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
        nvkm_wo32(chan->inst, 0x0e8, chan->id);
        nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
-       nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | GA100_FIFO_NONSTALL_VECTOR);
+       nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | chan->cgrp->runl->nonstall.vector);
        nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
        nvkm_done(chan->inst);
        return 0;
@@ -148,8 +145,20 @@ ga100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
        return -ENODEV;
 }
 
+static int
+ga100_engn_nonstall(struct nvkm_engn *engn)
+{
+       struct nvkm_engine *engine = engn->engine;
+
+       if (WARN_ON(!engine->func->nonstall))
+               return -EINVAL;
+
+       return engine->func->nonstall(engine);
+}
+
 const struct nvkm_engn_func
 ga100_engn = {
+       .nonstall = ga100_engn_nonstall,
        .cxid = ga100_engn_cxid,
        .ctor = gk104_ectx_ctor,
        .bind = gv100_ectx_bind,
@@ -157,6 +166,7 @@ ga100_engn = {
 
 const struct nvkm_engn_func
 ga100_engn_ce = {
+       .nonstall = ga100_engn_nonstall,
        .cxid = ga100_engn_cxid,
        .ctor = gv100_ectx_ce_ctor,
        .bind = gv100_ectx_ce_bind,
@@ -429,7 +439,9 @@ static int
 ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prunl)
 {
        struct nvkm_device *device = fifo->engine.subdev.device;
+       struct nvkm_top_device *tdev;
        struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
        u32 chcfg  = nvkm_rd32(device, addr + 0x004);
        u32 chnum  = 1 << (chcfg & 0x0000000f);
        u32 chaddr = (chcfg & 0xfffffff0);
@@ -437,26 +449,55 @@ ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prun
        u32 vector = nvkm_rd32(device, addr + 0x160);
        int i, ret;
 
-       runl = *prunl = nvkm_runl_new(fifo, id, addr, chnum);
+       runl = nvkm_runl_new(fifo, id, addr, chnum);
        if (IS_ERR(runl))
                return PTR_ERR(runl);
 
+       *prunl = runl;
+
        for (i = 0; i < 2; i++) {
                u32 pbcfg = nvkm_rd32(device, addr + 0x010 + (i * 0x04));
                if (pbcfg & 0x80000000) {
                        runl->runq[runl->runq_nr] =
                                nvkm_runq_new(fifo, ((pbcfg & 0x03fffc00) - 0x040000) / 0x800);
-                       if (!runl->runq[runl->runq_nr])
+                       if (!runl->runq[runl->runq_nr]) {
+                               RUNL_ERROR(runl, "runq %d", runl->runq_nr);
                                return -ENOMEM;
+                       }
 
                        runl->runq_nr++;
                }
        }
 
+       nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist == runl->addr) {
+               if (tdev->engine < 0) {
+                       RUNL_DEBUG(runl, "engn !top");
+                       return -EINVAL;
+               }
+
+               engn = nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
+                                    fifo->func->engn_ce : fifo->func->engn,
+                                    tdev->type, tdev->inst);
+               if (!engn)
+                       return -EINVAL;
+
+               if (!engn->engine->func->nonstall) {
+                       RUNL_DEBUG(runl, "engn %s !nonstall", engn->engine->subdev.name);
+                       return -EINVAL;
+               }
+       }
+
+       if (list_empty(&runl->engns)) {
+               RUNL_DEBUG(runl, "!engns");
+               return -EINVAL;
+       }
+
        ret = nvkm_inth_add(&device->vfn->intr, vector & 0x00000fff, NVKM_INTR_PRIO_NORMAL,
                            &fifo->engine.subdev, ga100_runl_intr, &runl->inth);
-       if (ret)
+       if (ret) {
+               RUNL_ERROR(runl, "inth %d", ret);
                return ret;
+       }
 
        runl->chan = chaddr;
        runl->doorbell = dbcfg >> 16;
@@ -466,9 +507,9 @@ ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prun
 static irqreturn_t
 ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
 {
-       struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), nonstall.intr);
+       struct nvkm_runl *runl = container_of(inth, typeof(*runl), nonstall.inth);
 
-       nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+       nvkm_event_ntfy(&runl->fifo->nonstall.event, runl->id, NVKM_FIFO_NONSTALL_EVENT);
        return IRQ_HANDLED;
 }
 
@@ -476,16 +517,18 @@ static void
 ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
 {
        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+       struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
 
-       nvkm_inth_block(&fifo->nonstall.intr);
+       nvkm_inth_block(&runl->nonstall.inth);
 }
 
 static void
 ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
 {
        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+       struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
 
-       nvkm_inth_allow(&fifo->nonstall.intr);
+       nvkm_inth_allow(&runl->nonstall.inth);
 }
 
 const struct nvkm_event_func
@@ -497,9 +540,29 @@ ga100_fifo_nonstall = {
 int
 ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
 {
-       return nvkm_inth_add(&fifo->engine.subdev.device->vfn->intr, GA100_FIFO_NONSTALL_VECTOR,
-                            NVKM_INTR_PRIO_NORMAL, &fifo->engine.subdev, ga100_fifo_nonstall_intr,
-                            &fifo->nonstall.intr);
+       struct nvkm_subdev *subdev = &fifo->engine.subdev;
+       struct nvkm_vfn *vfn = subdev->device->vfn;
+       struct nvkm_runl *runl;
+       int ret, nr = 0;
+
+       nvkm_runl_foreach(runl, fifo) {
+               struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
+
+               runl->nonstall.vector = engn->func->nonstall(engn);
+               if (runl->nonstall.vector < 0) {
+                       RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
+                       return runl->nonstall.vector;
+               }
+
+               ret = nvkm_inth_add(&vfn->intr, runl->nonstall.vector, NVKM_INTR_PRIO_NORMAL,
+                                   subdev, ga100_fifo_nonstall_intr, &runl->nonstall.inth);
+               if (ret)
+                       return ret;
+
+               nr = max(nr, runl->id + 1);
+       }
+
+       return nr;
 }
 
 int
@@ -514,15 +577,13 @@ ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
                runl = nvkm_runl_get(fifo, -1, tdev->runlist);
                if (!runl) {
                        ret = ga100_runl_new(fifo, id++, tdev->runlist, &runl);
-                       if (ret)
-                               return ret;
-               }
-
-               if (tdev->engine < 0)
-                       continue;
+                       if (ret) {
+                               if (runl)
+                                       nvkm_runl_del(runl);
 
-               nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
-                             fifo->func->engn_ce : fifo->func->engn, tdev->type, tdev->inst);
+                               continue;
+                       }
+               }
        }
 
        return 0;
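
ga100 now routes non-stall interrupts per runlist: each runlist's first engine reports its vector (ga100_engn_nonstall()), an inth is registered for that vector, and the handler recovers the runlist from the embedded inth so it can notify event index runl->id. A self-contained sketch of the container_of() pattern the handler relies on, with deliberately simplified types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inth { int vector; };            /* simplified nvkm_inth */

    struct runl {                           /* simplified nvkm_runl */
            int id;
            struct { int vector; struct inth inth; } nonstall;
    };

    /* Mirrors ga100_fifo_nonstall_intr(): map the interrupt back to its
     * runlist, then notify that runlist's event index. */
    static void nonstall_intr(struct inth *inth)
    {
            struct runl *runl = container_of(inth, struct runl, nonstall.inth);

            printf("nvkm_event_ntfy(index=%d)\n", runl->id);
    }

    int main(void)
    {
            struct runl r = { .id = 2, .nonstall = { .vector = 0x123 } };

            nonstall_intr(&r.nonstall.inth);
            return 0;
    }
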
index 93d628d7d508b66824d6d75190b7c1349cbcd6c0..454a481a0aef3edb5a5b4656683abde466eb77be 100644 (file)
@@ -399,7 +399,7 @@ nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
        int ret;
 
        if (!(runl = kzalloc(sizeof(*runl), GFP_KERNEL)))
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        runl->func = fifo->func->runl;
        runl->fifo = fifo;
@@ -419,7 +419,7 @@ nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
                    (ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->chid))) {
                        RUNL_ERROR(runl, "cgid/chid: %d", ret);
                        nvkm_runl_del(runl);
-                       return NULL;
+                       return ERR_PTR(ret);
                }
        } else {
                runl->cgid = nvkm_chid_ref(fifo->cgid);
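
nvkm_runl_new() now propagates the real errno through the pointer itself instead of collapsing every failure to NULL, which is why ga100_runl_new() above can simply return PTR_ERR(runl). A minimal user-space rendition of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers (assumed semantics, simplified from include/linux/err.h):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified ERR_PTR scheme: encode small negative errnos in the
     * top page of the address space, as the kernel does. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(intptr_t error) { return (void *)error; }
    static inline intptr_t PTR_ERR(const void *ptr) { return (intptr_t)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static void *runl_new(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOMEM);   /* was: return NULL */
            return malloc(16);
    }

    int main(void)
    {
            void *runl = runl_new(1);

            if (IS_ERR(runl)) {                /* caller sees the real errno */
                    printf("runl_new failed: %ld\n", (long)PTR_ERR(runl));
                    return 1;
            }
            free(runl);
            return 0;
    }
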
index c93d21bb7bd5c6293b6565d50ad90dc7f02f5a1a..5421321f8e85f3a7e94cae5a76b869ad0d33641c 100644 (file)
@@ -11,6 +11,7 @@ enum nvkm_subdev_type;
 
 struct nvkm_engn {
        const struct nvkm_engn_func {
+               int (*nonstall)(struct nvkm_engn *);
                bool (*chsw)(struct nvkm_engn *);
                int (*cxid)(struct nvkm_engn *, bool *cgid);
                void (*mmu_fault_trigger)(struct nvkm_engn *);
@@ -69,6 +70,11 @@ struct nvkm_runl {
 
        struct nvkm_inth inth;
 
+       struct {
+               int vector;
+               struct nvkm_inth inth;
+       } nonstall;
+
        struct list_head cgrps;
        int cgrp_nr;
        int chan_nr;
index 1dac95ae7b43eace112014e9b0800ca24619cc23..04140e0110beb0f5c6a85ca6ecb7c1b891f00476 100644 (file)
@@ -52,7 +52,7 @@ nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
 
        switch (args->v0.type) {
        case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
-               return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, 0,
+               return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, runl->id,
                                       NVKM_FIFO_NONSTALL_EVENT, NULL);
        case NVIF_CHAN_EVENT_V0_KILLED:
                return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
index 71b824e6da9d695577263d5365cc2aff0aae8278..0096ad401b15321d2b535107cc09ab2f542b9f0e 100644 (file)
@@ -109,8 +109,7 @@ nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
 }
 
 static int
-nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
-                  const struct nvkm_oclass *oclass,
+nvkm_gr_cclass_new(struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
                   struct nvkm_object **pobject)
 {
        struct nvkm_gr *gr = nvkm_gr(oclass->engine);
@@ -126,6 +125,17 @@ nvkm_gr_intr(struct nvkm_engine *engine)
        gr->func->intr(gr);
 }
 
+static int
+nvkm_gr_nonstall(struct nvkm_engine *engine)
+{
+       struct nvkm_gr *gr = nvkm_gr(engine);
+
+       if (gr->func->nonstall)
+               return gr->func->nonstall(gr);
+
+       return -EINVAL;
+}
+
 static int
 nvkm_gr_oneinit(struct nvkm_engine *engine)
 {
@@ -178,6 +188,7 @@ nvkm_gr = {
        .init = nvkm_gr_init,
        .fini = nvkm_gr_fini,
        .reset = nvkm_gr_reset,
+       .nonstall = nvkm_gr_nonstall,
        .intr = nvkm_gr_intr,
        .tile = nvkm_gr_tile,
        .chsw_load = nvkm_gr_chsw_load,
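
nvkm_gr_nonstall() is the usual nvkm optional-operation shim: forward to the chipset hook when one exists, otherwise report -EINVAL so ga100_runl_new() above can reject engines without non-stall support. A hedged sketch of the pattern with simplified types:

    #include <errno.h>
    #include <stdio.h>

    struct gr;
    struct gr_func {
            int (*nonstall)(struct gr *);   /* optional per-chipset hook */
    };
    struct gr { const struct gr_func *func; };

    /* Mirrors nvkm_gr_nonstall(): optional op, -EINVAL when unimplemented. */
    static int gr_nonstall(struct gr *gr)
    {
            if (gr->func->nonstall)
                    return gr->func->nonstall(gr);
            return -EINVAL;
    }

    static int ga102_nonstall(struct gr *gr) { (void)gr; return 0x123; }

    int main(void)
    {
            const struct gr_func with    = { .nonstall = ga102_nonstall };
            const struct gr_func without = { 0 };
            struct gr a = { &with }, b = { &without };

            printf("%d %d\n", gr_nonstall(&a), gr_nonstall(&b)); /* 291 -22 on Linux */
            return 0;
    }
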
index a5b5ac2755a2a1207cbfa3a2197fde08d200c511..00cd70abad67e2cb601916035666f1934f3090db 100644 (file)
@@ -137,8 +137,15 @@ ga102_gr_oneinit_intr(struct gf100_gr *gr, enum nvkm_intr_type *pvector)
        return &device->vfn->intr;
 }
 
+static int
+ga102_gr_nonstall(struct gf100_gr *gr)
+{
+       return nvkm_rd32(gr->base.engine.subdev.device, 0x400160) & 0x00000fff;
+}
+
 static const struct gf100_gr_func
 ga102_gr = {
+       .nonstall = ga102_gr_nonstall,
        .oneinit_intr = ga102_gr_oneinit_intr,
        .oneinit_tiles = gm200_gr_oneinit_tiles,
        .oneinit_sm_id = gv100_gr_oneinit_sm_id,
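
On ga102 the graphics engine's non-stall vector comes straight from hardware: the low 12 bits of register 0x400160. The same &0x00000fff mask appears in ga100_runl_new()'s nvkm_inth_add() call above, so a tiny worked example with a hypothetical register value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reg = 0x80000123;           /* hypothetical 0x400160 readout */
            unsigned int vector = reg & 0x00000fff;  /* keep bits 11:0 */

            printf("vector = 0x%03x\n", vector);     /* -> 0x123 */
            return 0;
    }
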
index 5f20079c3660f7b2918759c6dea300db248fa2e9..3648868bb9fc5138d6f94fc281ff49168808028e 100644 (file)
@@ -374,7 +374,7 @@ gf100_gr_chan = {
 };
 
 static int
-gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                  const struct nvkm_oclass *oclass,
                  struct nvkm_object **pobject)
 {
@@ -2494,12 +2494,24 @@ gf100_gr_gpccs_ucode = {
        .data.size = sizeof(gf100_grgpc_data),
 };
 
+static int
+gf100_gr_nonstall(struct nvkm_gr *base)
+{
+       struct gf100_gr *gr = gf100_gr(base);
+
+       if (gr->func->nonstall)
+               return gr->func->nonstall(gr);
+
+       return -EINVAL;
+}
+
 static const struct nvkm_gr_func
 gf100_gr_ = {
        .dtor = gf100_gr_dtor,
        .oneinit = gf100_gr_oneinit,
        .init = gf100_gr_init_,
        .fini = gf100_gr_fini,
+       .nonstall = gf100_gr_nonstall,
        .reset = gf100_gr_reset,
        .units = gf100_gr_units,
        .chan_new = gf100_gr_chan_new,
index 94ca7ac16acfdfc4084c360cf04639833648e58f..54f686ba39ac2fff3cdbed960539c053e8ac9154 100644 (file)
@@ -147,6 +147,7 @@ struct gf100_gr_func_zbc {
 };
 
 struct gf100_gr_func {
+       int (*nonstall)(struct gf100_gr *);
        struct nvkm_intr *(*oneinit_intr)(struct gf100_gr *, enum nvkm_intr_type *);
        void (*oneinit_tiles)(struct gf100_gr *);
        int (*oneinit_sm_id)(struct gf100_gr *);
index 81bd682c21021129ebcc3015a4daca5b926f3a1c..ca822f07b63e9ef41276b68fd1f95c66e38acfc6 100644 (file)
@@ -1181,7 +1181,7 @@ nv04_gr_chan = {
 };
 
 static int
-nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv04_gr *gr = nv04_gr(base);
index 7fe6e58f6bab6beecc00784d86e0617e9164d1d0..92ef7c9b29101cc726e0a6b4dee5b71019f0d778 100644 (file)
@@ -999,7 +999,7 @@ nv10_gr_chan = {
        } while (0)
 
 int
-nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv10_gr *gr = nv10_gr(base);
index 5cfe927c9123ea64999af16df0a839f37d74315b..b86090c080604b8f6e9d4fef4386fec1137f1c49 100644 (file)
@@ -9,6 +9,6 @@ int nv10_gr_init(struct nvkm_gr *);
 void nv10_gr_intr(struct nvkm_gr *);
 void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
 
-int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
                     const struct nvkm_oclass *, struct nvkm_object **);
 #endif
index 75434f5de7addefed4a653b50f44647f907d2c2a..02a8c62a0a3285a0649119b47e8a094e0a14e749 100644 (file)
@@ -72,7 +72,7 @@ nv20_gr_chan = {
 };
 
 static int
-nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index 94685e4d4f87ca3251300a412651671b51bddf4a..d6bc6904dcc8d0a65c5271334696bfc5dcdb5852 100644 (file)
@@ -18,7 +18,7 @@ nv25_gr_chan = {
 };
 
 static int
-nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index 2d6273675291a134b52295ab6ea7836e8f205613..e5a351b51eb97419a64d86dca898e051cfe516fd 100644 (file)
@@ -18,7 +18,7 @@ nv2a_gr_chan = {
 };
 
 static int
-nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index 647bd6fede0404391655c7c40ed45a53d4f9522f..80370323755ef4b0aefe4c903049e7d750d5736a 100644 (file)
@@ -19,7 +19,7 @@ nv30_gr_chan = {
 };
 
 static int
-nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index 2eae3fe4ef4ebdf79408763727ff1d7b900d2242..cdf043bbdd59da35b37337720717116074009462 100644 (file)
@@ -18,7 +18,7 @@ nv34_gr_chan = {
 };
 
 static int
-nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index 657d7cdba369aed39d0182c0ff4e6dd679dd3611..fa5a6ccb871deb2c1efe2375bbf983a07d688cc7 100644 (file)
@@ -18,7 +18,7 @@ nv35_gr_chan = {
 };
 
 static int
-nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv20_gr *gr = nv20_gr(base);
index d2df097a6cf63643e0d879ef6e5270b18eb20910..a5e1f02791b44e47776612873f530630465d0ba0 100644 (file)
@@ -145,7 +145,7 @@ nv40_gr_chan = {
 };
 
 int
-nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv40_gr *gr = nv40_gr(base);
index f3d3d3a5ae5b2a5eaab52c4e8bbc008e76d58fef..84fbc99139e55bc1ba24c6aac4e621cb3b94870f 100644 (file)
@@ -22,12 +22,12 @@ u64 nv40_gr_units(struct nvkm_gr *);
 struct nv40_gr_chan {
        struct nvkm_object object;
        struct nv40_gr *gr;
-       struct nvkm_fifo_chan *fifo;
+       struct nvkm_chan *fifo;
        u32 inst;
        struct list_head head;
 };
 
-int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
                     const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nvkm_object_func nv40_gr_object;
index 1ba18a8e380f62d3f26654c3956f8678c1039445..c8a0288c092d59653655fed04132ad7ebc3be1cc 100644 (file)
@@ -86,7 +86,7 @@ nv50_gr_chan = {
 };
 
 int
-nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv50_gr *gr = nv50_gr(base);
index 84388c42e5c6882d4e41e0f79f8400258ae6e4d8..97ead004235710b4fec70b05f82e8998602e9ca4 100644 (file)
@@ -27,7 +27,7 @@ struct nv50_gr_chan {
        struct nv50_gr *gr;
 };
 
-int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
                     const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nvkm_object_func nv50_gr_object;
index 08d5c96e6458354472763ebeb4a17929f9b942f1..0884abc73a9d793b34612086e70130cec0668fba 100644 (file)
@@ -5,7 +5,7 @@
 #include <engine/gr.h>
 #include <core/enum.h>
 struct nvkm_fb_tile;
-struct nvkm_fifo_chan;
+struct nvkm_chan;
 
 int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                 bool enable, struct nvkm_gr *);
@@ -18,10 +18,11 @@ struct nvkm_gr_func {
        int (*init)(struct nvkm_gr *);
        int (*fini)(struct nvkm_gr *, bool);
        int (*reset)(struct nvkm_gr *);
+       int (*nonstall)(struct nvkm_gr *);
        void (*intr)(struct nvkm_gr *);
        void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
        int (*tlb_flush)(struct nvkm_gr *);
-       int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+       int (*chan_new)(struct nvkm_gr *, struct nvkm_chan *,
                        const struct nvkm_oclass *, struct nvkm_object **);
        int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
        /* Returns chipset-specific counts of units packed into an u64.
index cb0c3991b2ad974ca78629e2e6850847903bc6ec..db9fc1ecae0d273124289f112857cded0fc9ae52 100644 (file)
@@ -81,8 +81,7 @@ nv31_mpeg_chan = {
 };
 
 int
-nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
-                  const struct nvkm_oclass *oclass,
+nv31_mpeg_chan_new(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
                   struct nvkm_object **pobject)
 {
        struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
index 9f30aaaf809efdec566532445d37f50d0fea2e09..251d659565dedc1f59d4fca0f3e70ea6ac05df42 100644 (file)
@@ -24,9 +24,9 @@ struct nv31_mpeg_func {
 struct nv31_mpeg_chan {
        struct nvkm_object object;
        struct nv31_mpeg *mpeg;
-       struct nvkm_fifo_chan *fifo;
+       struct nvkm_chan *fifo;
 };
 
-int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+int nv31_mpeg_chan_new(struct nvkm_chan *, const struct nvkm_oclass *,
                       struct nvkm_object **);
 #endif
index 0890a279458ed6d1e3d592b824c20ac4ea2c7d15..4b1374adbda3a1dc61a1b3514fda6a38ec361d22 100644 (file)
@@ -43,7 +43,7 @@ struct nv44_mpeg {
 struct nv44_mpeg_chan {
        struct nvkm_object object;
        struct nv44_mpeg *mpeg;
-       struct nvkm_fifo_chan *fifo;
+       struct nvkm_chan *fifo;
        struct list_head head;
        u32 inst;
 };
@@ -100,8 +100,7 @@ nv44_mpeg_chan = {
 };
 
 static int
-nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
-                  const struct nvkm_oclass *oclass,
+nv44_mpeg_chan_new(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
                   struct nvkm_object **pobject)
 {
        struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
index 667a2d05dd893a3f0d8006595e4bd7f9bbdeb567..044ff4133874575049522c36180544d4ae857182 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __NVKM_MPEG_PRIV_H__
 #define __NVKM_MPEG_PRIV_H__
 #include <engine/mpeg.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
 
 int nv31_mpeg_init(struct nvkm_engine *);
 void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
index a9d464db69749402662707519bd804592f9ea4c7..20220d6d4a13e8d449bfe6c49a4679332fbd3614 100644 (file)
@@ -74,8 +74,7 @@ nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
 }
 
 static int
-nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
-                  const struct nvkm_oclass *oclass,
+nvkm_sw_cclass_get(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
                   struct nvkm_object **pobject)
 {
        struct nvkm_sw *sw = nvkm_sw(oclass->engine);
index 834b8cbed51d932e6aefe2ff563499e6742e7fc5..2bf45141de609e499d236e222705b8c947c57849 100644 (file)
@@ -74,7 +74,7 @@ nvkm_sw_chan = {
 
 int
 nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
-                 struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+                 struct nvkm_chan *fifo, const struct nvkm_oclass *oclass,
                  struct nvkm_sw_chan *chan)
 {
        unsigned long flags;
index 67b2e5ea93d92210200a84cb8ee17b3ebd174add..c313aea16a17c2d76ae31efcbba6f4611edc4129 100644 (file)
@@ -11,7 +11,7 @@ struct nvkm_sw_chan {
        const struct nvkm_sw_chan_func *func;
        struct nvkm_object object;
        struct nvkm_sw *sw;
-       struct nvkm_fifo_chan *fifo;
+       struct nvkm_chan *fifo;
        struct list_head head;
 
 #define NVKM_SW_CHAN_EVENT_PAGE_FLIP BIT(0)
@@ -24,7 +24,7 @@ struct nvkm_sw_chan_func {
 };
 
 int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
-                     struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+                     struct nvkm_chan *, const struct nvkm_oclass *,
                      struct nvkm_sw_chan *);
 bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
 #endif
index c3cf6f2ff86c329df392c8157846757eb0ae4809..a0273baf4c677a0905c48b20d718a074814ed613 100644 (file)
@@ -102,7 +102,7 @@ gf100_sw_chan = {
 };
 
 static int
-gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifoch,
                  const struct nvkm_oclass *oclass,
                  struct nvkm_object **pobject)
 {
index 4aa57573869c95b5152b07a82412594f65931123..8a1d112da8943070a5bbb9718cbb630c30412a6c 100644 (file)
@@ -106,7 +106,7 @@ nv04_sw_chan = {
 };
 
 static int
-nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifo,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nv04_sw_chan *chan;
index e79e640ae535feb0606db25b75704dcb49a496eb..742c75859569258dfe185ad5b352bbc540fd5c75 100644 (file)
@@ -36,7 +36,7 @@ nv10_sw_chan = {
 };
 
 static int
-nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifo,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nvkm_sw_chan *chan;
index 9d7a9b7d5be3acd97a66ac79fd2c7072eba8f1d4..99476d32c5af58b4847f771712c55adbe60dd226 100644 (file)
@@ -99,7 +99,7 @@ nv50_sw_chan = {
 };
 
 static int
-nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifoch,
                 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
        struct nvkm_disp *disp = sw->engine.subdev.device->disp;
index d9d83b1b884955e1916b3a479456f037b7540893..8015afaba947017033dd1db1d65afc964a488ef3 100644 (file)
@@ -15,7 +15,7 @@ struct nvkm_sw_chan_sclass {
 };
 
 struct nvkm_sw_func {
-       int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+       int (*chan_new)(struct nvkm_sw *, struct nvkm_chan *,
                        const struct nvkm_oclass *, struct nvkm_object **);
        const struct nvkm_sw_chan_sclass sclass[];
 };
index 795f3a649b122172013a50f879419d6dcdf6d1a5..9b8ca4e898f903661eb588843698ca11e0d7c200 100644 (file)
@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
        u64 falcons;
        int ret, i;
 
-       if (list_empty(&acr->hsfw)) {
+       if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
                nvkm_debug(subdev, "No HSFW(s)\n");
                nvkm_acr_cleanup(acr);
                return 0;
index 6ba5120a2ebe1d8c6f7804f4cd17634c3deba887..394c305e759ad8f008e2e0fa784a03f9aa9851c6 100644 (file)
@@ -55,7 +55,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
 nvkm-y += nvkm/subdev/fb/ramgm200.o
 nvkm-y += nvkm/subdev/fb/ramgp100.o
-nvkm-y += nvkm/subdev/fb/ramga102.o
+nvkm-y += nvkm/subdev/fb/ramgp102.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
 nvkm-y += nvkm/subdev/fb/gddr3.o
index 0955340cc421898625fa06d33dfb3a34658d17a1..8a286a9349ac62485ef4c486cc5a8ded917686ea 100644 (file)
@@ -174,6 +174,18 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
        return 0;
 }
 
+u64
+nvkm_fb_vidmem_size(struct nvkm_device *device)
+{
+       struct nvkm_fb *fb = device->fb;
+
+       if (fb && fb->func->vidmem.size)
+               return fb->func->vidmem.size(fb);
+
+       WARN_ON(1);
+       return 0;
+}
+
 static int
 nvkm_fb_init(struct nvkm_subdev *subdev)
 {
index a7456e7864636978b0fad6da210ce1e4a201a1ce..12037fd4fdf27fd708c345700b7a297448fae4e9 100644 (file)
@@ -30,7 +30,8 @@ ga100_fb = {
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
-       .ram_new = gp100_ram_new,
+       .vidmem.size = gp102_fb_vidmem_size,
+       .ram_new = gp102_ram_new,
        .default_bigpage = 16,
 };
 
index dd476e079fe1cbf0c0abb876585de69d60c9f36a..76f6877b54c6f239cca61ebaef114d1b882c7e64 100644 (file)
 
 #include <engine/nvdec.h>
 
+static u64
+ga102_fb_vidmem_size(struct nvkm_fb *fb)
+{
+       return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
+}
+
 static int
 ga102_fb_oneinit(struct nvkm_fb *fb)
 {
@@ -43,7 +49,8 @@ ga102_fb = {
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
-       .ram_new = ga102_ram_new,
+       .vidmem.size = ga102_fb_vidmem_size,
+       .ram_new = gp102_ram_new,
        .default_bigpage = 16,
        .vpr.scrub_required = tu102_fb_vpr_scrub_required,
        .vpr.scrub = gp102_fb_vpr_scrub,
index 14d942e8b857f43566fb7359d52464a092f69480..534553c6480547cd4ffdc7578c59838741b1b588 100644 (file)
@@ -40,6 +40,20 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
        return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
 }
 
+u64
+gp102_fb_vidmem_size(struct nvkm_fb *fb)
+{
+       const u32 data = nvkm_rd32(fb->subdev.device, 0x100ce0);
+       const u32 lmag = (data & 0x000003f0) >> 4;
+       const u32 lsca = (data & 0x0000000f);
+       const u64 size = (u64)lmag << (lsca + 20);
+
+       if (data & 0x40000000)
+               return size / 16 * 15;
+
+       return size;
+}
+
 int
 gp102_fb_oneinit(struct nvkm_fb *fb)
 {
@@ -59,9 +73,10 @@ gp102_fb = {
        .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
        .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+       .vidmem.size = gp102_fb_vidmem_size,
        .vpr.scrub_required = gp102_fb_vpr_scrub_required,
        .vpr.scrub = gp102_fb_vpr_scrub,
-       .ram_new = gp100_ram_new,
+       .ram_new = gp102_ram_new,
 };
 
 int
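
gp102_fb_vidmem_size() decodes register 0x100ce0: bits 9:4 give a magnitude (lmag), bits 3:0 a scale (lsca), and the size is lmag << (lsca + 20); when bit 30 is set the driver reports only 15/16 of it (a one-sixteenth carve-out). A worked example with a hypothetical register value:

    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t vidmem_size(uint32_t data)
    {
            const uint32_t lmag = (data & 0x000003f0) >> 4;
            const uint32_t lsca = (data & 0x0000000f);
            const uint64_t size = (uint64_t)lmag << (lsca + 20);

            if (data & 0x40000000)
                    return size / 16 * 15;
            return size;
    }

    int main(void)
    {
            /* Hypothetical 0x100ce0 readout: lmag=16, lsca=1 -> 16 << 21 = 32 MiB,
             * and bit 30 set trims it to 30 MiB. */
            printf("%" PRIu64 " MiB\n", vidmem_size(0x40000101) >> 20);
            return 0;
    }
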
index 4d8a286a7a348ce2e918720716a12143b152da66..f422564bee5b0afb1e81aaf7d085842324dab4f2 100644 (file)
@@ -36,9 +36,10 @@ gv100_fb = {
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+       .vidmem.size = gp102_fb_vidmem_size,
        .vpr.scrub_required = gp102_fb_vpr_scrub_required,
        .vpr.scrub = gp102_fb_vpr_scrub,
-       .ram_new = gp100_ram_new,
+       .ram_new = gp102_ram_new,
        .default_bigpage = 16,
 };
 
index 726c30c8bf95d3fa27864d06fc26704b25242e49..77d6a8c108298c78f4c6082ca4c2a2c65fa247b4 100644 (file)
@@ -20,6 +20,10 @@ struct nvkm_fb_func {
                void (*flush_page_init)(struct nvkm_fb *);
        } sysmem;
 
+       struct nvkm_fb_func_vidmem {
+               u64 (*size)(struct nvkm_fb *);
+       } vidmem;
+
        struct {
                bool (*scrub_required)(struct nvkm_fb *);
                int (*scrub)(struct nvkm_fb *);
@@ -84,6 +88,7 @@ void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 
 int gp102_fb_oneinit(struct nvkm_fb *);
+u64 gp102_fb_vidmem_size(struct nvkm_fb *);
 bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
 
index ea7d66f3dd825dba1f4eb6162e9a66e163d1677a..50f0c1914f58e8352f8db2aca1ddf9fae0a48e27 100644 (file)
@@ -70,5 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
-int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
deleted file mode 100644 (file)
index 298c136..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2021 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "ram.h"
-
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/rammap.h>
-
-static const struct nvkm_ram_func
-ga102_ram = {
-};
-
-int
-ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
-{
-       struct nvkm_device *device = fb->subdev.device;
-       enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
-       u32 size = nvkm_rd32(device, 0x1183a4);
-
-       return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
new file mode 100644 (file)
index 0000000..8550f5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+#include "ram.h"
+
+#include <subdev/bios.h>
+
+static const struct nvkm_ram_func
+gp102_ram = {
+};
+
+int
+gp102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       enum nvkm_ram_type type = nvkm_fb_bios_memtype(fb->subdev.device->bios);
+       const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+       u64 size = fb->func->vidmem.size(fb);
+       int ret;
+
+       ret = nvkm_ram_new_(&gp102_ram, fb, type, size, pram);
+       if (ret)
+               return ret;
+
+       nvkm_mm_fini(&(*pram)->vram);
+
+       return nvkm_mm_init(&(*pram)->vram, NVKM_RAM_MM_NORMAL,
+                           rsvd_head >> NVKM_RAM_MM_SHIFT,
+                           (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+                           1);
+
+}
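
The new gp102_ram_new() then rebuilds the VRAM allocator so it never hands out the 256 KiB VGA region at the bottom or the 1 MiB VBIOS region at the top. A small arithmetic sketch of that carve-out, assuming NVKM_RAM_MM_SHIFT is a 12-bit (4 KiB) block granularity, which is an assumption to verify against the nvkm headers:

    #include <inttypes.h>
    #include <stdio.h>

    #define RAM_MM_SHIFT 12u   /* assumed block granularity (4 KiB) */

    int main(void)
    {
            const uint64_t size      = 30ull << 20;    /* e.g. the 30 MiB from above */
            const uint64_t rsvd_head =  256u << 10;    /* vga memory */
            const uint64_t rsvd_tail = 1024u << 10;    /* vbios etc  */

            printf("first block: %" PRIu64 "\n", rsvd_head >> RAM_MM_SHIFT);
            printf("block count: %" PRIu64 "\n",
                   (size - rsvd_head - rsvd_tail) >> RAM_MM_SHIFT);
            return 0;
    }
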
index b8803c124c3b2307df5214ca3463d9c0d683fbb8..bcc23d4c8115d1d6b44c0dc1776ce5bdaeb3bf6f 100644 (file)
@@ -36,9 +36,10 @@ tu102_fb = {
        .init_page = gv100_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+       .vidmem.size = gp102_fb_vidmem_size,
        .vpr.scrub_required = tu102_fb_vpr_scrub_required,
        .vpr.scrub = gp102_fb_vpr_scrub,
-       .ram_new = gp100_ram_new,
+       .ram_new = gp102_ram_new,
        .default_bigpage = 16,
 };
 
index 524cd3c0e3fec095c86bc75ad5f3212897152330..8e459d88ff8f89426e163e43069a1a0698b9c44f 100644 (file)
@@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
+       if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+               return -EINVAL;
+
        if (size) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
        }
 
        return ret;
@@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
+       if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+               return -EINVAL;
+
        if (size) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
        }
 
        return ret;
@@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       mutex_lock(&vmm->mutex);
+       if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
+               return -EINVAL;
+
+       mutex_lock(&vmm->mutex.vmm);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx",
@@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        nvkm_vmm_unmap_locked(vmm, vma, false);
        ret = 0;
 done:
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
        return ret;
 }
 
@@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
+       if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+               return -EINVAL;
+
        memory = nvkm_umem_search(client, handle);
        if (IS_ERR(memory)) {
                VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
                return PTR_ERR(memory);
        }
 
-       mutex_lock(&vmm->mutex);
+       mutex_lock(&vmm->mutex.vmm);
        if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                VMM_DEBUG(vmm, "lookup %016llx", addr);
                goto fail;
@@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                }
        }
        vma->busy = true;
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
 
        ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
        if (ret == 0) {
@@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                return 0;
        }
 
-       mutex_lock(&vmm->mutex);
+       mutex_lock(&vmm->mutex.vmm);
        vma->busy = false;
        nvkm_vmm_unmap_region(vmm, vma);
 fail:
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
        nvkm_memory_unref(&memory);
        return ret;
 }
@@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       mutex_lock(&vmm->mutex);
+       mutex_lock(&vmm->mutex.vmm);
        vma = nvkm_vmm_node_search(vmm, args->v0.addr);
        if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
@@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        nvkm_vmm_put_locked(vmm, vma);
        ret = 0;
 done:
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
        return ret;
 }
 
@@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       mutex_lock(&vmm->mutex);
+       mutex_lock(&vmm->mutex.vmm);
        ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                  page, align, size, &vma);
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
        if (ret)
                return ret;
 
@@ -314,6 +326,168 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        return 0;
 }
 
+static inline int
+nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
+{
+       struct nvkm_vmm *vmm = uvmm->vmm;
+       const struct nvkm_vmm_page *page;
+
+       if (likely(shift)) {
+               for (page = vmm->func->page; page->shift; page++) {
+                       if (shift == page->shift)
+                               break;
+               }
+
+               if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+                       VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+                       return -EINVAL;
+               }
+       } else {
+               return -EINVAL;
+       }
+       *refd = page - vmm->func->page;
+
+       return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+       struct nvkm_vmm *vmm = uvmm->vmm;
+       u8 refd;
+       int ret;
+
+       if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+               return -EINVAL;
+
+       ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+       if (ret)
+               return ret;
+
+       return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
+}
+
+static int
+nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+       struct nvkm_vmm *vmm = uvmm->vmm;
+       u8 refd;
+       int ret;
+
+       if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+               return -EINVAL;
+
+       ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+       if (ret)
+               return ret;
+
+       nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
+
+       return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+       struct nvkm_client *client = uvmm->object.client;
+       struct nvkm_vmm *vmm = uvmm->vmm;
+       struct nvkm_vma vma = {
+               .addr = args->addr,
+               .size = args->size,
+               .used = true,
+               .mapref = false,
+               .no_comp = true,
+       };
+       struct nvkm_memory *memory;
+       void *argv = (void *)(uintptr_t)args->argv;
+       unsigned int argc = args->argc;
+       u64 handle = args->memory;
+       u8 refd;
+       int ret;
+
+       if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+               return -EINVAL;
+
+       ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+       if (ret)
+               return ret;
+
+       vma.page = vma.refd = refd;
+
+       memory = nvkm_umem_search(client, args->memory);
+       if (IS_ERR(memory)) {
+               VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
+               return PTR_ERR(memory);
+       }
+
+       ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
+
+       nvkm_memory_unref(&vma.memory);
+       nvkm_memory_unref(&memory);
+       return ret;
+}
+
+static int
+nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+       struct nvkm_vmm *vmm = uvmm->vmm;
+       u8 refd;
+       int ret;
+
+       if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+               return -EINVAL;
+
+       ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+       if (ret)
+               return ret;
+
+       nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
+                          args->sparse, refd);
+
+       return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+       struct nvkm_vmm *vmm = uvmm->vmm;
+
+       if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+               return -EINVAL;
+
+       return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
+}
+
+static int
+nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+       union {
+               struct nvif_vmm_raw_v0 v0;
+       } *args = argv;
+       int ret = -ENOSYS;
+
+       if (!uvmm->vmm->managed.raw)
+               return -EINVAL;
+
+       if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
+               return ret;
+
+       switch (args->v0.op) {
+       case NVIF_VMM_RAW_V0_GET:
+               return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
+       case NVIF_VMM_RAW_V0_PUT:
+               return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
+       case NVIF_VMM_RAW_V0_MAP:
+               return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
+       case NVIF_VMM_RAW_V0_UNMAP:
+               return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
+       case NVIF_VMM_RAW_V0_SPARSE:
+               return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
+       default:
+               return -EINVAL;
+       };
+}
+
 static int
 nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
 {
@@ -326,6 +500,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
        case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
        case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
        case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+       case NVIF_VMM_V0_RAW   : return nvkm_uvmm_mthd_raw   (uvmm, argv, argc);
        case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
                if (uvmm->vmm->func->mthd) {
                        return uvmm->vmm->func->mthd(uvmm->vmm,
@@ -366,10 +541,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        struct nvkm_uvmm *uvmm;
        int ret = -ENOSYS;
        u64 addr, size;
-       bool managed;
+       bool managed, raw;
 
        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
-               managed = args->v0.managed != 0;
+               managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
+               raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
                addr = args->v0.addr;
                size = args->v0.size;
        } else
@@ -377,12 +553,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 
        if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
                return -ENOMEM;
+
        nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
        *pobject = &uvmm->object;
 
        if (!mmu->vmm) {
-               ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
-                                         NULL, "user", &uvmm->vmm);
+               ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
+                                         argv, argc, NULL, "user", &uvmm->vmm);
                if (ret)
                        return ret;
 
@@ -393,6 +570,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 
                uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
        }
+       uvmm->vmm->managed.raw = raw;
 
        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
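
All five RAW ops funnel through nvkm_uvmm_page_index(), which maps the caller-supplied page shift onto an index into the vmm's page-size table and insists the size be aligned to that page. A standalone sketch with example shifts (the real table is vmm->func->page and is per-chipset):

    #include <errno.h>
    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Example page-size table, largest first, 0-terminated like nvkm's. */
    static const unsigned char page_shift[] = { 29, 21, 16, 12, 0 };

    static int page_index(unsigned long long size, unsigned char shift,
                          unsigned char *refd)
    {
            unsigned int i;

            if (!shift)
                    return -EINVAL;
            for (i = 0; page_shift[i]; i++) {
                    if (shift == page_shift[i])
                            break;
            }
            if (!page_shift[i] || !IS_ALIGNED(size, 1ULL << page_shift[i]))
                    return -EINVAL;       /* unknown shift or misaligned size */
            *refd = i;
            return 0;
    }

    int main(void)
    {
            unsigned char refd;

            if (page_index(2ULL << 20, 21, &refd) == 0)
                    printf("refd = %u\n", refd);                  /* -> 1 */
            printf("bad: %d\n", page_index(3ULL << 20, 21, &refd)); /* misaligned */
            return 0;
    }
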
index ae793f400ba1e766904fcebe27637eb565a2101a..eb5fcadcb39aa66caaf27308eb36de602a483019 100644 (file)
@@ -676,41 +676,18 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
        return 0;
 }
 
-static void
-nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-                       u64 addr, u64 size, bool sparse, bool pfn)
-{
-       const struct nvkm_vmm_desc_func *func = page->desc->func;
-       nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
-                     false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
-                     sparse ? func->sparse : func->invalid ? func->invalid :
-                                                             func->unmap);
-}
-
-static int
-nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-                     u64 addr, u64 size, struct nvkm_vmm_map *map,
-                     nvkm_vmm_pte_func func)
-{
-       u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
-                                false, nvkm_vmm_ref_ptes, func, map, NULL);
-       if (fail != ~0ULL) {
-               if ((size = fail - addr))
-                       nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
-               return -ENOMEM;
-       }
-       return 0;
-}
-
 static void
 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                    u64 addr, u64 size, bool sparse, bool pfn)
 {
        const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+       mutex_lock(&vmm->mutex.map);
        nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
                      NULL, NULL, NULL,
                      sparse ? func->sparse : func->invalid ? func->invalid :
                                                              func->unmap);
+       mutex_unlock(&vmm->mutex.map);
 }
 
 static void
@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size, struct nvkm_vmm_map *map,
                  nvkm_vmm_pte_func func)
 {
+       mutex_lock(&vmm->mutex.map);
        nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
                      NULL, func, map, NULL);
+       mutex_unlock(&vmm->mutex.map);
 }
 
 static void
-nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-                 u64 addr, u64 size)
+nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                        u64 addr, u64 size)
 {
        nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
                      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
 }
 
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                 u64 addr, u64 size)
+{
+       mutex_lock(&vmm->mutex.ref);
+       nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
+       mutex_unlock(&vmm->mutex.ref);
+}
+
 static int
 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size)
 {
-       u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
-                                nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+       u64 fail;
+
+       mutex_lock(&vmm->mutex.ref);
+       fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
+                            nvkm_vmm_ref_ptes, NULL, NULL, NULL);
        if (fail != ~0ULL) {
                if (fail != addr)
-                       nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+                       nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
+               mutex_unlock(&vmm->mutex.ref);
+               return -ENOMEM;
+       }
+       mutex_unlock(&vmm->mutex.ref);
+       return 0;
+}
+
+static void
+__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                         u64 addr, u64 size, bool sparse, bool pfn)
+{
+       const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+       nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+                     false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
+                     sparse ? func->sparse : func->invalid ? func->invalid :
+                                                             func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                       u64 addr, u64 size, bool sparse, bool pfn)
+{
+       if (vmm->managed.raw) {
+               nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
+               nvkm_vmm_ptes_put(vmm, page, addr, size);
+       } else {
+               __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
+       }
+}
+
+static int
+__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                       u64 addr, u64 size, struct nvkm_vmm_map *map,
+                       nvkm_vmm_pte_func func)
+{
+       u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+                                false, nvkm_vmm_ref_ptes, func, map, NULL);
+       if (fail != ~0ULL) {
+               if ((size = fail - addr))
+                       nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
                return -ENOMEM;
        }
        return 0;
 }
 
-static inline struct nvkm_vma *
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+                     u64 addr, u64 size, struct nvkm_vmm_map *map,
+                     nvkm_vmm_pte_func func)
+{
+       int ret;
+
+       if (vmm->managed.raw) {
+               ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
+               if (ret)
+                       return ret;
+
+               nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
+
+               return 0;
+       } else {
+               return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
+       }
+}
+
+struct nvkm_vma *
 nvkm_vma_new(u64 addr, u64 size)
 {
        struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
        vmm->debug = mmu->subdev.debug;
        kref_init(&vmm->kref);
 
-       __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+       __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
+       mutex_init(&vmm->mutex.ref);
+       mutex_init(&vmm->mutex.map);
 
        /* Locate the smallest page size supported by the backend, it will
         * have the deepest nesting of page tables.
@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
                if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
                        return ret;
 
+               vmm->managed.p.addr = 0;
+               vmm->managed.p.size = addr;
+
                /* NVKM-managed area. */
                if (size) {
                        if (!(vma = nvkm_vma_new(addr, size)))
@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
                size = vmm->limit - addr;
                if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
                        return ret;
+
+               vmm->managed.n.addr = addr;
+               vmm->managed.n.size = size;
        } else {
                /* Address-space fully managed by NVKM, requiring calls to
                 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
@@ -1362,9 +1422,9 @@ void
 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
        if (vma->memory) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                nvkm_vmm_unmap_locked(vmm, vma, false);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
        }
 }
 
@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
        nvkm_vmm_pte_func func;
        int ret;
 
+       map->no_comp = vma->no_comp;
+
        /* Make sure we won't overrun the end of the memory object. */
        if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
                VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
             struct nvkm_vmm_map *map)
 {
        int ret;
-       mutex_lock(&vmm->mutex);
+
+       if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
+           vmm->managed.raw)
+               return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+
+       mutex_lock(&vmm->mutex.vmm);
        ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
        vma->busy = false;
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
        return ret;
 }
 
@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
 {
        struct nvkm_vma *vma = *pvma;
        if (vma) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                nvkm_vmm_put_locked(vmm, vma);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
                *pvma = NULL;
        }
 }
@@ -1769,9 +1836,49 @@ int
 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 {
        int ret;
-       mutex_lock(&vmm->mutex);
+       mutex_lock(&vmm->mutex.vmm);
        ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
-       mutex_unlock(&vmm->mutex);
+       mutex_unlock(&vmm->mutex.vmm);
+       return ret;
+}
+
+void
+nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+                  bool sparse, u8 refd)
+{
+       const struct nvkm_vmm_page *page = &vmm->func->page[refd];
+
+       nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
+}
+
+void
+nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+       const struct nvkm_vmm_page *page = vmm->func->page;
+
+       nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+       const struct nvkm_vmm_page *page = vmm->func->page;
+
+       if (unlikely(!size))
+               return -EINVAL;
+
+       return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+       int ret;
+
+       mutex_lock(&vmm->mutex.ref);
+       ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
+       mutex_unlock(&vmm->mutex.ref);
+
        return ret;
 }
 
@@ -1779,9 +1886,9 @@ void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        if (inst && vmm && vmm->func->part) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                vmm->func->part(vmm, inst);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
        }
 }
 
@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        int ret = 0;
        if (vmm->func->join) {
-               mutex_lock(&vmm->mutex);
+               mutex_lock(&vmm->mutex.vmm);
                ret = vmm->func->join(vmm, inst);
-               mutex_unlock(&vmm->mutex);
+               mutex_unlock(&vmm->mutex.vmm);
        }
        return ret;
 }
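
The single vmm->mutex becomes three: mutex.vmm still guards the VMA tree, while mutex.ref (PTE reference counts) and mutex.map (PTE writes) let raw-mode maps proceed without the tree lock, which is why nvkm_vmm_map() can call nvkm_vmm_map_locked() directly for raw managed ranges. A pthread sketch of the split, assuming the simplified operations below:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER; /* mutex.ref */
    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER; /* mutex.map */

    static void ptes_get(void)  /* reference page tables (nvkm_vmm_ptes_get) */
    {
            pthread_mutex_lock(&ref_lock);
            puts("ref PTEs");
            pthread_mutex_unlock(&ref_lock);
    }

    static void ptes_map(void)  /* write PTEs (nvkm_vmm_ptes_map) */
    {
            pthread_mutex_lock(&map_lock);
            puts("map PTEs");
            pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
            /* Raw path of nvkm_vmm_ptes_get_map(): two short critical
             * sections instead of one combined "ref + map" walk, so two
             * raw maps to different ranges only contend briefly. */
            ptes_get();
            ptes_map();
            return 0;
    }
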
index f6188aa9171cdc6b0a8c3278e51a9a3e1a1dee4b..f9bc30cdb2b3e3361006301bc5207b4c14542b79 100644 (file)
@@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                  u32 pd_header, bool managed, u64 addr, u64 size,
                  struct lock_class_key *, const char *name,
                  struct nvkm_vmm **);
+struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
 struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
                                     u64 addr, u64 size);
@@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
 
+int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+                       bool sparse, u8 refd);
+int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
+
+static inline bool
+nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
+{
+       u64 p_start = vmm->managed.p.addr;
+       u64 p_end = p_start + vmm->managed.p.size;
+       u64 n_start = vmm->managed.n.addr;
+       u64 n_end = n_start + vmm->managed.n.size;
+       u64 end = start + size;
+
+       if (start >= p_start && end <= p_end)
+               return true;
+
+       if (start >= n_start && end <= n_end)
+               return true;
+
+       return false;
+}
+
 #define NVKM_VMM_PFN_ADDR                                 0xfffffffffffff000ULL
 #define NVKM_VMM_PFN_ADDR_SHIFT                                              12
 #define NVKM_VMM_PFN_APER                                 0x00000000000000f0ULL
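
Note that the raw entry points declared here take no lock of their own; the caller is expected to serialize. A hypothetical get/unmap/put cycle built only from these declarations (refd indexes vmm->func->page, i.e. selects the page size):

/* hypothetical usage sketch, not taken from the patch itself */
static int demo_raw_cycle(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
        int ret;

        if (!nvkm_vmm_in_managed_range(vmm, addr, size))
                return -EINVAL;

        ret = nvkm_vmm_raw_get(vmm, addr, size, refd);  /* reference the PTs */
        if (ret)
                return ret;

        /* ... populate and use PTEs in [addr, addr + size) ... */

        nvkm_vmm_raw_unmap(vmm, addr, size, false, refd);
        nvkm_vmm_raw_put(vmm, addr, size, refd);        /* drop PT references */
        return 0;
}

nvkm_vmm_in_managed_range() itself is a plain containment test: the span must sit wholly inside either the low (p) or high (n) managed region.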
index 5438384d9a67445625fce05b77565704424495d5..5e857c02e9aab299fc699b82730e9f143107dd99 100644 (file)
@@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                        return -EINVAL;
                }
 
-               ret = nvkm_memory_tags_get(memory, device, tags,
-                                          nvkm_ltc_tags_clear,
-                                          &map->tags);
-               if (ret) {
-                       VMM_DEBUG(vmm, "comp %d", ret);
-                       return ret;
+               if (!map->no_comp) {
+                       ret = nvkm_memory_tags_get(memory, device, tags,
+                                                  nvkm_ltc_tags_clear,
+                                                  &map->tags);
+                       if (ret) {
+                               VMM_DEBUG(vmm, "comp %d", ret);
+                               return ret;
+                       }
                }
 
-               if (map->tags->mn) {
+               if (!map->no_comp && map->tags->mn) {
                        u64 tags = map->tags->mn->offset + (map->offset >> 17);
                        if (page->shift == 17 || !gm20x) {
                                map->type |= tags << 44;
index 17899fc95b2d99d7d3be57b0adffce71169c6dce..f3630d0e0d55d8f1d53184c09f675aae25a36485 100644 (file)
@@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                        return -EINVAL;
                }
 
-               ret = nvkm_memory_tags_get(memory, device, tags,
-                                          nvkm_ltc_tags_clear,
-                                          &map->tags);
-               if (ret) {
-                       VMM_DEBUG(vmm, "comp %d", ret);
-                       return ret;
+               if (!map->no_comp) {
+                       ret = nvkm_memory_tags_get(memory, device, tags,
+                                                  nvkm_ltc_tags_clear,
+                                                  &map->tags);
+                       if (ret) {
+                               VMM_DEBUG(vmm, "comp %d", ret);
+                               return ret;
+                       }
                }
 
-               if (map->tags->mn) {
+               if (!map->no_comp && map->tags->mn) {
                        tags = map->tags->mn->offset + (map->offset >> 16);
                        map->ctag |= ((1ULL << page->shift) >> 16) << 36;
                        map->type |= tags << 36;
index b7548dcd72c779d68885f56357f8ceb148c90ee1..ff08ad5005a92ddb5d3918c08a3ed6b8bde84a8f 100644 (file)
@@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
                        return -EINVAL;
                }
 
-               ret = nvkm_memory_tags_get(memory, device, tags, NULL,
-                                          &map->tags);
-               if (ret) {
-                       VMM_DEBUG(vmm, "comp %d", ret);
-                       return ret;
-               }
+               if (!map->no_comp) {
+                       ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+                                                  &map->tags);
+                       if (ret) {
+                               VMM_DEBUG(vmm, "comp %d", ret);
+                               return ret;
+                       }
 
-               if (map->tags->mn) {
-                       u32 tags = map->tags->mn->offset + (map->offset >> 16);
-                       map->ctag |= (u64)comp << 49;
-                       map->type |= (u64)comp << 47;
-                       map->type |= (u64)tags << 49;
-                       map->next |= map->ctag;
+                       if (map->tags->mn) {
+                               u32 tags = map->tags->mn->offset +
+                                          (map->offset >> 16);
+                               map->ctag |= (u64)comp << 49;
+                               map->type |= (u64)comp << 47;
+                               map->type |= (u64)tags << 49;
+                               map->next |= map->ctag;
+                       }
                }
        }
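
The same guard lands in all three generations touched here (gf100, gp100, nv50): when a caller sets map->no_comp, compression tags are neither looked up nor folded into the PTE bits. Reduced to its pattern (clear_fn stands in for nvkm_ltc_tags_clear on gf100/gp100 and NULL on nv50; the tag bit layout differs per generation as the hunks show):

if (!map->no_comp) {
        ret = nvkm_memory_tags_get(memory, device, tags,
                                   clear_fn, &map->tags);
        if (ret)
                return ret;

        if (map->tags->mn) {
                /* fold the tag offset into map->type / map->ctag */
        }
}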
 
index b4ac76c9f31bffc75281eaed198a696da72952c1..b715301ec79f66d5cee22f7d0ea12ccd5568a673 100644 (file)
@@ -4,7 +4,7 @@ config DRM_OMAP
        depends on DRM && OF
        depends on ARCH_OMAP2PLUS
        select DRM_KMS_HELPER
-       select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+       select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
        select VIDEOMODE_HELPERS
        select HDMI
        default n
index aacad5045e958d02d58c3b651e10d0a454f85069..c26aab4939fa00c6c1ffc81f06711904c8c141b8 100644 (file)
@@ -4858,10 +4858,9 @@ static int dispc_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &dispc_component_ops);
 }
 
-static int dispc_remove(struct platform_device *pdev)
+static void dispc_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &dispc_component_ops);
-       return 0;
 }
 
 static __maybe_unused int dispc_runtime_suspend(struct device *dev)
@@ -4913,7 +4912,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
 
 struct platform_driver omap_dispchw_driver = {
        .probe          = dispc_probe,
-       .remove         = dispc_remove,
+       .remove_new     = dispc_remove,
        .driver         = {
                .name   = "omapdss_dispc",
                .pm     = &dispc_pm_ops,
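
Every omapdss sub-driver below gets the same mechanical conversion: the remove callback loses its always-zero int return and is hooked up through the transitional .remove_new member of struct platform_driver. A minimal sketch of the converted shape (demo_* names are placeholders):

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        /* teardown only; nothing useful can be reported here */
}

static struct platform_driver demo_driver = {
        .probe          = demo_probe,
        .remove_new     = demo_remove,  /* void-returning callback */
        .driver         = {
                .name   = "demo",
        },
};

A side effect shows up in the omap_dmm hunks further down: a void remove function can be called directly from the probe error path without a bogus return-value check.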
index 4c1084eb01759e938e763ceb93c9b73c8555da2c..ea63c64d3a1abea20820c282cafc75c4c6b7d573 100644 (file)
@@ -5044,7 +5044,7 @@ err_pm_disable:
        return r;
 }
 
-static int dsi_remove(struct platform_device *pdev)
+static void dsi_remove(struct platform_device *pdev)
 {
        struct dsi_data *dsi = platform_get_drvdata(pdev);
 
@@ -5060,8 +5060,6 @@ static int dsi_remove(struct platform_device *pdev)
                regulator_disable(dsi->vdds_dsi_reg);
                dsi->vdds_dsi_enabled = false;
        }
-
-       return 0;
 }
 
 static __maybe_unused int dsi_runtime_suspend(struct device *dev)
@@ -5095,7 +5093,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
 
 struct platform_driver omap_dsihw_driver = {
        .probe          = dsi_probe,
-       .remove         = dsi_remove,
+       .remove_new     = dsi_remove,
        .driver         = {
                .name   = "omapdss_dsi",
                .pm     = &dsi_pm_ops,
index c4febb8619103721c3aad7787f7aa79bece2a751..02955f976845967f5fb1aa9645ee2282b4516767 100644 (file)
@@ -1532,7 +1532,7 @@ err_free_dss:
        return r;
 }
 
-static int dss_remove(struct platform_device *pdev)
+static void dss_remove(struct platform_device *pdev)
 {
        struct dss_device *dss = platform_get_drvdata(pdev);
 
@@ -1557,8 +1557,6 @@ static int dss_remove(struct platform_device *pdev)
        dss_put_clocks(dss);
 
        kfree(dss);
-
-       return 0;
 }
 
 static void dss_shutdown(struct platform_device *pdev)
@@ -1607,7 +1605,7 @@ static const struct dev_pm_ops dss_pm_ops = {
 
 struct platform_driver omap_dsshw_driver = {
        .probe          = dss_probe,
-       .remove         = dss_remove,
+       .remove_new     = dss_remove,
        .shutdown       = dss_shutdown,
        .driver         = {
                .name   = "omapdss_dss",
index a8a75dc247512c165b9d66d078980668dfa34152..a26b77d99d52ae111f4901114fa35e667d869e27 100644 (file)
@@ -824,7 +824,7 @@ err_free:
        return r;
 }
 
-static int hdmi4_remove(struct platform_device *pdev)
+static void hdmi4_remove(struct platform_device *pdev)
 {
        struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
 
@@ -835,7 +835,6 @@ static int hdmi4_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        kfree(hdmi);
-       return 0;
 }
 
 static const struct of_device_id hdmi_of_match[] = {
@@ -845,7 +844,7 @@ static const struct of_device_id hdmi_of_match[] = {
 
 struct platform_driver omapdss_hdmi4hw_driver = {
        .probe          = hdmi4_probe,
-       .remove         = hdmi4_remove,
+       .remove_new     = hdmi4_remove,
        .driver         = {
                .name   = "omapdss_hdmi",
                .of_match_table = hdmi_of_match,
index 868712cd8a3a3084c23ab987778c95d23bc4cdab..e6611c683857a51d330039a4242b0fa922f5af2c 100644 (file)
@@ -798,7 +798,7 @@ err_free:
        return r;
 }
 
-static int hdmi5_remove(struct platform_device *pdev)
+static void hdmi5_remove(struct platform_device *pdev)
 {
        struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
 
@@ -809,7 +809,6 @@ static int hdmi5_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        kfree(hdmi);
-       return 0;
 }
 
 static const struct of_device_id hdmi_of_match[] = {
@@ -820,7 +819,7 @@ static const struct of_device_id hdmi_of_match[] = {
 
 struct platform_driver omapdss_hdmi5hw_driver = {
        .probe          = hdmi5_probe,
-       .remove         = hdmi5_remove,
+       .remove_new     = hdmi5_remove,
        .driver         = {
                .name   = "omapdss_hdmi5",
                .of_match_table = hdmi_of_match,
index 4480b69ab5a78236bf25da6d9bcdfff4c438d5db..f163d52a7c7daec8e732ed3a37d5e0e60c7b1401 100644 (file)
@@ -865,7 +865,7 @@ err_free:
        return r;
 }
 
-static int venc_remove(struct platform_device *pdev)
+static void venc_remove(struct platform_device *pdev)
 {
        struct venc_device *venc = platform_get_drvdata(pdev);
 
@@ -876,7 +876,6 @@ static int venc_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        kfree(venc);
-       return 0;
 }
 
 static __maybe_unused int venc_runtime_suspend(struct device *dev)
@@ -913,7 +912,7 @@ static const struct of_device_id venc_of_match[] = {
 
 struct platform_driver omap_venchw_driver = {
        .probe          = venc_probe,
-       .remove         = venc_remove,
+       .remove_new     = venc_remove,
        .driver         = {
                .name   = "omapdss_venc",
                .pm     = &venc_pm_ops,
index 61a27dd7392eaf49887c0fb063b7db9b025ac4b3..9753c1e1f9944fcd00240ccde88a352a825b27e6 100644 (file)
@@ -723,7 +723,7 @@ bool dmm_is_available(void)
        return omap_dmm ? true : false;
 }
 
-static int omap_dmm_remove(struct platform_device *dev)
+static void omap_dmm_remove(struct platform_device *dev)
 {
        struct tiler_block *block, *_block;
        int i;
@@ -763,8 +763,6 @@ static int omap_dmm_remove(struct platform_device *dev)
                kfree(omap_dmm);
                omap_dmm = NULL;
        }
-
-       return 0;
 }
 
 static int omap_dmm_probe(struct platform_device *dev)
@@ -982,8 +980,7 @@ static int omap_dmm_probe(struct platform_device *dev)
        return 0;
 
 fail:
-       if (omap_dmm_remove(dev))
-               dev_err(&dev->dev, "cleanup failed\n");
+       omap_dmm_remove(dev);
        return ret;
 }
 
@@ -1213,7 +1210,7 @@ static const struct of_device_id dmm_of_match[] = {
 
 struct platform_driver omap_dmm_driver = {
        .probe = omap_dmm_probe,
-       .remove = omap_dmm_remove,
+       .remove_new = omap_dmm_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = DMM_DRIVER_NAME,
index 5ed549726104331776235bd76fb52a31bde9469a..afeeb773755252cb5d34bbd63ad0106234a5d267 100644 (file)
@@ -636,17 +636,7 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-static const struct file_operations omapdriver_fops = {
-       .owner = THIS_MODULE,
-       .open = drm_open,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl = drm_compat_ioctl,
-       .release = drm_release,
-       .mmap = omap_gem_mmap,
-       .poll = drm_poll,
-       .read = drm_read,
-       .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(omapdriver_fops);
 
 static const struct drm_driver omap_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM  |
@@ -655,8 +645,6 @@ static const struct drm_driver omap_drm_driver = {
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init = omap_debugfs_init,
 #endif
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = omap_gem_prime_import,
        .dumb_create = omap_gem_dumb_create,
        .dumb_map_offset = omap_gem_dumb_map_offset,
@@ -821,14 +809,12 @@ static int pdev_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int pdev_remove(struct platform_device *pdev)
+static void pdev_remove(struct platform_device *pdev)
 {
        struct omap_drm_private *priv = platform_get_drvdata(pdev);
 
        omapdrm_cleanup(priv);
        kfree(priv);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -859,7 +845,7 @@ static struct platform_driver pdev = {
                .pm = &omapdrm_pm_ops,
        },
        .probe = pdev_probe,
-       .remove = pdev_remove,
+       .remove_new = pdev_remove,
 };
 
 static struct platform_driver * const drivers[] = {
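
DEFINE_DRM_GEM_FOPS replaces the open-coded table; per include/drm/drm_gem.h of this era it expands to roughly the same file_operations but with drm_gem_mmap as the .mmap handler, which is what lets omap_gem_mmap be deleted later in this diff:

/* approximate expansion, for orientation only */
static const struct file_operations omapdriver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
        .mmap           = drm_gem_mmap,
};

The dropped .prime_handle_to_fd/.prime_fd_to_handle hooks appear to be covered by core defaults now, leaving only the driver-specific gem_prime_import.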
index b7ccce0704a39de45e17983a9dc5056efc080e09..6b08b137af1ad8847cae2f7bb5e4c5651f5c1151 100644 (file)
@@ -76,6 +76,15 @@ fallback:
        return drm_fb_helper_pan_display(var, fbi);
 }
 
+static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct drm_framebuffer *fb = helper->fb;
+       struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
+
+       return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
+}
+
 static void omap_fbdev_fb_destroy(struct fb_info *info)
 {
        struct drm_fb_helper *helper = info->par;
@@ -97,14 +106,16 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops omap_fb_ops = {
        .owner = THIS_MODULE,
-       FB_DEFAULT_SYS_OPS,
+       __FB_DEFAULT_DMAMEM_OPS_RDWR,
        .fb_check_var   = drm_fb_helper_check_var,
        .fb_set_par     = drm_fb_helper_set_par,
        .fb_setcmap     = drm_fb_helper_setcmap,
        .fb_blank       = drm_fb_helper_blank,
        .fb_pan_display = omap_fbdev_pan_display,
+       __FB_DEFAULT_DMAMEM_OPS_DRAW,
        .fb_ioctl       = drm_fb_helper_ioctl,
-       .fb_destroy = omap_fbdev_fb_destroy,
+       .fb_mmap        = omap_fbdev_fb_mmap,
+       .fb_destroy     = omap_fbdev_fb_destroy,
 };
 
 static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -196,6 +207,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 
        drm_fb_helper_fill_info(fbi, helper, sizes);
 
+       fbi->flags |= FBINFO_VIRTFB;
        fbi->screen_buffer = omap_gem_vaddr(bo);
        fbi->screen_size = bo->size;
        fbi->fix.smem_start = dma_addr;
@@ -318,10 +330,6 @@ void omap_fbdev_setup(struct drm_device *dev)
 
        INIT_WORK(&fbdev->work, pan_worker);
 
-       ret = omap_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
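
omapdrm's fbdev buffer lives in kernel virtual memory, hence the switch from the SYS fb helpers to the DMAMEM ones (matching the FB_DMAMEM_HELPERS Kconfig change earlier in this diff) and the new FBINFO_VIRTFB flag. If memory serves, the two helper macros expand roughly to:

/* approximate expansion of the <linux/fb.h> helpers used above */
#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
        .fb_read        = fb_sys_read, \
        .fb_write       = fb_sys_write

#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
        .fb_fillrect    = sys_fillrect, \
        .fb_copyarea    = sys_copyarea, \
        .fb_imageblit   = sys_imageblit

The defaults provide no mmap, so the new omap_fbdev_fb_mmap routes straight to drm_gem_mmap_obj() on the backing GEM object.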
index 6b58a5bb7b44b4ccdf9b9e251a9e256f9a0dc9b7..c48fa531ca321c6071278f314bf140309e3b3e14 100644 (file)
@@ -524,26 +524,11 @@ fail:
        return ret;
 }
 
-/** We override mainly to fix up some of the vm mapping flags.. */
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       int ret;
-
-       ret = drm_gem_mmap(filp, vma);
-       if (ret) {
-               DBG("mmap failed: %d", ret);
-               return ret;
-       }
-
-       return omap_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
-               struct vm_area_struct *vma)
+static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-       vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
 
        if (omap_obj->flags & OMAP_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -563,12 +548,14 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
-               vma->vm_pgoff = 0;
+               vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
                vma_set_file(vma, obj->filp);
 
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }
 
+       vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
        return 0;
 }
 
@@ -1282,6 +1269,7 @@ static const struct vm_operations_struct omap_gem_vm_ops = {
 static const struct drm_gem_object_funcs omap_gem_object_funcs = {
        .free = omap_gem_free_object,
        .export = omap_gem_prime_export,
+       .mmap = omap_gem_object_mmap,
        .vm_ops = &omap_gem_vm_ops,
 };
 
index 4d4488939f6bdcd7f3e884f23fa88f88a550b00d..fec3fa0e4c33e7d546006ed6a9512de0cab0571e 100644 (file)
@@ -57,9 +57,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 
 /* mmap() Interface */
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
-               struct vm_area_struct *vma);
 u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
 size_t omap_gem_mmap_size(struct drm_gem_object *obj);
 
index 3abc47521b2c97b7bb237f22a3079f4805b5d5db..36f9ee4baad3734b2f47e86fea165de08544c05f 100644 (file)
@@ -64,15 +64,8 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
                struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = buffer->priv;
-       int ret = 0;
 
-       dma_resv_assert_held(buffer->resv);
-
-       ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
-       if (ret < 0)
-               return ret;
-
-       return omap_gem_mmap_obj(obj, vma);
+       return drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 }
 
 static const struct dma_buf_ops omap_dmabuf_ops = {
index 203c0ef0bbfdf56e0d2e459b538011865352ab8a..869e535faefa38727dc960fd4cb8c7fabb260356 100644 (file)
@@ -300,6 +300,7 @@ config DRM_PANEL_LEADTEK_LTK500HD1829
 config DRM_PANEL_SAMSUNG_LD9040
        tristate "Samsung LD9040 RGB/SPI panel"
        depends on OF && SPI
+       depends on BACKLIGHT_CLASS_DEVICE
        select VIDEOMODE_HELPERS
 
 config DRM_PANEL_LG_LB035Q02
@@ -733,6 +734,17 @@ config DRM_PANEL_SONY_TULIP_TRULY_NT35521
          NT35521 1280x720 video mode panel as found on Sony Xperia M4
          Aqua phone.
 
+config DRM_PANEL_STARTEK_KD070FHFID015
+       tristate "STARTEK KD070FHFID015 panel"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for the STARTEK KD070FHFID015 DSI
+         panel based on the RENESAS-R69429 controller. The panel is a 7-inch TFT LCD
+         display with a resolution of 1024 x 600 pixels. It provides a MIPI DSI
+         interface to the host, a built-in LED backlight and a touch controller.
+
 config DRM_PANEL_TDO_TL070WSH30
        tristate "TDO TL070WSH30 DSI panel"
        depends on OF
@@ -793,6 +805,17 @@ config DRM_PANEL_VISIONOX_VTDR6130
          Say Y here if you want to enable support for Visionox
          VTDR6130 1080x2400 AMOLED DSI panel.
 
+config DRM_PANEL_VISIONOX_R66451
+       tristate "Visionox R66451"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       select DRM_DISPLAY_DP_HELPER
+       select DRM_DISPLAY_HELPER
+       help
+         Say Y here if you want to enable support for Visionox
+         R66451 1080x2340 AMOLED DSI panel.
+
 config DRM_PANEL_WIDECHIPS_WS2401
        tristate "Widechips WS2401 DPI panel driver"
        depends on SPI && GPIOLIB
index 30cf553c8d1db4d4c21f1f8c4af287c63b2c232d..433e93d57949c86f912395ca2050eb3f5704db2c 100644 (file)
@@ -74,6 +74,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
 obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
 obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
+obj-$(CONFIG_DRM_PANEL_STARTEK_KD070FHFID015) += panel-startek-kd070fhfid015.o
 obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
@@ -81,5 +82,6 @@ obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
 obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
 obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
 obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_R66451) += panel-visionox-r66451.o
 obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
 obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
index 1cc0f1d09684c8b620cdd3199b1401d38fac4079..662c7bcbe6e5e9419a680131a262078f971300fe 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index 3c976a98de6a2baaf1da61693e3493abe313d197..6c86ebf2cad7b13e1ce1497ceaf040836ddcb279 100644 (file)
@@ -11,8 +11,8 @@
 #include <linux/device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index d879b3b14c484922019ead73f15b8df299a46e21..11b64acbe8a9f23107b6c5118b4d114c446c4957 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 
 #include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
index dc276c346fd1a0d982215b6994fb760ac5fe5b80..5ac926281d2c65e1c6e7b652a29570940a33f017 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_connector.h>
index ba17bcc4461c76472acce1abfe8b4a429bd2f03d..6b3f4d664d2ade668525660394cf81924436492e 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_connector.h>
index fbd114b4f0be0b379f9929c06495a95421a23838..feb665df35a1d925b0739c2ae350c660e9db772d 100644 (file)
@@ -1866,6 +1866,7 @@ static const struct panel_delay delay_200_500_e200 = {
  */
 static const struct edp_panel_entry edp_panels[] = {
        EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+       EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
        EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
        EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
        EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
@@ -1889,6 +1890,7 @@ static const struct edp_panel_entry edp_panels[] = {
        EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
        EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
        EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+       EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
 
        EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
        EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
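
For context, EDP_PANEL_ENTRY keys this table by EDID panel id (three-letter PNP vendor code plus a 16-bit product code), so the generic edp-panel driver can pick the right power-sequencing delays from whatever the display reports. The macro is roughly:

/* approximate shape of the macro used above */
#define EDP_PANEL_ENTRY(v0, v1, v2, _product_id, _delay, _name) \
{ \
        .panel_id = drm_edid_encode_panel_id(v0, v1, v2, _product_id), \
        .delay    = _delay, \
        .name     = _name, \
}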
index 76572c9229836eb8c687dc7c0214b38199eb8827..986e3e192881b50ad3cdbbbc41a2adbf77e8791a 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/delay.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_mipi_dsi.h>
index df493da50afece6b7faa1d2f87458e8cfdeb38ea..48e3acaecdf33de5b82c3eea1c44f0409ebf5f8f 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/regulator/consumer.h>
 
 #define FEIYANG_INIT_CMD_LEN   2
index d4fb5d1b295bead221f59f2d77f5b3e3c6278937..c73243d85de71875db8b489125efbf3856afdf5e 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/media-bus-format.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
index 3dfafa585127ff909a9b389b8da114e9474379db..61c872f0f7ca882c6e83b22b07f42948b1987ed6 100644 (file)
@@ -22,7 +22,8 @@
 #include <linux/bitops.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index 3fdf884b3257fcdcfc853c4149808a2c8aeb15b6..3574681891e816f5f32a012814e22f7335f687d2 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
index 1ec696adf9ded5ec90a2206e62482b1f7b2284e6..7838947a1bf3c95e836559dd82b2854f9c1af78f 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 
 #include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
@@ -455,6 +455,174 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
        ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
 };
 
+static const struct ili9881c_instr tl050hdv35_init[] = {
+       ILI9881C_SWITCH_PAGE_INSTR(3),
+       ILI9881C_COMMAND_INSTR(0x01, 0x00),
+       ILI9881C_COMMAND_INSTR(0x02, 0x00),
+       ILI9881C_COMMAND_INSTR(0x03, 0x73),
+       ILI9881C_COMMAND_INSTR(0x04, 0x00),
+       ILI9881C_COMMAND_INSTR(0x05, 0x00),
+       ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+       ILI9881C_COMMAND_INSTR(0x07, 0x00),
+       ILI9881C_COMMAND_INSTR(0x08, 0x00),
+       ILI9881C_COMMAND_INSTR(0x09, 0x01),
+       ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+       ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0f, 0x1d),
+       ILI9881C_COMMAND_INSTR(0x10, 0x1d),
+       ILI9881C_COMMAND_INSTR(0x15, 0x00),
+       ILI9881C_COMMAND_INSTR(0x16, 0x00),
+       ILI9881C_COMMAND_INSTR(0x17, 0x00),
+       ILI9881C_COMMAND_INSTR(0x18, 0x00),
+       ILI9881C_COMMAND_INSTR(0x19, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+       ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+       ILI9881C_COMMAND_INSTR(0x20, 0x06),
+       ILI9881C_COMMAND_INSTR(0x21, 0x02),
+       ILI9881C_COMMAND_INSTR(0x28, 0x33),
+       ILI9881C_COMMAND_INSTR(0x29, 0x03),
+       ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x35, 0x00),
+       ILI9881C_COMMAND_INSTR(0x36, 0x00),
+       ILI9881C_COMMAND_INSTR(0x37, 0x00),
+       ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+       ILI9881C_COMMAND_INSTR(0x39, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+       ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+       ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x40, 0x00),
+       ILI9881C_COMMAND_INSTR(0x41, 0x00),
+       ILI9881C_COMMAND_INSTR(0x42, 0x00),
+       ILI9881C_COMMAND_INSTR(0x43, 0x00),
+       ILI9881C_COMMAND_INSTR(0x44, 0x00),
+       ILI9881C_COMMAND_INSTR(0x55, 0xab),
+       ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+       ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+       ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+       ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+       ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+       ILI9881C_COMMAND_INSTR(0x5f, 0x01),
+       ILI9881C_COMMAND_INSTR(0x60, 0x00),
+       ILI9881C_COMMAND_INSTR(0x61, 0x15),
+       ILI9881C_COMMAND_INSTR(0x62, 0x14),
+       ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+       ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+       ILI9881C_COMMAND_INSTR(0x65, 0x0c),
+       ILI9881C_COMMAND_INSTR(0x66, 0x0d),
+       ILI9881C_COMMAND_INSTR(0x67, 0x06),
+       ILI9881C_COMMAND_INSTR(0x68, 0x02),
+       ILI9881C_COMMAND_INSTR(0x69, 0x07),
+       ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+       ILI9881C_COMMAND_INSTR(0x70, 0x02),
+       ILI9881C_COMMAND_INSTR(0x71, 0x02),
+       ILI9881C_COMMAND_INSTR(0x72, 0x02),
+       ILI9881C_COMMAND_INSTR(0x73, 0x02),
+       ILI9881C_COMMAND_INSTR(0x74, 0x02),
+       ILI9881C_COMMAND_INSTR(0x75, 0x01),
+       ILI9881C_COMMAND_INSTR(0x76, 0x00),
+       ILI9881C_COMMAND_INSTR(0x77, 0x14),
+       ILI9881C_COMMAND_INSTR(0x78, 0x15),
+       ILI9881C_COMMAND_INSTR(0x79, 0x0e),
+       ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+       ILI9881C_COMMAND_INSTR(0x7b, 0x0c),
+       ILI9881C_COMMAND_INSTR(0x7c, 0x0d),
+       ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+       ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7f, 0x07),
+       ILI9881C_COMMAND_INSTR(0x88, 0x02),
+       ILI9881C_COMMAND_INSTR(0x89, 0x02),
+       ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+       ILI9881C_SWITCH_PAGE_INSTR(4),
+       ILI9881C_COMMAND_INSTR(0x38, 0x01),
+       ILI9881C_COMMAND_INSTR(0x39, 0x00),
+       ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+       ILI9881C_COMMAND_INSTR(0x6e, 0x2b),
+       ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+       ILI9881C_COMMAND_INSTR(0x8d, 0x18),
+       ILI9881C_COMMAND_INSTR(0x87, 0xba),
+       ILI9881C_COMMAND_INSTR(0x26, 0x76),
+       ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+       ILI9881C_COMMAND_INSTR(0xb5, 0x06),
+       ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+       ILI9881C_COMMAND_INSTR(0x35, 0x1f),
+       ILI9881C_COMMAND_INSTR(0x33, 0x14),
+       ILI9881C_COMMAND_INSTR(0x3b, 0x98),
+       ILI9881C_SWITCH_PAGE_INSTR(1),
+       ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+       ILI9881C_COMMAND_INSTR(0x31, 0x00),
+       ILI9881C_COMMAND_INSTR(0x40, 0x33),
+       ILI9881C_COMMAND_INSTR(0x53, 0xa2),
+       ILI9881C_COMMAND_INSTR(0x55, 0x92),
+       ILI9881C_COMMAND_INSTR(0x50, 0x96),
+       ILI9881C_COMMAND_INSTR(0x51, 0x96),
+       ILI9881C_COMMAND_INSTR(0x60, 0x22),
+       ILI9881C_COMMAND_INSTR(0x61, 0x00),
+       ILI9881C_COMMAND_INSTR(0x62, 0x19),
+       ILI9881C_COMMAND_INSTR(0x63, 0x00),
+       ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+       ILI9881C_COMMAND_INSTR(0xa1, 0x11),
+       ILI9881C_COMMAND_INSTR(0xa2, 0x19),
+       ILI9881C_COMMAND_INSTR(0xa3, 0x0d),
+       ILI9881C_COMMAND_INSTR(0xa4, 0x0d),
+       ILI9881C_COMMAND_INSTR(0xa5, 0x1e),
+       ILI9881C_COMMAND_INSTR(0xa6, 0x14),
+       ILI9881C_COMMAND_INSTR(0xa7, 0x17),
+       ILI9881C_COMMAND_INSTR(0xa8, 0x4f),
+       ILI9881C_COMMAND_INSTR(0xa9, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xaa, 0x27),
+       ILI9881C_COMMAND_INSTR(0xab, 0x49),
+       ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xad, 0x18),
+       ILI9881C_COMMAND_INSTR(0xae, 0x4c),
+       ILI9881C_COMMAND_INSTR(0xaf, 0x22),
+       ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+       ILI9881C_COMMAND_INSTR(0xb1, 0x4b),
+       ILI9881C_COMMAND_INSTR(0xb2, 0x60),
+       ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+       ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+       ILI9881C_COMMAND_INSTR(0xc1, 0x11),
+       ILI9881C_COMMAND_INSTR(0xc2, 0x19),
+       ILI9881C_COMMAND_INSTR(0xc3, 0x0d),
+       ILI9881C_COMMAND_INSTR(0xc4, 0x0d),
+       ILI9881C_COMMAND_INSTR(0xc5, 0x1e),
+       ILI9881C_COMMAND_INSTR(0xc6, 0x14),
+       ILI9881C_COMMAND_INSTR(0xc7, 0x17),
+       ILI9881C_COMMAND_INSTR(0xc8, 0x4f),
+       ILI9881C_COMMAND_INSTR(0xc9, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xca, 0x27),
+       ILI9881C_COMMAND_INSTR(0xcb, 0x49),
+       ILI9881C_COMMAND_INSTR(0xcc, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+       ILI9881C_COMMAND_INSTR(0xce, 0x4c),
+       ILI9881C_COMMAND_INSTR(0xcf, 0x33),
+       ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+       ILI9881C_COMMAND_INSTR(0xd1, 0x4b),
+       ILI9881C_COMMAND_INSTR(0xd2, 0x60),
+       ILI9881C_COMMAND_INSTR(0xd3, 0x39),
+       ILI9881C_SWITCH_PAGE_INSTR(0),
+       ILI9881C_COMMAND_INSTR(0x36, 0x03),
+};
+
 static const struct ili9881c_instr w552946ab_init[] = {
        ILI9881C_SWITCH_PAGE_INSTR(3),
        ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -812,6 +980,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
        .height_mm      = 217,
 };
 
+static const struct drm_display_mode tl050hdv35_default_mode = {
+       .clock          = 59400,
+
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 18,
+       .hsync_end      = 720 + 18 + 3,
+       .htotal         = 720 + 18 + 3 + 20,
+
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 26,
+       .vsync_end      = 1280 + 26 + 6,
+       .vtotal         = 1280 + 26 + 6 + 28,
+
+       .width_mm       = 62,
+       .height_mm      = 110,
+};
+
 static const struct drm_display_mode w552946aba_default_mode = {
        .clock          = 64000,
 
@@ -944,6 +1129,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
        .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
 };
 
+static const struct ili9881c_desc tl050hdv35_desc = {
+       .init = tl050hdv35_init,
+       .init_length = ARRAY_SIZE(tl050hdv35_init),
+       .mode = &tl050hdv35_default_mode,
+       .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                     MIPI_DSI_MODE_LPM,
+};
+
 static const struct ili9881c_desc w552946aba_desc = {
        .init = w552946ab_init,
        .init_length = ARRAY_SIZE(w552946ab_init),
@@ -955,6 +1148,7 @@ static const struct ili9881c_desc w552946aba_desc = {
 static const struct of_device_id ili9881c_of_match[] = {
        { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
        { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
+       { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
        { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
        { }
 };
index b2b0ebc9e943f8f611ff44f2947016e748c84a60..8fdbda59be48ff08bfadd534b6c32200b084a128 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index 9992d0d4c0e5c54281fb201818d2644f931c403b..485178a99910631e64449ddc3c4adf96fde9d79b 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
index 323c33c9c37aa2b5545f76a9d1a698b35d62316a..4879835fe101d4b93f0e8c3e322f2a2d7283a846 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #define JD9365DA_INIT_CMD_LEN          2
index d2efd887484bc007360c61b4aec0e59886113ba5..d41482d3a34f64bb71e0bb6f1e20b4a6ea9c7085 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/display_timing.h>
index de8758c30e6e82c6f589c2492ce97815899b5006..1b8e3156914c1dd3e0eac49503e16a71a85fa1f0 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
@@ -228,15 +228,13 @@ static int panel_lvds_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int panel_lvds_remove(struct platform_device *pdev)
+static void panel_lvds_remove(struct platform_device *pdev)
 {
        struct panel_lvds *lvds = platform_get_drvdata(pdev);
 
        drm_panel_remove(&lvds->panel);
 
        drm_panel_disable(&lvds->panel);
-
-       return 0;
 }
 
 static const struct of_device_id panel_lvds_of_table[] = {
@@ -248,7 +246,7 @@ MODULE_DEVICE_TABLE(of, panel_lvds_of_table);
 
 static struct platform_driver panel_lvds_driver = {
        .probe          = panel_lvds_probe,
-       .remove         = panel_lvds_remove,
+       .remove_new     = panel_lvds_remove,
        .driver         = {
                .name   = "panel-lvds",
                .of_match_table = panel_lvds_of_table,
index 26d358b9b85ad5f7096095cc24423ab3d726a552..799c2161fc85b24e1fb236fd63b397bf66fc15c8 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
index 9243b2ad828d090be2a0856007d377eb85ed3698..ea4a6bf6d35bbab860ba9e4cda82b7cf5c20e6aa 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
index a07958038ffd1e8a873cbb7752669e70439fcbd4..ad98dd9322b4a3283a6761a746bb2de3dedb4dc8 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/display_timing.h>
index cf078f0d3cd349d9345727f4b93d86ee1920b052..71e57de6d8b2c0e2e246f6ccf7e79a8f9e53ba9d 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 #include <video/mipi_display.h>
index 493c3c23f0d649f1ee7aff88281613b8ab50048d..d6dceb8580081eb58dd6dee3694cf89120646d8b 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/bitops.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
index cc7f96d70826322475b4a2345d8cf96608431dff..5bbea734123bc003e3d691040496b1fc679ac94b 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
index 8b108ac80b556ae553809367b2a5a4ca35288402..412ca84d05811053ce01189ffc812d5cae433b22 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 
index c3befa7f253dcdb141973cf03591209c7406ac6a..9632b9e95b7159d2e852593bf82bf7e4f60d574e 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 
index 73bcffa1e0c151eb3f1ca852b27c60b8fdc5e2cf..33fb3d715e54f0262c624443d146078a88b189ac 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 
 #include <linux/gpio/consumer.h>
 #include <linux/pinctrl/consumer.h>
index f58cfb10b58a2ad48d670ed53828d0a17a965051..059260262b5a9d646a4e5ae6b6984e071eac7fde 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index e46be5014d4224fcce08d47878722c3ab1037817..c415dacf18161d68bcabb45b0a3834ff4030e225 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
index 90ea91e4311d44b9a079ed9b5d3289b2b0c7dbe9..4618c892cdd652d513baf74fabaaef4b9672e215 100644 (file)
@@ -47,7 +47,6 @@
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/pm.h>
 
index 117b268450835cb3be58c33c4dfa76ef4ff2a136..14c6700e37b30a58e7081423c18bff8db7896c5b 100644 (file)
@@ -56,10 +56,6 @@ struct db7430 {
        struct mipi_dbi dbi;
        /** @panel: the DRM panel instance for this device */
        struct drm_panel panel;
-       /** @width: the width of this panel in mm */
-       u32 width;
-       /** @height: the height of this panel in mm */
-       u32 height;
        /** @reset: reset GPIO line */
        struct gpio_desc *reset;
        /** @regulators: VCCIO and VIO supply regulators */
index 01eb211f32f7578389835cfb9a7e012e4c359710..9f438683a6f6e872317af879d1d9518551b08987 100644 (file)
@@ -8,6 +8,7 @@
  * Andrzej Hajda <a.hajda@samsung.com>
 */
 
+#include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
@@ -180,15 +181,15 @@ static void ld9040_init(struct ld9040 *ctx)
 {
        ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
        ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
-               0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
-               0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
-               0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
+               0x05, 0x5e, 0x96, 0x6b, 0x7d, 0x0d, 0x3f, 0x00,
+               0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0x07, 0x05, 0x1f, 0x1f, 0x1f, 0x00, 0x00);
        ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
-               0x02, 0x08, 0x08, 0x10, 0x10);
+               0x02, 0x06, 0x0a, 0x10, 0x10);
        ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
        ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
                0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
-       ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
+       ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0f, 0x00, 0x16);
        ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
        ld9040_brightness_set(ctx);
        ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
@@ -310,8 +311,30 @@ static int ld9040_parse_dt(struct ld9040 *ctx)
        return 0;
 }
 
+static int ld9040_bl_update_status(struct backlight_device *dev)
+{
+       struct ld9040 *ctx = bl_get_data(dev);
+
+       ctx->brightness = backlight_get_brightness(dev);
+       ld9040_brightness_set(ctx);
+
+       return 0;
+}
+
+static const struct backlight_ops ld9040_bl_ops = {
+       .update_status  = ld9040_bl_update_status,
+};
+
+static const struct backlight_properties ld9040_bl_props = {
+       .type = BACKLIGHT_RAW,
+       .scale = BACKLIGHT_SCALE_NON_LINEAR,
+       .max_brightness = ARRAY_SIZE(ld9040_gammas) - 1,
+       .brightness = ARRAY_SIZE(ld9040_gammas) - 1,
+};
+
 static int ld9040_probe(struct spi_device *spi)
 {
+       struct backlight_device *bldev;
        struct device *dev = &spi->dev;
        struct ld9040 *ctx;
        int ret;
@@ -323,7 +346,7 @@ static int ld9040_probe(struct spi_device *spi)
        spi_set_drvdata(spi, ctx);
 
        ctx->dev = dev;
-       ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
+       ctx->brightness = ld9040_bl_props.brightness;
 
        ret = ld9040_parse_dt(ctx);
        if (ret < 0)
@@ -353,6 +376,12 @@ static int ld9040_probe(struct spi_device *spi)
        drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
                       DRM_MODE_CONNECTOR_DPI);
 
+       bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
+                                              ctx, &ld9040_bl_ops,
+                                              &ld9040_bl_props);
+       if (IS_ERR(bldev))
+               return PTR_ERR(bldev);
+
        drm_panel_add(&ctx->panel);
 
        return 0;
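
The ld9040 additions expose the existing gamma-table brightness through the backlight class: max_brightness is sized from the gamma array, so update_status can use the reported level as a direct index. A stripped-down sketch of the same pattern (demo_* names and struct are placeholders):

#include <linux/backlight.h>

struct demo_ctx {
        unsigned int level;
};

static int demo_bl_update_status(struct backlight_device *bd)
{
        struct demo_ctx *ctx = bl_get_data(bd);

        /* 0..max_brightness, already clamped by the backlight core */
        ctx->level = backlight_get_brightness(bd);
        /* ... program the matching gamma set into the panel ... */
        return 0;
}

static const struct backlight_ops demo_bl_ops = {
        .update_status  = demo_bl_update_status,
};

Registering with devm_backlight_device_register(), as the probe hunk does, also ties the backlight lifetime to the SPI device.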
index 008e2b0d6652b6239bd4633a620988897493a74a..79f611963c6183218b42c7b6c3e2ab298e3fdb99 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
 #include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 
 struct s6d16d0 {
index 102e1fc7ee383a4ae97cd121e61525ef95ba458d..0583360b128068055b10f0bf888f6bf74cb7a962 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 
 #include <video/mipi_display.h>
 #include <drm/drm_mipi_dsi.h>
@@ -66,7 +65,6 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
 static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
 {
        struct mipi_dsi_device *dsi = ctx->dsi;
-       int ret = 0;
 
        if (lock) {
                mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
@@ -80,7 +78,7 @@ static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
                        mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
        }
 
-       return ret;
+       return 0;
 }
 
 static int s6d7aa0_on(struct s6d7aa0 *ctx)
index 39eef3dce7c94ce99de01edb870009dedb51e097..639a4fdf57bb546dc995053e77c25cf8d5f3641a 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drm_mipi_dsi.h>
index ed3895e4ca5e75aa0d6c8d2fd0d875a1c866ef24..a89d925fdfb2b1b5f2b8fc2c1fe6190413a9c09a 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_print.h>
index 1ebb79e3103c19d1c282c66e99e738bf7a87c77b..cbf9607dd5761bf55e55964c3892ff9028894950 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/backlight.h>
 
index c250ca36a5b3c6f07203bc446e1256eb9f69c13c..658c7c04057028989b3a11df927a4de73496a588 100644 (file)
@@ -278,14 +278,12 @@ static int seiko_panel_probe(struct device *dev,
        return 0;
 }
 
-static int seiko_panel_remove(struct platform_device *pdev)
+static void seiko_panel_remove(struct platform_device *pdev)
 {
        struct seiko_panel *panel = platform_get_drvdata(pdev);
 
        drm_panel_remove(&panel->base);
        drm_panel_disable(&panel->base);
-
-       return 0;
 }
 
 static void seiko_panel_shutdown(struct platform_device *pdev)
@@ -347,7 +345,7 @@ static struct platform_driver seiko_panel_platform_driver = {
                .of_match_table = platform_of_match,
        },
        .probe = seiko_panel_platform_probe,
-       .remove = seiko_panel_remove,
+       .remove_new = seiko_panel_remove,
        .shutdown = seiko_panel_shutdown,
 };
 module_platform_driver(seiko_panel_platform_driver);
index a07d0f6c3e69b6b8085bb35e5b4a84e1943fab5f..76bd9e81082768a3544fe3b6b974db2f1b341484 100644 (file)
@@ -189,15 +189,13 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ls037v7dw01_remove(struct platform_device *pdev)
+static void ls037v7dw01_remove(struct platform_device *pdev)
 {
        struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
 
        drm_panel_remove(&lcd->panel);
        drm_panel_disable(&lcd->panel);
        drm_panel_unprepare(&lcd->panel);
-
-       return 0;
 }
 
 static const struct of_device_id ls037v7dw01_of_match[] = {
@@ -209,7 +207,7 @@ MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
 
 static struct platform_driver ls037v7dw01_driver = {
        .probe          = ls037v7dw01_probe,
-       .remove         = ls037v7dw01_remove,
+       .remove_new     = ls037v7dw01_remove,
        .driver         = {
                .name = "panel-sharp-ls037v7dw01",
                .of_match_table = ls037v7dw01_of_match,
index a247a0e7c79983ec3869582f7e029f0fba0a86a8..72cef64441a60142d3d6ec4678aa55878a066fd0 100644 (file)
@@ -141,7 +141,6 @@ struct panel_simple {
 
        bool prepared;
 
-       ktime_t prepared_time;
        ktime_t unprepared_time;
 
        const struct panel_desc *desc;
@@ -351,8 +350,6 @@ static int panel_simple_resume(struct device *dev)
        if (p->desc->delay.prepare)
                msleep(p->desc->delay.prepare);
 
-       p->prepared_time = ktime_get_boottime();
-
        return 0;
 }
 
@@ -566,7 +563,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
                return -ENOMEM;
 
        panel->enabled = false;
-       panel->prepared_time = 0;
        panel->desc = desc;
 
        panel->supply = devm_regulator_get(dev, "power");
@@ -1189,7 +1185,9 @@ static const struct panel_desc auo_t215hvn01 = {
        .delay = {
                .disable = 5,
                .unprepare = 1000,
-       }
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
 static const struct drm_display_mode avic_tm070ddh03_mode = {
@@ -2178,6 +2176,7 @@ static const struct panel_desc innolux_at043tn24 = {
                .height = 54,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 };
 
@@ -2377,6 +2376,37 @@ static const struct panel_desc innolux_g121x1_l03 = {
        },
 };
 
+static const struct display_timing innolux_g156hce_l01_timings = {
+       .pixelclock = { 120000000, 144000000, 150000000 },
+       .hactive = { 1920, 1920, 1920 },
+       .hfront_porch = { 80, 90, 100 },
+       .hback_porch = { 80, 90, 100 },
+       .hsync_len = { 20, 30, 30 },
+       .vactive = { 1080, 1080, 1080 },
+       .vfront_porch = { 3, 10, 20 },
+       .vback_porch = { 3, 10, 20 },
+       .vsync_len = { 4, 10, 10 },
+};
+
+static const struct panel_desc innolux_g156hce_l01 = {
+       .timings = &innolux_g156hce_l01_timings,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 344,
+               .height = 194,
+       },
+       .delay = {
+               .prepare = 1,           /* T1+T2 */
+               .enable = 450,          /* T5 */
+               .disable = 200,         /* T6 */
+               .unprepare = 10,        /* T3+T7 */
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
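
Each display_timing field above is a { min, typ, max } triplet. Sanity-checking the typical column: htotal = 1920 + 90 + 90 + 30 = 2130 and vtotal = 1080 + 10 + 10 + 10 = 1110, so at the 144 MHz typical pixel clock this refreshes at about 144000000 / (2130 * 1110) ≈ 61 Hz, which is plausible for a 15.6-inch 1080p LVDS panel.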
+
 static const struct drm_display_mode innolux_n156bge_l21_mode = {
        .clock = 69300,
        .hdisplay = 1366,
@@ -3202,11 +3232,13 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
        .vsync_start = 480 + 49,
        .vsync_end = 480 + 49 + 2,
        .vtotal = 480 + 49 + 2 + 22,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
 
 static const struct panel_desc powertip_ph800480t013_idf02  = {
        .modes = &powertip_ph800480t013_idf02_mode,
        .num_modes = 1,
+       .bpc = 8,
        .size = {
                .width = 152,
                .height = 91,
@@ -4241,6 +4273,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "innolux,g121x1-l03",
                .data = &innolux_g121x1_l03,
+       }, {
+               .compatible = "innolux,g156hce-l01",
+               .data = &innolux_g156hce_l01,
        }, {
                .compatible = "innolux,n156bge-l21",
                .data = &innolux_n156bge_l21,
@@ -4457,20 +4492,18 @@ MODULE_DEVICE_TABLE(of, platform_of_match);
 
 static int panel_simple_platform_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *id;
+       const struct panel_desc *desc;
 
-       id = of_match_node(platform_of_match, pdev->dev.of_node);
-       if (!id)
+       desc = of_device_get_match_data(&pdev->dev);
+       if (!desc)
                return -ENODEV;
 
-       return panel_simple_probe(&pdev->dev, id->data);
+       return panel_simple_probe(&pdev->dev, desc);
 }
 
-static int panel_simple_platform_remove(struct platform_device *pdev)
+static void panel_simple_platform_remove(struct platform_device *pdev)
 {
        panel_simple_remove(&pdev->dev);
-
-       return 0;
 }
 
 static void panel_simple_platform_shutdown(struct platform_device *pdev)
@@ -4491,7 +4524,7 @@ static struct platform_driver panel_simple_platform_driver = {
                .pm = &panel_simple_pm_ops,
        },
        .probe = panel_simple_platform_probe,
-       .remove = panel_simple_platform_remove,
+       .remove_new = panel_simple_platform_remove,
        .shutdown = panel_simple_platform_shutdown,
 };
 
@@ -4736,15 +4769,12 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
 static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
 {
        const struct panel_desc_dsi *desc;
-       const struct of_device_id *id;
        int err;
 
-       id = of_match_node(dsi_of_match, dsi->dev.of_node);
-       if (!id)
+       desc = of_device_get_match_data(&dsi->dev);
+       if (!desc)
                return -ENODEV;
 
-       desc = id->data;
-
        err = panel_simple_probe(&dsi->dev, &desc->desc);
        if (err < 0)
                return err;
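
Both probe paths here now use of_device_get_match_data() instead of open-coding the of_match_node() dance, dropping the intermediate struct of_device_id. The converted pattern, with demo_* as placeholder names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_desc;
static int demo_init(struct device *dev, const struct demo_desc *desc);

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_desc *desc;

        desc = of_device_get_match_data(&pdev->dev);
        if (!desc)
                return -ENODEV; /* bound without a match entry */

        return demo_init(&pdev->dev, desc);
}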
index 7eae83aa0ea18efcc8b8fcf7e247863b360e128c..0459965e1b4f7b47a9e1d173429329b191171a5a 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
index 3aa31f3d61574bf2d351661193f9dacd1df98630..6a394563953501336fac0b6b59e23bb438b6c288 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/media-bus-format.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/display_timing.h>
index bbc4569cbcdc227af2fa55e0e7955aa4ec21e9f7..88e80fe98112daa0019ec43217a94178a5b0b1a7 100644 (file)
 #include <linux/spi/spi.h>
 
 #include <video/mipi_display.h>
+#include <linux/media-bus-format.h>
 
 #include <drm/drm_device.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_panel.h>
 
-#define ST7789V_COLMOD_RGB_FMT_18BITS          (6 << 4)
-#define ST7789V_COLMOD_CTRL_FMT_18BITS         (6 << 0)
-
 #define ST7789V_RAMCTRL_CMD            0xb0
 #define ST7789V_RAMCTRL_RM_RGB                 BIT(4)
 #define ST7789V_RAMCTRL_DM_RGB                 BIT(0)
@@ -29,7 +27,8 @@
 #define ST7789V_RGBCTRL_RCM(n)                 (((n) & 3) << 5)
 #define ST7789V_RGBCTRL_VSYNC_HIGH             BIT(3)
 #define ST7789V_RGBCTRL_HSYNC_HIGH             BIT(2)
-#define ST7789V_RGBCTRL_PCLK_HIGH              BIT(1)
+#define ST7789V_RGBCTRL_PCLK_FALLING           BIT(1)
+#define ST7789V_RGBCTRL_DE_LOW                 BIT(0)
 #define ST7789V_RGBCTRL_VBP(n)                 ((n) & 0x7f)
 #define ST7789V_RGBCTRL_HBP(n)                 ((n) & 0x1f)
 
                        return val;             \
        } while (0)
 
+#define ST7789V_IDS { 0x85, 0x85, 0x52 }
+#define ST7789V_IDS_SIZE 3
+
+struct st7789_panel_info {
+       const struct drm_display_mode *mode;
+       u32 bus_format;
+       u32 bus_flags;
+       bool invert_mode;
+       bool partial_mode;
+       u16 partial_start;
+       u16 partial_end;
+};
+
 struct st7789v {
        struct drm_panel panel;
+       const struct st7789_panel_info *info;
        struct spi_device *spi;
        struct gpio_desc *reset;
        struct regulator *power;
+       enum drm_panel_orientation orientation;
 };
 
 enum st7789v_prefix {
@@ -132,17 +146,12 @@ static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
                             u8 data)
 {
        struct spi_transfer xfer = { };
-       struct spi_message msg;
        u16 txbuf = ((prefix & 1) << 8) | data;
 
-       spi_message_init(&msg);
-
        xfer.tx_buf = &txbuf;
-       xfer.bits_per_word = 9;
        xfer.len = sizeof(txbuf);
 
-       spi_message_add_tail(&xfer, &msg);
-       return spi_sync(ctx->spi, &msg);
+       return spi_sync_transfer(ctx->spi, &xfer, 1);
 }
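
Two simplifications land in this hunk: the per-transfer bits_per_word override disappears because probe now calls spi_setup() once with spi->bits_per_word = 9, so every transfer inherits the 9-bit word size, and the open-coded message setup collapses into spi_sync_transfer(). That helper is shorthand for roughly the following sketch:

```c
#include <linux/spi/spi.h>

/* Approximate open-coded equivalent of spi_sync_transfer(spi, xfer, 1). */
static int one_shot_sync(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_message msg;

	/* Init the message on the stack and queue the single transfer. */
	spi_message_init_with_transfers(&msg, xfer, 1);
	return spi_sync(spi, &msg);
}
```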
 
 static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
@@ -155,6 +164,76 @@ static int st7789v_write_data(struct st7789v *ctx, u8 cmd)
        return st7789v_spi_write(ctx, ST7789V_DATA, cmd);
 }
 
+static int st7789v_read_data(struct st7789v *ctx, u8 cmd, u8 *buf,
+                            unsigned int len)
+{
+       struct spi_transfer xfer[2] = { };
+       struct spi_message msg;
+       u16 txbuf = ((ST7789V_COMMAND & 1) << 8) | cmd;
+       u16 rxbuf[4] = {};
+       u8 bit9 = 0;
+       int ret, i;
+
+       switch (len) {
+       case 1:
+       case 3:
+       case 4:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       spi_message_init(&msg);
+
+       xfer[0].tx_buf = &txbuf;
+       xfer[0].len = sizeof(txbuf);
+       spi_message_add_tail(&xfer[0], &msg);
+
+       xfer[1].rx_buf = rxbuf;
+       xfer[1].len = len * 2;
+       spi_message_add_tail(&xfer[1], &msg);
+
+       ret = spi_sync(ctx->spi, &msg);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < len; i++) {
+               buf[i] = rxbuf[i] >> i | (bit9 << (9 - i));
+               if (i)
+                       bit9 = rxbuf[i] & GENMASK(i - 1, 0);
+       }
+
+       return 0;
+}
+
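
Reads on this bus are trickier than writes: each 16-bit Rx word carries only 9 valid bits, so successive payload bytes come back progressively misaligned, and the loop above re-packs them by shifting byte i right by i and splicing in the bits carried over from the previous word. A plain-C transliteration of that re-packing, for illustration only:

```c
#include <stdint.h>

static void repack_9bit_rx(const uint16_t *rx, uint8_t *out, int len)
{
	uint8_t carry = 0;	/* bits shifted out of the previous word */
	int i;

	for (i = 0; i < len; i++) {
		/* carry << (9 - i) is 0 on the first byte (carry == 0) */
		out[i] = (rx[i] >> i) | (carry << (9 - i));
		if (i)
			carry = rx[i] & ((1u << i) - 1);
	}
}
```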
+static int st7789v_check_id(struct drm_panel *panel)
+{
+       const u8 st7789v_ids[ST7789V_IDS_SIZE] = ST7789V_IDS;
+       struct st7789v *ctx = panel_to_st7789v(panel);
+       bool invalid_ids = false;
+       int ret, i;
+       u8 ids[3];
+
+       if (ctx->spi->mode & SPI_NO_RX)
+               return 0;
+
+       ret = st7789v_read_data(ctx, MIPI_DCS_GET_DISPLAY_ID, ids, ST7789V_IDS_SIZE);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < ST7789V_IDS_SIZE; i++) {
+               if (ids[i] != st7789v_ids[i]) {
+                       invalid_ids = true;
+                       break;
+               }
+       }
+
+       if (invalid_ids)
+               return -EIO;
+
+       return 0;
+}
+
 static const struct drm_display_mode default_mode = {
        .clock = 7000,
        .hdisplay = 240,
@@ -165,18 +244,102 @@ static const struct drm_display_mode default_mode = {
        .vsync_start = 320 + 8,
        .vsync_end = 320 + 8 + 4,
        .vtotal = 320 + 8 + 4 + 4,
+       .width_mm = 61,
+       .height_mm = 103,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct drm_display_mode t28cp45tn89_mode = {
+       .clock = 6008,
+       .hdisplay = 240,
+       .hsync_start = 240 + 38,
+       .hsync_end = 240 + 38 + 10,
+       .htotal = 240 + 38 + 10 + 10,
+       .vdisplay = 320,
+       .vsync_start = 320 + 8,
+       .vsync_end = 320 + 8 + 4,
+       .vtotal = 320 + 8 + 4 + 4,
+       .width_mm = 43,
+       .height_mm = 57,
+       .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct drm_display_mode et028013dma_mode = {
+       .clock = 3000,
+       .hdisplay = 240,
+       .hsync_start = 240 + 38,
+       .hsync_end = 240 + 38 + 10,
+       .htotal = 240 + 38 + 10 + 10,
+       .vdisplay = 320,
+       .vsync_start = 320 + 8,
+       .vsync_end = 320 + 8 + 4,
+       .vtotal = 320 + 8 + 4 + 4,
+       .width_mm = 43,
+       .height_mm = 58,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
+       .clock = 6000,
+       .hdisplay = 240,
+       .hsync_start = 240 + 28,
+       .hsync_end = 240 + 28 + 10,
+       .htotal = 240 + 28 + 10 + 10,
+       .vdisplay = 280,
+       .vsync_start = 280 + 8,
+       .vsync_end = 280 + 8 + 4,
+       .vtotal = 280 + 8 + 4 + 4,
+       .width_mm = 43,
+       .height_mm = 37,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct st7789_panel_info default_panel = {
+       .mode = &default_mode,
+       .invert_mode = true,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+                    DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
+static const struct st7789_panel_info t28cp45tn89_panel = {
+       .mode = &t28cp45tn89_mode,
+       .invert_mode = false,
+       .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+                    DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct st7789_panel_info et028013dma_panel = {
+       .mode = &et028013dma_mode,
+       .invert_mode = true,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+                    DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct st7789_panel_info jt240mhqs_hwt_ek_e3_panel = {
+       .mode = &jt240mhqs_hwt_ek_e3_mode,
+       .invert_mode = true,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+                    DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+       .partial_mode = true,
+       .partial_start = 38,
+       .partial_end = 318,
 };
 
 static int st7789v_get_modes(struct drm_panel *panel,
                             struct drm_connector *connector)
 {
+       struct st7789v *ctx = panel_to_st7789v(panel);
        struct drm_display_mode *mode;
 
-       mode = drm_mode_duplicate(connector->dev, &default_mode);
+       mode = drm_mode_duplicate(connector->dev, ctx->info->mode);
        if (!mode) {
-               dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
-                       default_mode.hdisplay, default_mode.vdisplay,
-                       drm_mode_vrefresh(&default_mode));
+               dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+                       ctx->info->mode->hdisplay, ctx->info->mode->vdisplay,
+                       drm_mode_vrefresh(ctx->info->mode));
                return -ENOMEM;
        }
 
@@ -185,17 +348,65 @@ static int st7789v_get_modes(struct drm_panel *panel,
        mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
        drm_mode_probed_add(connector, mode);
 
-       connector->display_info.width_mm = 61;
-       connector->display_info.height_mm = 103;
+       connector->display_info.bpc = 6;
+       connector->display_info.width_mm = ctx->info->mode->width_mm;
+       connector->display_info.height_mm = ctx->info->mode->height_mm;
+       connector->display_info.bus_flags = ctx->info->bus_flags;
+       drm_display_info_set_bus_formats(&connector->display_info,
+                                        &ctx->info->bus_format, 1);
+
+       /*
+        * TODO: Remove once all drm drivers call
+        * drm_connector_set_orientation_from_panel()
+        */
+       drm_connector_set_panel_orientation(connector, ctx->orientation);
 
        return 1;
 }
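
get_modes() now publishes the per-panel bus format and flags through the connector's display_info instead of leaving one wiring hardcoded, which is what lets the DPI encoder pick the right pixel-clock edge and DE polarity. A sketch of how a consumer typically reads that back (function name illustrative):

```c
#include <drm/drm_connector.h>
#include <linux/media-bus-format.h>

static u32 foo_pick_bus_format(struct drm_connector *connector)
{
	const struct drm_display_info *info = &connector->display_info;

	/* Formats were published via drm_display_info_set_bus_formats(). */
	if (info->num_bus_formats)
		return info->bus_formats[0];

	return MEDIA_BUS_FMT_RGB888_1X24;	/* arbitrary fallback */
}
```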
 
+static enum drm_panel_orientation st7789v_get_orientation(struct drm_panel *p)
+{
+       struct st7789v *ctx = panel_to_st7789v(p);
+
+       return ctx->orientation;
+}
+
 static int st7789v_prepare(struct drm_panel *panel)
 {
        struct st7789v *ctx = panel_to_st7789v(panel);
+       u8 mode, pixel_fmt, polarity;
        int ret;
 
+       if (!ctx->info->partial_mode)
+               mode = ST7789V_RGBCTRL_WO;
+       else
+               mode = 0;
+
+       switch (ctx->info->bus_format) {
+       case MEDIA_BUS_FMT_RGB666_1X18:
+               pixel_fmt = MIPI_DCS_PIXEL_FMT_18BIT;
+               break;
+       case MEDIA_BUS_FMT_RGB565_1X16:
+               pixel_fmt = MIPI_DCS_PIXEL_FMT_16BIT;
+               break;
+       default:
+               dev_err(panel->dev, "unsupported bus format: %d\n",
+                       ctx->info->bus_format);
+               return -EINVAL;
+       }
+
+       pixel_fmt = (pixel_fmt << 4) | pixel_fmt;
+
+       polarity = 0;
+       if (ctx->info->mode->flags & DRM_MODE_FLAG_PVSYNC)
+               polarity |= ST7789V_RGBCTRL_VSYNC_HIGH;
+       if (ctx->info->mode->flags & DRM_MODE_FLAG_PHSYNC)
+               polarity |= ST7789V_RGBCTRL_HSYNC_HIGH;
+       if (ctx->info->bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE)
+               polarity |= ST7789V_RGBCTRL_PCLK_FALLING;
+       if (ctx->info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+               polarity |= ST7789V_RGBCTRL_DE_LOW;
+
        ret = regulator_enable(ctx->power);
        if (ret)
                return ret;
@@ -205,6 +416,14 @@ static int st7789v_prepare(struct drm_panel *panel)
        gpiod_set_value(ctx->reset, 0);
        msleep(120);
 
+       /*
+        * Only warn on an ID mismatch: the read may return garbage when
+        * the Rx bus width description is missing from the device tree.
+        */
+       ret = st7789v_check_id(panel);
+       if (ret)
+               dev_warn(panel->dev, "Unrecognized panel IDs\n");
+
        ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));
 
        /* We need to wait 120ms after a sleep out command */
@@ -216,9 +435,7 @@ static int st7789v_prepare(struct drm_panel *panel)
 
        ST7789V_TEST(ret, st7789v_write_command(ctx,
                                                MIPI_DCS_SET_PIXEL_FORMAT));
-       ST7789V_TEST(ret, st7789v_write_data(ctx,
-                                            (MIPI_DCS_PIXEL_FMT_18BIT << 4) |
-                                            (MIPI_DCS_PIXEL_FMT_18BIT)));
+       ST7789V_TEST(ret, st7789v_write_data(ctx, pixel_fmt));
 
        ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
        ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
@@ -296,7 +513,44 @@ static int st7789v_prepare(struct drm_panel *panel)
        ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
        ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));
 
-       ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE));
+       if (ctx->info->invert_mode) {
+               ST7789V_TEST(ret, st7789v_write_command(ctx,
+                                               MIPI_DCS_ENTER_INVERT_MODE));
+       } else {
+               ST7789V_TEST(ret, st7789v_write_command(ctx,
+                                               MIPI_DCS_EXIT_INVERT_MODE));
+       }
+
+       if (ctx->info->partial_mode) {
+               u8 area_data[4] = {
+                       (ctx->info->partial_start >> 8) & 0xff,
+                       (ctx->info->partial_start >> 0) & 0xff,
+                       ((ctx->info->partial_end - 1) >> 8) & 0xff,
+                       ((ctx->info->partial_end - 1) >> 0) & 0xff,
+               };
+
+               /*
+                * Caution: if userspace ever pushes a mode different from
+                * the one advertised by get_modes(), margins will be added.
+                */
+
+               ST7789V_TEST(ret, st7789v_write_command(
+                                         ctx, MIPI_DCS_ENTER_PARTIAL_MODE));
+
+               ST7789V_TEST(ret, st7789v_write_command(
+                                         ctx, MIPI_DCS_SET_PAGE_ADDRESS));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+
+               ST7789V_TEST(ret, st7789v_write_command(
+                                         ctx, MIPI_DCS_SET_PARTIAL_ROWS));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+               ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+       }
 
        ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
        ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
@@ -305,11 +559,9 @@ static int st7789v_prepare(struct drm_panel *panel)
                                             ST7789V_RAMCTRL_MAGIC));
 
        ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
-       ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
+       ST7789V_TEST(ret, st7789v_write_data(ctx, mode |
                                             ST7789V_RGBCTRL_RCM(2) |
-                                            ST7789V_RGBCTRL_VSYNC_HIGH |
-                                            ST7789V_RGBCTRL_HSYNC_HIGH |
-                                            ST7789V_RGBCTRL_PCLK_HIGH));
+                                            polarity));
        ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
        ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));
 
@@ -346,41 +598,52 @@ static int st7789v_unprepare(struct drm_panel *panel)
 }
 
 static const struct drm_panel_funcs st7789v_drm_funcs = {
-       .disable        = st7789v_disable,
-       .enable         = st7789v_enable,
-       .get_modes      = st7789v_get_modes,
-       .prepare        = st7789v_prepare,
-       .unprepare      = st7789v_unprepare,
+       .disable = st7789v_disable,
+       .enable = st7789v_enable,
+       .get_modes = st7789v_get_modes,
+       .get_orientation = st7789v_get_orientation,
+       .prepare = st7789v_prepare,
+       .unprepare = st7789v_unprepare,
 };
 
 static int st7789v_probe(struct spi_device *spi)
 {
+       struct device *dev = &spi->dev;
        struct st7789v *ctx;
        int ret;
 
-       ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
        spi_set_drvdata(spi, ctx);
        ctx->spi = spi;
 
-       drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+       spi->bits_per_word = 9;
+       ret = spi_setup(spi);
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Failed to set up SPI\n");
+
+       ctx->info = device_get_match_data(dev);
+
+       drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
                       DRM_MODE_CONNECTOR_DPI);
 
-       ctx->power = devm_regulator_get(&spi->dev, "power");
-       if (IS_ERR(ctx->power))
-               return PTR_ERR(ctx->power);
+       ctx->power = devm_regulator_get(dev, "power");
+       ret = PTR_ERR_OR_ZERO(ctx->power);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to get regulator\n");
 
-       ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
-       if (IS_ERR(ctx->reset)) {
-               dev_err(&spi->dev, "Couldn't get our reset line\n");
-               return PTR_ERR(ctx->reset);
-       }
+       ctx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(ctx->reset);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to get reset line\n");
 
        ret = drm_panel_of_backlight(&ctx->panel);
        if (ret)
-               return ret;
+               return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+       of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
 
        drm_panel_add(&ctx->panel);
 
@@ -394,8 +657,21 @@ static void st7789v_remove(struct spi_device *spi)
        drm_panel_remove(&ctx->panel);
 }
 
+static const struct spi_device_id st7789v_spi_id[] = {
+       { "st7789v", (unsigned long) &default_panel },
+       { "t28cp45tn89-v17", (unsigned long) &t28cp45tn89_panel },
+       { "et028013dma", (unsigned long) &et028013dma_panel },
+       { "jt240mhqs-hwt-ek-e3", (unsigned long) &jt240mhqs_hwt_ek_e3_panel },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, st7789v_spi_id);
+
 static const struct of_device_id st7789v_of_match[] = {
-       { .compatible = "sitronix,st7789v" },
+       { .compatible = "sitronix,st7789v", .data = &default_panel },
+       { .compatible = "inanbo,t28cp45tn89-v17", .data = &t28cp45tn89_panel },
+       { .compatible = "edt,et028013dma", .data = &et028013dma_panel },
+       { .compatible = "jasonic,jt240mhqs-hwt-ek-e3",
+         .data = &jt240mhqs_hwt_ek_e3_panel },
        { }
 };
 MODULE_DEVICE_TABLE(of, st7789v_of_match);
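
Next to the OF table, the driver now carries a spi_device_id table. The SPI core also matches by plain name (the part of the compatible string after the vendor prefix), warns at registration about SPI drivers that lack a matching spi_device_id entry, and needs the table for devices instantiated by name from board code. A sketch of that legacy instantiation path, with illustrative values:

```c
#include <linux/spi/spi.h>

static struct spi_board_info panel_board_info __initdata = {
	.modalias	= "st7789v",	/* matched against st7789v_spi_id */
	.max_speed_hz	= 10 * 1000 * 1000,
	.bus_num	= 0,
	.chip_select	= 0,
};

/* Registered early from board code:
 *	spi_register_board_info(&panel_board_info, 1);
 */
```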
@@ -403,6 +679,7 @@ MODULE_DEVICE_TABLE(of, st7789v_of_match);
 static struct spi_driver st7789v_driver = {
        .probe = st7789v_probe,
        .remove = st7789v_remove,
+       .id_table = st7789v_spi_id,
        .driver = {
                .name = "st7789v",
                .of_match_table = st7789v_of_match,
index 8d8813dbaa45f93de2ab2a080a5439d8a16e7e2a..1bde2f01786b55bfbca2386dcc5d169f805f5ffe 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
new file mode 100644 (file)
index 0000000..6e77a2d
--- /dev/null
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 InforceComputing
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2023 BayLibre, SAS
+ *
+ * Authors:
+ * - Vinay Simha BN <simhavcs@gmail.com>
+ * - Sumit Semwal <sumit.semwal@linaro.org>
+ * - Guillaume La Roque <glaroque@baylibre.com>
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DSI_REG_MCAP   0xB0
+#define DSI_REG_IS     0xB3 /* Interface Setting */
+#define DSI_REG_IIS    0xB4 /* Interface ID Setting */
+#define DSI_REG_CTRL   0xB6
+
+enum {
+       IOVCC = 0,
+       POWER = 1
+};
+
+struct stk_panel {
+       bool prepared;
+       const struct drm_display_mode *mode;
+       struct backlight_device *backlight;
+       struct drm_panel base;
+       struct gpio_desc *enable_gpio; /* Power IC supply enable */
+       struct gpio_desc *reset_gpio; /* External reset */
+       struct mipi_dsi_device *dsi;
+       struct regulator_bulk_data supplies[2];
+};
+
+static inline struct stk_panel *to_stk_panel(struct drm_panel *panel)
+{
+       return container_of(panel, struct stk_panel, base);
+}
+
+static int stk_panel_init(struct stk_panel *stk)
+{
+       struct mipi_dsi_device *dsi = stk->dsi;
+       struct device *dev = &stk->dsi->dev;
+       int ret;
+
+       ret = mipi_dsi_dcs_soft_reset(dsi);
+       if (ret < 0) {
+               dev_err(dev, "failed to soft reset panel: %d\n", ret);
+               return ret;
+       }
+       mdelay(5);
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(dev, "failed to exit sleep mode: %d\n", ret);
+               return ret;
+       }
+       msleep(120);
+
+       mipi_dsi_generic_write_seq(dsi, DSI_REG_MCAP, 0x04);
+
+       /* Interface setting, video mode */
+       mipi_dsi_generic_write_seq(dsi, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
+       mipi_dsi_generic_write_seq(dsi, DSI_REG_IIS, 0x0C, 0x00);
+       mipi_dsi_generic_write_seq(dsi, DSI_REG_CTRL, 0x3A, 0xD3);
+
+       ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x77);
+       if (ret < 0) {
+               dev_err(dev, "failed to write display brightness: %d\n", ret);
+               return ret;
+       }
+
+       /* 0x2c = BCTRL | DD | BL: enable brightness control and backlight */
+       mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+
+       ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
+       if (ret < 0) {
+               dev_err(dev, "failed to set pixel format: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_set_column_address(dsi, 0, stk->mode->hdisplay - 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to set column address: %d\n", ret);
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_set_page_address(dsi, 0, stk->mode->vdisplay - 1);
+       if (ret < 0) {
+               dev_err(dev, "failed to set page address: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
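
Two packet types are in play in the init sequence above: mipi_dsi_generic_write_seq() emits Generic (non-DCS) write packets, which the vendor registers (0xB0/0xB3/0xB4/0xB6) expect once unlocked through MCAP, while the standard command set goes through the DCS helpers. A side-by-side sketch; note both macros return from the enclosing function on error, so the caller must return int:

```c
#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int write_both_kinds(struct mipi_dsi_device *dsi)
{
	/* Vendor register 0xB0 (MCAP): Generic write packet. */
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);

	/* Standard command: DCS write packet. */
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_ON);

	return 0;
}
```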
+static int stk_panel_on(struct stk_panel *stk)
+{
+       struct mipi_dsi_device *dsi = stk->dsi;
+       struct device *dev = &stk->dsi->dev;
+       int ret;
+
+       ret = mipi_dsi_dcs_set_display_on(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to set display on: %d\n", ret);
+
+       mdelay(20);
+
+       return ret;
+}
+
+static void stk_panel_off(struct stk_panel *stk)
+{
+       struct mipi_dsi_device *dsi = stk->dsi;
+       struct device *dev = &stk->dsi->dev;
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to set display off: %d\n", ret);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0)
+               dev_err(dev, "failed to enter sleep mode: %d\n", ret);
+
+       msleep(100);
+}
+
+static int stk_panel_unprepare(struct drm_panel *panel)
+{
+       struct stk_panel *stk = to_stk_panel(panel);
+
+       if (!stk->prepared)
+               return 0;
+
+       stk_panel_off(stk);
+       regulator_bulk_disable(ARRAY_SIZE(stk->supplies), stk->supplies);
+       gpiod_set_value(stk->reset_gpio, 0);
+       gpiod_set_value(stk->enable_gpio, 0);
+
+       stk->prepared = false;
+
+       return 0;
+}
+
+static int stk_panel_prepare(struct drm_panel *panel)
+{
+       struct stk_panel *stk = to_stk_panel(panel);
+       struct device *dev = &stk->dsi->dev;
+       int ret;
+
+       if (stk->prepared)
+               return 0;
+
+       gpiod_set_value(stk->reset_gpio, 0);
+       gpiod_set_value(stk->enable_gpio, 0);
+       ret = regulator_enable(stk->supplies[IOVCC].consumer);
+       if (ret < 0)
+               return ret;
+
+       mdelay(8);
+       ret = regulator_enable(stk->supplies[POWER].consumer);
+       if (ret < 0)
+               goto iovccoff;
+
+       mdelay(20);
+       gpiod_set_value(stk->enable_gpio, 1);
+       mdelay(20);
+       gpiod_set_value(stk->reset_gpio, 1);
+       mdelay(10);
+       ret = stk_panel_init(stk);
+       if (ret < 0) {
+               dev_err(dev, "failed to init panel: %d\n", ret);
+               goto poweroff;
+       }
+
+       ret = stk_panel_on(stk);
+       if (ret < 0) {
+               dev_err(dev, "failed to set panel on: %d\n", ret);
+               goto poweroff;
+       }
+
+       stk->prepared = true;
+
+       return 0;
+
+poweroff:
+       regulator_disable(stk->supplies[POWER].consumer);
+iovccoff:
+       regulator_disable(stk->supplies[IOVCC].consumer);
+       gpiod_set_value(stk->reset_gpio, 0);
+       gpiod_set_value(stk->enable_gpio, 0);
+
+       return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+       .clock = 163204,
+       .hdisplay = 1200,
+       .hsync_start = 1200 + 144,
+       .hsync_end = 1200 + 144 + 16,
+       .htotal = 1200 + 144 + 16 + 45,
+       .vdisplay = 1920,
+       .vsync_start = 1920 + 8,
+       .vsync_end = 1920 + 8 + 4,
+       .vtotal = 1920 + 8 + 4 + 4,
+       .width_mm = 95,
+       .height_mm = 151,
+};
+
+static int stk_panel_get_modes(struct drm_panel *panel,
+                              struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(connector->dev, &default_mode);
+       if (!mode) {
+               dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+                       default_mode.hdisplay, default_mode.vdisplay,
+                       drm_mode_vrefresh(&default_mode));
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+       drm_mode_probed_add(connector, mode);
+       connector->display_info.width_mm = default_mode.width_mm;
+       connector->display_info.height_mm = default_mode.height_mm;
+       return 1;
+}
+
+static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       int ret;
+       u16 brightness;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+       ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+       return brightness & 0xff;
+}
+
+static int dsi_dcs_bl_update_status(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       struct device *dev = &dsi->dev;
+       int ret;
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+       ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+       if (ret < 0) {
+               dev_err(dev, "failed to set DSI control: %d\n", ret);
+               return ret;
+       }
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+       return 0;
+}
+
+static const struct backlight_ops dsi_bl_ops = {
+       .update_status = dsi_dcs_bl_update_status,
+       .get_brightness = dsi_dcs_bl_get_brightness,
+};
+
+static struct backlight_device *
+drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct backlight_properties props = {
+               .type = BACKLIGHT_RAW,
+               .brightness = 255,
+               .max_brightness = 255,
+       };
+
+       return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+                                             &dsi_bl_ops, &props);
+}
+
+static const struct drm_panel_funcs stk_panel_funcs = {
+       .unprepare = stk_panel_unprepare,
+       .prepare = stk_panel_prepare,
+       .get_modes = stk_panel_get_modes,
+};
+
+static const struct of_device_id stk_of_match[] = {
+       { .compatible = "startek,kd070fhfid015", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, stk_of_match);
+
+static int stk_panel_add(struct stk_panel *stk)
+{
+       struct device *dev = &stk->dsi->dev;
+       int ret;
+
+       stk->mode = &default_mode;
+
+       stk->supplies[IOVCC].supply = "iovcc";
+       stk->supplies[POWER].supply = "power";
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(stk->supplies), stk->supplies);
+       if (ret) {
+               dev_err(dev, "failed to get regulators\n");
+               return ret;
+       }
+
+       stk->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(stk->reset_gpio)) {
+               ret = PTR_ERR(stk->reset_gpio);
+               dev_err(dev, "cannot get reset-gpios: %d\n", ret);
+               return ret;
+       }
+
+       stk->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+       if (IS_ERR(stk->enable_gpio)) {
+               ret = PTR_ERR(stk->enable_gpio);
+               dev_err(dev, "cannot get enable-gpios: %d\n", ret);
+               return ret;
+       }
+
+       stk->backlight = drm_panel_create_dsi_backlight(stk->dsi);
+       if (IS_ERR(stk->backlight)) {
+               ret = PTR_ERR(stk->backlight);
+               dev_err(dev, "failed to register backlight: %d\n", ret);
+               return ret;
+       }
+
+       drm_panel_init(&stk->base, &stk->dsi->dev, &stk_panel_funcs,
+                      DRM_MODE_CONNECTOR_DSI);
+
+       drm_panel_add(&stk->base);
+
+       return 0;
+}
+
+static int stk_panel_probe(struct mipi_dsi_device *dsi)
+{
+       struct stk_panel *stk;
+       int ret;
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = (MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM);
+
+       stk = devm_kzalloc(&dsi->dev, sizeof(*stk), GFP_KERNEL);
+       if (!stk)
+               return -ENOMEM;
+
+       mipi_dsi_set_drvdata(dsi, stk);
+
+       stk->dsi = dsi;
+
+       ret = stk_panel_add(stk);
+       if (ret < 0)
+               return ret;
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0)
+               drm_panel_remove(&stk->base);
+
+       return ret;
+}
+
+static void stk_panel_remove(struct mipi_dsi_device *dsi)
+{
+       struct stk_panel *stk = mipi_dsi_get_drvdata(dsi);
+       int err;
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+                       err);
+
+       drm_panel_remove(&stk->base);
+}
+
+static struct mipi_dsi_driver stk_panel_driver = {
+       .driver = {
+               .name = "panel-startek-kd070fhfid015",
+               .of_match_table = stk_of_match,
+       },
+       .probe = stk_panel_probe,
+       .remove = stk_panel_remove,
+};
+module_mipi_dsi_driver(stk_panel_driver);
+
+MODULE_AUTHOR("Guillaume La Roque <glaroque@baylibre.com>");
+MODULE_DESCRIPTION("STARTEK KD070FHFID015");
+MODULE_DESCRIPTION("STARTEK KD070FHFID015 panel driver");
index b31cffb660a7720af33aea141bc7579b7a88d2cf..4f4009f9fe25ad71c4cecc72150fe914dd85dd02 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
new file mode 100644 (file)
index 0000000..00fc28a
--- /dev/null
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+
+#include <video/mipi_display.h>
+
+struct visionox_r66451 {
+       struct drm_panel panel;
+       struct mipi_dsi_device *dsi;
+       struct gpio_desc *reset_gpio;
+       struct regulator_bulk_data supplies[2];
+       bool prepared, enabled;
+};
+
+static inline struct visionox_r66451 *to_visionox_r66451(struct drm_panel *panel)
+{
+       return container_of(panel, struct visionox_r66451, panel);
+}
+
+static void visionox_r66451_reset(struct visionox_r66451 *ctx)
+{
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       usleep_range(10000, 10100);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       usleep_range(10000, 10100);
+       gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+       usleep_range(10000, 10100);
+}
+
+static int visionox_r66451_on(struct visionox_r66451 *ctx)
+{
+       struct mipi_dsi_device *dsi = ctx->dsi;
+
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xc2,
+                              0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+                              0x09, 0x3c);
+       mipi_dsi_dcs_write_seq(dsi, 0xd7,
+                              0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+                              0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+                              0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
+       mipi_dsi_dcs_write_seq(dsi, 0xde,
+                              0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
+                              0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
+       mipi_dsi_dcs_write_seq(dsi, 0xe8, 0x00, 0x02);
+       mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x00, 0x08);
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xc4,
+                              0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                              0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
+       mipi_dsi_dcs_write_seq(dsi, 0xcf,
+                              0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+                              0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+                              0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
+       mipi_dsi_dcs_write_seq(dsi, 0xd3,
+                              0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
+                              0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
+                              0x3c, 0x9c);
+       mipi_dsi_dcs_write_seq(dsi, 0xd7,
+                              0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+                              0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+                              0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+       mipi_dsi_dcs_write_seq(dsi, 0xd8,
+                              0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                              0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
+                              0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                              0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+                              0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+                              0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
+       mipi_dsi_dcs_write_seq(dsi, 0xdf,
+                              0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
+                              0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                              0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
+                              0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
+                              0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x01);
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
+       mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39, 0x04, 0x09, 0x34);
+       mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
+       mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x40);
+       mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
+       mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x11);
+       mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
+       mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x00, 0x02);
+       mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x19);
+       mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x42);
+       mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+       mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
+       mipi_dsi_dcs_set_page_address(dsi, 0, 2340 - 1);
+
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       return 0;
+}
+
+static int visionox_r66451_off(struct visionox_r66451 *ctx)
+{
+       ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+       return 0;
+}
+
+static int visionox_r66451_prepare(struct drm_panel *panel)
+{
+       struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+       struct mipi_dsi_device *dsi = ctx->dsi;
+       struct device *dev = &dsi->dev;
+       int ret;
+
+       if (ctx->prepared)
+               return 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+                                   ctx->supplies);
+       if (ret < 0)
+               return ret;
+
+       visionox_r66451_reset(ctx);
+
+       ret = visionox_r66451_on(ctx);
+       if (ret < 0) {
+               dev_err(dev, "Failed to initialize panel: %d\n", ret);
+               gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+               regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+               return ret;
+       }
+
+       mipi_dsi_compression_mode(ctx->dsi, true);
+
+       ctx->prepared = true;
+       return 0;
+}
+
+static int visionox_r66451_unprepare(struct drm_panel *panel)
+{
+       struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+       struct device *dev = &ctx->dsi->dev;
+       int ret;
+
+       if (!ctx->prepared)
+               return 0;
+
+       ret = visionox_r66451_off(ctx);
+       if (ret < 0)
+               dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+       gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+       regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+       ctx->prepared = false;
+       return 0;
+}
+
+static const struct drm_display_mode visionox_r66451_mode = {
+       .clock = 345830,
+       .hdisplay = 1080,
+       .hsync_start = 1175,
+       .hsync_end = 1176,
+       .htotal = 1216,
+       .vdisplay = 2340,
+       .vsync_start = 2365,
+       .vsync_end = 2366,
+       .vtotal = 2370,
+       .width_mm = 0,
+       .height_mm = 0,
+       .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int visionox_r66451_enable(struct drm_panel *panel)
+{
+       struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+       struct mipi_dsi_device *dsi = ctx->dsi;
+       struct drm_dsc_picture_parameter_set pps;
+       int ret;
+
+       if (ctx->enabled)
+               return 0;
+
+       if (!dsi->dsc) {
+               dev_err(&dsi->dev, "DSC not attached to DSI\n");
+               return -ENODEV;
+       }
+
+       drm_dsc_pps_payload_pack(&pps, dsi->dsc);
+       ret = mipi_dsi_picture_parameter_set(dsi, &pps);
+       if (ret) {
+               dev_err(&dsi->dev, "Failed to set PPS\n");
+               return ret;
+       }
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
+               return ret;
+       }
+       msleep(120);
+
+       ret = mipi_dsi_dcs_set_display_on(dsi);
+       if (ret < 0) {
+               dev_err(&dsi->dev, "Failed to set display on: %d\n", ret);
+               return ret;
+       }
+       msleep(20);
+
+       ctx->enabled = true;
+
+       return 0;
+}
+
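
This panel only scans out through DSC, so enable() must download the compression parameters before the first frame: the drm_dsc_config attached at probe time (bits_per_pixel is 6.4 fixed point, hence 8 << 4 for 8 bpp) is packed into a 128-byte Picture Parameter Set and sent in a dedicated DSI packet, with compression itself switched on via mipi_dsi_compression_mode() from prepare(). A minimal sketch of that sequence, merged into one helper for illustration:

```c
#include <drm/drm_mipi_dsi.h>
#include <drm/display/drm_dsc_helper.h>

static int dsc_bringup(struct mipi_dsi_device *dsi)
{
	struct drm_dsc_picture_parameter_set pps;
	int ret;

	/* Serialize dsi->dsc into the 128-byte PPS payload. */
	drm_dsc_pps_payload_pack(&pps, dsi->dsc);

	ret = mipi_dsi_picture_parameter_set(dsi, &pps);
	if (ret)
		return ret;

	/* Switch the link to compressed mode. */
	return mipi_dsi_compression_mode(dsi, true);
}
```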
+static int visionox_r66451_disable(struct drm_panel *panel)
+{
+       struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+       struct mipi_dsi_device *dsi = ctx->dsi;
+       struct device *dev = &dsi->dev;
+       int ret;
+
+       ctx->enabled = false;
+
+       ret = mipi_dsi_dcs_set_display_off(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to set display off: %d\n", ret);
+               return ret;
+       }
+       msleep(20);
+
+       ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+               return ret;
+       }
+       msleep(120);
+
+       return 0;
+}
+
+static int visionox_r66451_get_modes(struct drm_panel *panel,
+                                   struct drm_connector *connector)
+{
+       return drm_connector_helper_get_modes_fixed(connector,
+                                                   &visionox_r66451_mode);
+}
+
+static const struct drm_panel_funcs visionox_r66451_funcs = {
+       .prepare = visionox_r66451_prepare,
+       .unprepare = visionox_r66451_unprepare,
+       .get_modes = visionox_r66451_get_modes,
+       .enable = visionox_r66451_enable,
+       .disable = visionox_r66451_disable,
+};
+
+static int visionox_r66451_bl_update_status(struct backlight_device *bl)
+{
+       struct mipi_dsi_device *dsi = bl_get_data(bl);
+       u16 brightness = backlight_get_brightness(bl);
+
+       return mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+}
+
+static const struct backlight_ops visionox_r66451_bl_ops = {
+       .update_status = visionox_r66451_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_r66451_create_backlight(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       const struct backlight_properties props = {
+               .type = BACKLIGHT_RAW,
+               .brightness = 255,
+               .max_brightness = 4095,
+       };
+
+       return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+                                             &visionox_r66451_bl_ops, &props);
+}
+
+static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
+{
+       struct device *dev = &dsi->dev;
+       struct visionox_r66451 *ctx;
+       struct drm_dsc_config *dsc;
+       int ret = 0;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
+       if (!dsc)
+               return -ENOMEM;
+
+       /* Set DSC params */
+       dsc->dsc_version_major = 0x1;
+       dsc->dsc_version_minor = 0x2;
+
+       dsc->slice_height = 20;
+       dsc->slice_width = 540;
+       dsc->slice_count = 2;
+       dsc->bits_per_component = 8;
+       dsc->bits_per_pixel = 8 << 4;
+       dsc->block_pred_enable = true;
+
+       dsi->dsc = dsc;
+
+       ctx->supplies[0].supply = "vddio";
+       ctx->supplies[1].supply = "vdd";
+
+       ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+                       ctx->supplies);
+
+       if (ret < 0)
+               return ret;
+
+       ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset_gpio))
+               return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset-gpios\n");
+
+       ctx->dsi = dsi;
+       mipi_dsi_set_drvdata(dsi, ctx);
+
+       dsi->lanes = 4;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+       drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
+       ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
+       if (IS_ERR(ctx->panel.backlight))
+               return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+                               "Failed to create backlight\n");
+
+       drm_panel_add(&ctx->panel);
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+               drm_panel_remove(&ctx->panel);
+       }
+
+       return ret;
+}
+
+static void visionox_r66451_remove(struct mipi_dsi_device *dsi)
+{
+       struct visionox_r66451 *ctx = mipi_dsi_get_drvdata(dsi);
+       int ret;
+
+       ret = mipi_dsi_detach(dsi);
+       if (ret < 0)
+               dev_err(&dsi->dev, "Failed to detach DSI host: %d\n", ret);
+
+       drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_r66451_of_match[] = {
+       { .compatible = "visionox,r66451" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_r66451_of_match);
+
+static struct mipi_dsi_driver visionox_r66451_driver = {
+       .probe = visionox_r66451_probe,
+       .remove = visionox_r66451_remove,
+       .driver = {
+               .name = "panel-visionox-r66451",
+               .of_match_table = visionox_r66451_of_match,
+       },
+};
+
+module_mipi_dsi_driver(visionox_r66451_driver);
+
+MODULE_AUTHOR("Jessica Zhang <quic_jesszhan@quicinc.com>");
+MODULE_DESCRIPTION("Panel driver for the Visionox R66451 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
index ec228c269146f0c5177de3a16ccbe04010ed8339..c2806e4fd553b1159a98b93673b5cc5614b6e08c 100644 (file)
@@ -5,7 +5,7 @@
 
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
 
index bbada731bbbd02ce30559ae3e4d7fc77daf8703f..a2ab99698ca80a6bc59e34bc704c6df6e2fbc4e6 100644 (file)
@@ -4,8 +4,9 @@
 /* Copyright 2019 Collabora ltd. */
 
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/pagemap.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <drm/panfrost_drm.h>
 #include <drm/drm_drv.h>
@@ -407,6 +408,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 
        bo = to_panfrost_bo(gem_obj);
 
+       ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+       if (ret)
+               goto out_put_object;
+
        mutex_lock(&pfdev->shrinker_lock);
        mutex_lock(&bo->mappings.lock);
        if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -444,7 +449,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);
-
+       dma_resv_unlock(bo->base.base.resv);
+out_put_object:
        drm_gem_object_put(gem_obj);
        return ret;
 }
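
With drm_gem_shmem's private pages_lock gone, anything that mutates a shmem BO's page or madvise state must hold the GEM object's reservation lock, so the ioctl now takes it outermost. From ioctl context the interruptible variant is the right choice: a signal aborts the wait instead of blocking the task indefinitely. A sketch of the pattern, with a hypothetical caller:

```c
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

static int foo_madvise_locked(struct drm_gem_object *obj)
{
	int ret;

	/* ioctl context: sleep for the resv lock, abort on signal. */
	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (ret)	/* typically -EINTR when interrupted */
		return ret;

	/* ... shmem page/madv state may be modified safely here ... */

	dma_resv_unlock(obj->resv);
	return 0;
}
```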
@@ -539,10 +545,7 @@ static const struct drm_driver panfrost_drm_driver = {
        .minor                  = 2,
 
        .gem_create_object      = panfrost_gem_create_object,
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
 };
 
 static int panfrost_probe(struct platform_device *pdev)
@@ -611,7 +614,7 @@ err_out0:
        return err;
 }
 
-static int panfrost_remove(struct platform_device *pdev)
+static void panfrost_remove(struct platform_device *pdev)
 {
        struct panfrost_device *pfdev = platform_get_drvdata(pdev);
        struct drm_device *ddev = pfdev->ddev;
@@ -625,7 +628,6 @@ static int panfrost_remove(struct platform_device *pdev)
        pm_runtime_set_suspended(pfdev->dev);
 
        drm_dev_put(ddev);
-       return 0;
 }
 
 /*
@@ -717,7 +719,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver panfrost_driver = {
        .probe          = panfrost_probe,
-       .remove         = panfrost_remove,
+       .remove_new     = panfrost_remove,
        .driver         = {
                .name   = "panfrost",
                .pm     = pm_ptr(&panfrost_pm_ops),
index bf0170782f2585e9dc90bc2943191ec64dcb6aa5..6a71a2555f85ca0da08a72a71380a90a9fe77dbc 100644 (file)
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
        if (!mutex_trylock(&bo->mappings.lock))
                return false;
 
-       if (!mutex_trylock(&shmem->pages_lock))
+       if (!dma_resv_trylock(shmem->base.resv))
                goto unlock_mappings;
 
        panfrost_gem_teardown_mappings_locked(bo);
-       drm_gem_shmem_purge_locked(&bo->base);
+       drm_gem_shmem_purge(&bo->base);
        ret = true;
 
-       mutex_unlock(&shmem->pages_lock);
+       dma_resv_unlock(shmem->base.resv);
 
 unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
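
The shrinker side takes the opposite approach: reclaim must never block on locks possibly held by the tasks it is trying to free memory for, so it only trylocks the reservation and skips contended objects until the next scan. A sketch, with a hypothetical helper name:

```c
#include <drm/drm_gem_shmem_helper.h>

static bool foo_try_purge(struct drm_gem_shmem_object *shmem)
{
	/* Reclaim context: never sleep waiting for the resv lock. */
	if (!dma_resv_trylock(shmem->base.resv))
		return false;		/* busy, retry on the next scan */

	drm_gem_shmem_purge(shmem);	/* requires the resv lock held */
	dma_resv_unlock(shmem->base.resv);
	return true;
}
```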
index dbc597ab46fb9f9260a2232de22d069050bdc72e..a8b4827dc4258633d13d5a0fb3d7e1b04a51a787 100644 (file)
@@ -720,6 +720,22 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
        if (dma_fence_is_signaled(job->done_fence))
                return DRM_GPU_SCHED_STAT_NOMINAL;
 
+       /*
+        * Panfrost IRQ handler may take a long time to process an interrupt
+        * if there is another IRQ handler hogging the processing.
+        * For example, the HDMI encoder driver might be stuck in its IRQ
+        * handler for a significant time in case of a bad cable connection.
+        * In order to catch such cases and not report spurious Panfrost
+        * job timeouts, synchronize the IRQ handler and re-check the fence
+        * status.
+        */
+       synchronize_irq(pfdev->js->irq);
+
+       if (dma_fence_is_signaled(job->done_fence)) {
+               dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+               return DRM_GPU_SCHED_STAT_NOMINAL;
+       }
+
        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
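
The fence re-check after synchronize_irq() closes a false-positive window: on a shared or heavily loaded line the job-done interrupt may have fired, yet its handler may not have run by the time the scheduler's timeout expires. The same pattern in a generic form, with a hypothetical device:

```c
#include <linux/interrupt.h>
#include <linux/completion.h>

struct foo_dev {			/* hypothetical device state */
	struct completion job_done;
	int irq;
};

static bool foo_really_timed_out(struct foo_dev *fdev)
{
	if (completion_done(&fdev->job_done))
		return false;

	/* Flush a possibly in-flight handler on our line, then re-check. */
	synchronize_irq(fdev->irq);

	return !completion_done(&fdev->job_done);
}
```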
index e961fa27702ce61f8675f6b63b6ef4dfd1496e05..c0123d09f699c74d6bd18b6ab0a18944268c7d4b 100644 (file)
@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
+       struct drm_gem_object *obj;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;
@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        page_offset = addr >> PAGE_SHIFT;
        page_offset -= bomapping->mmnode.start;
 
-       mutex_lock(&bo->base.pages_lock);
+       obj = &bo->base.base;
+
+       dma_resv_lock(obj->resv, NULL);
 
        if (!bo->base.pages) {
                bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
                                     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                if (!bo->sgts) {
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
-                       goto err_bo;
+                       goto err_unlock;
                }
 
                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                if (!pages) {
                        kvfree(bo->sgts);
                        bo->sgts = NULL;
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
-                       goto err_bo;
+                       goto err_unlock;
                }
                bo->base.pages = pages;
                bo->base.pages_use_count = 1;
@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                pages = bo->base.pages;
                if (pages[page_offset]) {
                        /* Pages are already mapped, bail out. */
-                       mutex_unlock(&bo->base.pages_lock);
                        goto out;
                }
        }
@@ -502,15 +502,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
-                       mutex_unlock(&bo->base.pages_lock);
                        ret = PTR_ERR(pages[i]);
                        pages[i] = NULL;
                        goto err_pages;
                }
        }
 
-       mutex_unlock(&bo->base.pages_lock);
-
        sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -529,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
+       dma_resv_unlock(obj->resv);
+
        panfrost_gem_mapping_put(bomapping);
 
        return 0;
@@ -537,6 +536,8 @@ err_map:
        sg_free_table(sgt);
 err_pages:
        drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+       dma_resv_unlock(obj->resv);
 err_bo:
        panfrost_gem_mapping_put(bomapping);
        return ret;
index 43049c8028b21649c232cf33ddfe5681cac29644..ba3b5b5f0cdfe4267aef8c48efe3f0e64e1e8072 100644 (file)
@@ -224,10 +224,7 @@ static const struct drm_driver pl111_drm_driver = {
        .minor = 0,
        .patchlevel = 0,
        .dumb_create = drm_gem_dma_dumb_create,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = pl111_gem_import_sg_table,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = pl111_debugfs_init,
index 00c3ebd32359b975c553e1b275c7e3b64d727879..1e4b28d03f4d2df6c7bed0e365e1c9d1cb410687 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/vexpress.h>
 
index a3b83f89e0616e2b16d1651736e580bd68f84174..b30ede1cf62d329389c2becc03836b9856082e4f 100644 (file)
@@ -290,8 +290,6 @@ static struct drm_driver qxl_driver = {
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = qxl_debugfs_init,
 #endif
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
        .fops = &qxl_fops,
        .ioctls = qxl_ioctls,
index fe498c8af1bb1f2c4c0f0a0195b17528b8ca6bcb..f98356be0af288f5ebf5ec3d50138c7cbc1a385a 100644 (file)
@@ -11,7 +11,7 @@ config DRM_RADEON
        select DRM_SUBALLOC_HELPER
         select DRM_TTM
        select DRM_TTM_HELPER
-       select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+       select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
        select SND_HDA_COMPONENT if SND_HDA_CORE
        select POWER_SUPPLY
        select HWMON
index c1bbfbe28bda2ca8d36b962b7a6be2a73bf0dec6..93acb0e42bd6a327918403ad26f4c1f67f632afc 100644 (file)
@@ -68,8 +68,8 @@ typedef struct {
 } atom_exec_context;
 
 int atom_debug = 0;
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
 
 static uint32_t atom_arg_mask[8] = {
        0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
@@ -1156,7 +1156,7 @@ static struct {
        atom_op_shr, ATOM_ARG_MC}, {
 atom_op_debug, 0},};
 
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
 {
        int base = CU16(ctx->cmd_table + 4 + 2 * index);
        int len, ws, ps, ptr;
@@ -1216,7 +1216,7 @@ free:
        return ret;
 }
 
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
 {
        int r;
 
@@ -1237,7 +1237,7 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
        return r;
 }
 
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
 {
        int r;
        mutex_lock(&ctx->scratch_mutex);
@@ -1359,8 +1359,8 @@ void atom_destroy(struct atom_context *ctx)
 }
 
 bool atom_parse_data_header(struct atom_context *ctx, int index,
-                           uint16_t * size, uint8_t * frev, uint8_t * crev,
-                           uint16_t * data_start)
+                           uint16_t *size, uint8_t *frev, uint8_t *crev,
+                           uint16_t *data_start)
 {
        int offset = index * 2 + 4;
        int idx = CU16(ctx->data_table + offset);
@@ -1379,8 +1379,8 @@ bool atom_parse_data_header(struct atom_context *ctx, int index,
        return true;
 }
 
-bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
-                          uint8_t * crev)
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+                          uint8_t *crev)
 {
        int offset = index * 2 + 4;
        int idx = CU16(ctx->cmd_table + offset);
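
The atom.c hunks are pure checkpatch style fixes. A hedged one-liner of the rule they apply, using a hypothetical declaration:

        #include <linux/types.h>

        uint32_t *params;       /* preferred: the '*' binds to the identifier */
        /* uint32_t * params;      flagged as "foo * bar" by checkpatch */
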
index 5819737c21c678d3926ef8f9570fa05577123689..5d6b81a6578ef2ba489d58d7059229e9e960a225 100644 (file)
@@ -3603,7 +3603,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @ring: radeon ring buffer object
  * @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
  *
  * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
  * from running ahead of semaphore waits.
index 356219c6c7f278770cd645e9ca88c1ee660becd2..7da8418704fe4c5e78fd3eee4f60a98c57d1844b 100644 (file)
@@ -23,8 +23,7 @@
 
 #include "clearstate_defs.h"
 
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
     0x00000000, // DB_RENDER_CONTROL
     0x00000000, // DB_COUNT_CONTROL
     0x00000000, // DB_DEPTH_VIEW
index 9c1a92fa2af6dd162ac6508e210403f409232b7b..25201b9a5aae7d88faf66eaa248806517e1818e8 100644 (file)
@@ -249,7 +249,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
-       switch(rdev->num_gb_pipes) {
+       switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
@@ -638,7 +638,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);
 
-       switch(reg) {
+       switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
@@ -1180,7 +1180,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
        ib = p->ib.ptr;
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
-       switch(pkt->opcode) {
+       switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);
                if (r)
index 382795a8b3c064ba8602184d715571c5330e0e24..a17b95eec65fb81036c49c8ed9eeabadfc953f07 100644 (file)
@@ -2918,7 +2918,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @ring: radeon ring buffer object
  * @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
  *
  * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
  * from running ahead of semaphore waits.
index bf3c411a55c5d3aac8f9c9ee9c78b4971fd08f89..85c4bb186203c377fb35b07b6cc8c097a5d3de00 100644 (file)
@@ -1389,7 +1389,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
 
                num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                        sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-               ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+               ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
                        ((u8 *)&ss_info->asSS_Info[0]);
                for (i = 0; i < num_indices; i++) {
                        if (ss_assign->ucSS_Id == id) {
@@ -1402,7 +1402,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
                                ss->refdiv = ss_assign->ucRecommendedRef_Div;
                                return true;
                        }
-                       ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+                       ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
                                ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
                }
        }
@@ -3406,7 +3406,7 @@ static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT
 {
        u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
        u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
-       u8 *start = (u8*)v2;
+       u8 *start = (u8 *)v2;
 
        while (offset < size) {
                ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
@@ -3423,7 +3423,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT
 {
        u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
        u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
-       u8 *start = (u8*)v3;
+       u8 *start = (u8 *)v3;
 
        while (offset < size) {
                ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
index d0b450a0650601ec9f92512365b97548caa1f21c..fb4d931fdf185fbe0b5259926a44f6ac8a4ff216 100644 (file)
@@ -94,6 +94,8 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
        union acpi_object atpx_arg_elements[2];
        struct acpi_object_list atpx_arg;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_device *adev = container_of(handle, struct acpi_device, handle);
+       struct device *dev = &adev->dev;
 
        atpx_arg.count = 2;
        atpx_arg.pointer = &atpx_arg_elements[0];
@@ -115,8 +117,8 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
 
        /* Fail only if calling the method fails and ATPX is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk("failed to evaluate ATPX got %s\n",
-                      acpi_format_exception(status));
+               dev_err(dev, "failed to evaluate ATPX got %s\n",
+                       acpi_format_exception(status));
                kfree(buffer.pointer);
                return NULL;
        }
@@ -157,6 +159,8 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
        u32 valid_bits = 0;
+       struct acpi_device *adev = container_of(atpx->handle, struct acpi_device, handle);
+       struct device *dev = &adev->dev;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
@@ -171,7 +175,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
 
                size = *(u16 *) info->buffer.pointer;
                if (size < 10) {
-                       printk("ATPX buffer is too small: %zu\n", size);
+                       dev_err(dev, "ATPX buffer is too small: %zu\n", size);
                        kfree(info);
                        return -EINVAL;
                }
@@ -202,7 +206,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
 
        atpx->is_hybrid = false;
        if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
-               printk("ATPX Hybrid Graphics\n");
+               dev_info(dev, "ATPX Hybrid Graphics\n");
                /*
                 * Disable legacy PM methods only when pcie port PM is usable,
                 * otherwise the device might fail to power off or power on.
@@ -239,7 +243,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
 
        size = *(u16 *) info->buffer.pointer;
        if (size < 8) {
-               printk("ATPX buffer is too small: %zu\n", size);
+               pr_err("ATPX buffer is too small: %zu\n", size);
                err = -EINVAL;
                goto out;
        }
@@ -248,8 +252,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
        memcpy(&output, info->buffer.pointer, size);
 
        /* TODO: check version? */
-       printk("ATPX version %u, functions 0x%08x\n",
-              output.version, output.function_bits);
+       pr_info("ATPX version %u, functions 0x%08x\n",
+               output.version, output.function_bits);
 
        radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
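
The ATPX hunks above swap bare printk() calls for dev_err()/dev_info() where a struct device can be derived, and pr_err()/pr_info() where it cannot. A minimal sketch of that split, with foo_check_buffer() and its parameters hypothetical:

        #include <linux/device.h>
        #include <linux/errno.h>
        #include <linux/printk.h>

        /*
         * Hedged sketch: dev_err() prefixes messages with the device name
         * when a struct device is in scope; pr_err()/pr_info() remain for
         * paths with no device context.
         */
        static int foo_check_buffer(struct device *dev, size_t size)
        {
                if (size < 10) {
                        dev_err(dev, "buffer is too small: %zu\n", size);
                        return -EINVAL;
                }

                pr_info("buffer ok, %zu bytes\n", size);
                return 0;
        }
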
 
index 07193cd0c417468ae3defcd9b354b1e607e5eb62..d2f02c3dfce29780dc1ddda5f718ade98b7ebe90 100644 (file)
@@ -198,8 +198,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
                                DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
                                                  connector->name, bpc);
                        }
-               }
-               else if (bpc > 8) {
+               } else if (bpc > 8) {
                        /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
                        DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
                                          connector->name);
@@ -334,10 +333,8 @@ static void radeon_connector_free_edid(struct drm_connector *connector)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-       if (radeon_connector->edid) {
-               kfree(radeon_connector->edid);
-               radeon_connector->edid = NULL;
-       }
+       kfree(radeon_connector->edid);
+       radeon_connector->edid = NULL;
 }
 
 static int radeon_ddc_get_modes(struct drm_connector *connector)
@@ -1372,7 +1369,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
                                        /* assume digital unless load detected otherwise */
                                        radeon_connector->use_digital = true;
                                        lret = encoder_funcs->detect(encoder, connector);
-                                       DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+                                       DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
                                        if (lret == connector_status_connected)
                                                radeon_connector->use_digital = false;
                                }
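
The radeon_connector_free_edid() hunk relies on kfree(NULL) being a defined no-op, which is what makes the surrounding NULL check removable. A hedged reduction of the pattern, with foo_ctx standing in for the connector structure:

        #include <linux/slab.h>

        struct foo_ctx {
                void *edid;
        };

        static void foo_free_edid(struct foo_ctx *ctx)
        {
                kfree(ctx->edid);       /* safe even when ctx->edid is NULL */
                ctx->edid = NULL;
        }
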
index e4374814f0ef6f36399a4b0647e47d467e4ca2bb..fa531493b11134caabb8967ab1581427f3873ef2 100644 (file)
 #define KMS_DRIVER_MAJOR       2
 #define KMS_DRIVER_MINOR       50
 #define KMS_DRIVER_PATCHLEVEL  0
-int radeon_suspend_kms(struct drm_device *dev, bool suspend,
-                      bool fbcon, bool freeze);
-int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
-                                     unsigned int flags, int *vpos, int *hpos,
-                                     ktime_t *stime, ktime_t *etime,
-                                     const struct drm_display_mode *mode);
-extern bool radeon_is_px(struct drm_device *dev);
-int radeon_mode_dumb_mmap(struct drm_file *filp,
-                         struct drm_device *dev,
-                         uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_create(struct drm_file *file_priv,
-                           struct drm_device *dev,
-                           struct drm_mode_create_dumb *args);
-
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-bool radeon_has_atpx_dgpu_power_cntl(void);
-bool radeon_is_atpx_hybrid(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-static inline bool radeon_is_atpx_hybrid(void) { return false; }
-#endif
 
 int radeon_no_wb;
 int radeon_modeset = -1;
 int radeon_dynclks = -1;
-int radeon_r4xx_atom = 0;
+int radeon_r4xx_atom;
 int radeon_agpmode = -1;
-int radeon_vram_limit = 0;
+int radeon_vram_limit;
 int radeon_gart_size = -1; /* auto */
-int radeon_benchmarking = 0;
-int radeon_testing = 0;
-int radeon_connector_table = 0;
+int radeon_benchmarking;
+int radeon_testing;
+int radeon_connector_table;
 int radeon_tv = 1;
 int radeon_audio = -1;
-int radeon_disp_priority = 0;
-int radeon_hw_i2c = 0;
+int radeon_disp_priority;
+int radeon_hw_i2c;
 int radeon_pcie_gen2 = -1;
 int radeon_msi = -1;
 int radeon_lockup_timeout = 10000;
-int radeon_fastfb = 0;
+int radeon_fastfb;
 int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
-int radeon_hard_reset = 0;
+int radeon_hard_reset;
 int radeon_vm_size = 8;
 int radeon_vm_block_size = -1;
-int radeon_deep_color = 0;
+int radeon_deep_color;
 int radeon_use_pflipirq = 2;
 int radeon_bapm = -1;
 int radeon_backlight = -1;
@@ -384,6 +357,7 @@ radeon_pci_shutdown(struct pci_dev *pdev)
 static int radeon_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+
        return radeon_suspend_kms(drm_dev, true, true, false);
 }
 
@@ -404,12 +378,14 @@ static int radeon_pmops_resume(struct device *dev)
 static int radeon_pmops_freeze(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+
        return radeon_suspend_kms(drm_dev, false, true, true);
 }
 
 static int radeon_pmops_thaw(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+
        return radeon_resume_kms(drm_dev, false, true);
 }
 
@@ -494,6 +470,7 @@ long radeon_drm_ioctl(struct file *filp,
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
        long ret;
+
        dev = file_priv->minor->dev;
        ret = pm_runtime_get_sync(dev->dev);
        if (ret < 0) {
@@ -604,10 +581,7 @@ static const struct drm_driver kms_driver = {
        .dumb_map_offset = radeon_mode_dumb_mmap,
        .fops = &radeon_driver_kms_fops,
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
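
The module-parameter hunks above drop "= 0" initializers because statics and globals are placed in .bss and zeroed before init runs; checkpatch flags the explicit form as redundant, while non-zero defaults keep theirs. The same file also gains the customary blank line between declarations and code. Illustrated with hypothetical names:

        int foo_hw_i2c;         /* implicitly 0 */
        int foo_msi = -1;       /* explicit non-zero default stays */
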
index 2ffe0975ee543ffaaa5ccfb42f952a56c0675d62..34a1c73d3938f1b092d6afa09536a561b8604b70 100644 (file)
@@ -124,4 +124,17 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
 void radeon_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv);
 
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
 #endif                         /* __RADEON_DRV_H__ */
index b3518a8f95a072bd59fe86788143b0b466c69ce4..9cb6401fe97ed37fbbc5adeb648c22206514542b 100644 (file)
@@ -58,6 +58,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
        count = -1;
        list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
                struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+
                count++;
 
                if (clone_encoder == encoder)
@@ -108,9 +109,10 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
                        if (ASIC_IS_AVIVO(rdev))
                                ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
                        else {
-                               /*if (rdev->family == CHIP_R200)
-                                 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
-                                 else*/
+                               /* if (rdev->family == CHIP_R200)
+                                * ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+                                * else
+                                */
                                ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
                        }
                        break;
@@ -234,6 +236,7 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder)
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
                        radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
                        DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
                                  radeon_encoder->active_device, radeon_encoder->devices,
@@ -320,12 +323,12 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-       unsigned hblank = native_mode->htotal - native_mode->hdisplay;
-       unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
-       unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
-       unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
-       unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
-       unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+       unsigned int hblank = native_mode->htotal - native_mode->hdisplay;
+       unsigned int vblank = native_mode->vtotal - native_mode->vdisplay;
+       unsigned int hover = native_mode->hsync_start - native_mode->hdisplay;
+       unsigned int vover = native_mode->vsync_start - native_mode->vdisplay;
+       unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+       unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start;
 
        adjusted_mode->clock = native_mode->clock;
        adjusted_mode->flags = native_mode->flags;
@@ -424,6 +427,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
 bool radeon_encoder_is_digital(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
        switch (radeon_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_LVDS:
        case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
index ab9c1abbac979e50934f8a28c8341877aaf19e28..02bf25759059a7b715f08a777307266bdbaccab7 100644 (file)
@@ -193,7 +193,7 @@ static const struct fb_ops radeon_fbdev_fb_ops = {
        .owner = THIS_MODULE,
        .fb_open = radeon_fbdev_fb_open,
        .fb_release = radeon_fbdev_fb_release,
-       FB_DEFAULT_IO_OPS,
+       FB_DEFAULT_IOMEM_OPS,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_destroy = radeon_fbdev_fb_destroy,
 };
@@ -253,7 +253,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
        }
 
        info->fbops = &radeon_fbdev_fb_ops;
-       info->flags = FBINFO_DEFAULT;
+
        /* radeon resume is fragile and needs a vt switch to help it along */
        info->skip_vt_switch = false;
 
@@ -383,10 +383,6 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
                goto err_drm_client_init;
        }
 
-       ret = radeon_fbdev_client_hotplug(&fb_helper->client);
-       if (ret)
-               drm_dbg_kms(rdev->ddev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&fb_helper->client);
 
        return;
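
Two fbdev changes meet in this file: FB_DEFAULT_IO_OPS was renamed to FB_DEFAULT_IOMEM_OPS for framebuffers backed by I/O memory (matching the FB_IOMEM_HELPERS Kconfig rename earlier), and info->flags no longer needs FBINFO_DEFAULT, which is 0 anyway. The dropped radeon_fbdev_client_hotplug() call reflects drm_client_register() now issuing the initial hotplug event itself. A hedged sketch of the resulting ops table, with foo_* hypothetical:

        #include <linux/fb.h>
        #include <linux/module.h>

        #include <drm/drm_fb_helper.h>

        static void foo_fb_destroy(struct fb_info *info)
        {
                /* driver-specific teardown would go here */
        }

        static const struct fb_ops foo_fb_ops = {
                .owner = THIS_MODULE,
                FB_DEFAULT_IOMEM_OPS,
                DRM_FB_HELPER_DEFAULT_OPS,
                .fb_destroy = foo_fb_destroy,
        };
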
index 04109a2a6fd760f10ac220bd3ff08f6b10afba3d..4bb242437ff607c5e0058aaca221426c88516be4 100644 (file)
@@ -74,9 +74,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
 
        ptr = dma_alloc_coherent(&rdev->pdev->dev, rdev->gart.table_size,
                                 &rdev->gart.table_addr, GFP_KERNEL);
-       if (ptr == NULL) {
+       if (!ptr)
                return -ENOMEM;
-       }
+
 #ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
@@ -99,9 +99,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
  */
 void radeon_gart_table_ram_free(struct radeon_device *rdev)
 {
-       if (rdev->gart.ptr == NULL) {
+       if (!rdev->gart.ptr)
                return;
-       }
+
 #ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
@@ -133,9 +133,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
                r = radeon_bo_create(rdev, rdev->gart.table_size,
                                     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                                     0, NULL, NULL, &rdev->gart.robj);
-               if (r) {
+               if (r)
                        return r;
-               }
        }
        return 0;
 }
@@ -197,9 +196,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
 {
        int r;
 
-       if (rdev->gart.robj == NULL) {
+       if (!rdev->gart.robj)
                return;
-       }
+
        r = radeon_bo_reserve(rdev->gart.robj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->gart.robj);
@@ -220,9 +219,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
  */
 void radeon_gart_table_vram_free(struct radeon_device *rdev)
 {
-       if (rdev->gart.robj == NULL) {
+       if (!rdev->gart.robj)
                return;
-       }
+
        radeon_bo_unref(&rdev->gart.robj);
 }
 
@@ -239,11 +238,10 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned int offset,
                        int pages)
 {
-       unsigned t;
-       unsigned p;
+       unsigned int t, p;
        int i, j;
 
        if (!rdev->gart.ready) {
@@ -284,12 +282,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+int radeon_gart_bind(struct radeon_device *rdev, unsigned int offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint32_t flags)
 {
-       unsigned t;
-       unsigned p;
+       unsigned int t, p;
        uint64_t page_base, page_entry;
        int i, j;
 
@@ -307,9 +304,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                        page_entry = radeon_gart_get_page_entry(page_base, flags);
                        rdev->gart.pages_entry[t] = page_entry;
-                       if (rdev->gart.ptr) {
+                       if (rdev->gart.ptr)
                                radeon_gart_set_page(rdev, t, page_entry);
-                       }
+
                        page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
@@ -332,9 +329,9 @@ int radeon_gart_init(struct radeon_device *rdev)
 {
        int r, i;
 
-       if (rdev->gart.pages) {
+       if (rdev->gart.pages)
                return 0;
-       }
+
        /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
        if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
                DRM_ERROR("Page size is smaller than GPU page size!\n");
index d0119c5f7eb3e6eb95ed820df362b0103a298294..358d19242f4ba290fcd199106411bd394217730d 100644 (file)
@@ -316,7 +316,7 @@ int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
 {
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
-       return -ENOSYS;
+       return -EOPNOTSUPP;
 }
 
 int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
@@ -324,7 +324,7 @@ int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 {
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
-       return -ENOSYS;
+       return -EOPNOTSUPP;
 }
 
 int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
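
The errno change above is semantic, not cosmetic: -ENOSYS is reserved for "system call not implemented", so an ioctl that exists but offers no implementation should report -EOPNOTSUPP. A hedged stub of the corrected shape, with foo_gem_pread_ioctl hypothetical:

        #include <linux/errno.h>

        #include <drm/drm_device.h>
        #include <drm/drm_file.h>

        static int foo_gem_pread_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *filp)
        {
                return -EOPNOTSUPP;     /* not -ENOSYS */
        }
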
index e0214cf1b43b50b3f993af6b5a41fbb4b09c7f91..a16590c6247fa36561b958b872303b461fe6af08 100644 (file)
@@ -444,7 +444,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
                        return -EINVAL;
                }
-               value = (uint32_t*)&value64;
+               value = (uint32_t *)&value64;
                value_size = sizeof(uint64_t);
                value64 = radeon_get_gpu_clock_counter(rdev);
                break;
@@ -543,18 +543,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                *value = rdev->vce.fb_version;
                break;
        case RADEON_INFO_NUM_BYTES_MOVED:
-               value = (uint32_t*)&value64;
+               value = (uint32_t *)&value64;
                value_size = sizeof(uint64_t);
                value64 = atomic64_read(&rdev->num_bytes_moved);
                break;
        case RADEON_INFO_VRAM_USAGE:
-               value = (uint32_t*)&value64;
+               value = (uint32_t *)&value64;
                value_size = sizeof(uint64_t);
                man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
                value64 = ttm_resource_manager_usage(man);
                break;
        case RADEON_INFO_GTT_USAGE:
-               value = (uint32_t*)&value64;
+               value = (uint32_t *)&value64;
                value_size = sizeof(uint64_t);
                man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
                value64 = ttm_resource_manager_usage(man);
@@ -614,7 +614,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
        }
-       if (copy_to_user(value_ptr, (char*)value, value_size)) {
+       if (copy_to_user(value_ptr, (char *)value, value_size)) {
                DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
                return -EFAULT;
        }
index 12e180b119ac999243f6195f2b9a6aaa3fc5f66a..7883e9ec0baeb7dc26409daed8f51303f4263c66 100644 (file)
@@ -724,12 +724,14 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
        }
 
        for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
-               if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
+               tv_dac->tv.h_code_timing[i] = hor_timing[i];
+               if (tv_dac->tv.h_code_timing[i] == 0)
                        break;
        }
 
        for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
-               if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
+               tv_dac->tv.v_code_timing[i] = vert_timing[i];
+               if (tv_dac->tv.v_code_timing[i] == 0)
                        break;
        }
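
The TV timing hunks split assignments out of conditions, since kernel style rejects the "if ((a = b) == 0)" form. The same shape as a hypothetical self-contained helper:

        #include <linux/types.h>

        static void foo_copy_timing(u32 *dst, const u32 *src, int len)
        {
                int i;

                for (i = 0; i < len; i++) {
                        dst[i] = src[i];
                        if (dst[i] == 0)
                                break;
                }
        }
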
 
index a5e1d2139e802bfa7c2f6e5694d965634f95ff11..c9fef9b61ced6d9c02ee793d98180da450303a8f 100644 (file)
@@ -156,10 +156,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
-                                          (void*)gtt_start - gtt_map),
+                                          (void *)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
-                                          (void*)gtt_start - gtt_map));
+                                          (void *)gtt_start - gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_lclean_unpin;
                        }
@@ -207,10 +207,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
-                                          (void*)vram_start - vram_map),
+                                          (void *)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
-                                          (void*)vram_start - vram_map));
+                                          (void *)vram_start - vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_lclean_unpin;
                        }
index ca4a36464340f4d6c9dbc4ca23e7db7ad0aa1c40..d1871af967d4afabddd63aae4b7f5e4bf45cda7c 100644 (file)
@@ -95,7 +95,7 @@ int radeon_vce_init(struct radeon_device *rdev)
 
        size = rdev->vce_fw->size - strlen(fw_version) - 9;
        c = rdev->vce_fw->data;
-       for (;size > 0; --size, ++c)
+       for (; size > 0; --size, ++c)
                if (strncmp(c, fw_version, strlen(fw_version)) == 0)
                        break;
 
@@ -110,7 +110,7 @@ int radeon_vce_init(struct radeon_device *rdev)
 
        size = rdev->vce_fw->size - strlen(fb_version) - 3;
        c = rdev->vce_fw->data;
-       for (;size > 0; --size, ++c)
+       for (; size > 0; --size, ++c)
                if (strncmp(c, fb_version, strlen(fb_version)) == 0)
                        break;
 
index 26fa9b0955140c993791e14bf89793efba7e64e1..9ce12fa3c3568340188f0fa2f7ef9d6c190d3e48 100644 (file)
@@ -136,8 +136,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
        return 0;
 }
 
-static const u32 r7xx_golden_registers[] =
-{
+static const u32 r7xx_golden_registers[] = {
        0x8d00, 0xffffffff, 0x0e0e0074,
        0x8d04, 0xffffffff, 0x013a2b34,
        0x9508, 0xffffffff, 0x00000002,
@@ -152,8 +151,7 @@ static const u32 r7xx_golden_registers[] =
        0x7300, 0xffffffff, 0x001000f0
 };
 
-static const u32 r7xx_golden_dyn_gpr_registers[] =
-{
+static const u32 r7xx_golden_dyn_gpr_registers[] = {
        0x8db0, 0xffffffff, 0x98989898,
        0x8db4, 0xffffffff, 0x98989898,
        0x8db8, 0xffffffff, 0x98989898,
@@ -165,8 +163,7 @@ static const u32 r7xx_golden_dyn_gpr_registers[] =
        0x88c4, 0xffffffff, 0x00000082
 };
 
-static const u32 rv770_golden_registers[] =
-{
+static const u32 rv770_golden_registers[] = {
        0x562c, 0xffffffff, 0,
        0x3f90, 0xffffffff, 0,
        0x9148, 0xffffffff, 0,
@@ -175,8 +172,7 @@ static const u32 rv770_golden_registers[] =
        0x9698, 0x18000000, 0x18000000
 };
 
-static const u32 rv770ce_golden_registers[] =
-{
+static const u32 rv770ce_golden_registers[] = {
        0x562c, 0xffffffff, 0,
        0x3f90, 0xffffffff, 0x00cc0000,
        0x9148, 0xffffffff, 0x00cc0000,
@@ -187,8 +183,7 @@ static const u32 rv770ce_golden_registers[] =
        0x9698, 0x18000000, 0x18000000
 };
 
-static const u32 rv770_mgcg_init[] =
-{
+static const u32 rv770_mgcg_init[] = {
        0x8bcc, 0xffffffff, 0x130300f9,
        0x5448, 0xffffffff, 0x100,
        0x55e4, 0xffffffff, 0x100,
@@ -345,8 +340,7 @@ static const u32 rv770_mgcg_init[] =
        0x92a4, 0xffffffff, 0x00080007
 };
 
-static const u32 rv710_golden_registers[] =
-{
+static const u32 rv710_golden_registers[] = {
        0x3f90, 0x00ff0000, 0x00fc0000,
        0x9148, 0x00ff0000, 0x00fc0000,
        0x3f94, 0x00ff0000, 0x00fc0000,
@@ -355,8 +349,7 @@ static const u32 rv710_golden_registers[] =
        0xa180, 0xffffffff, 0x00003f3f
 };
 
-static const u32 rv710_mgcg_init[] =
-{
+static const u32 rv710_mgcg_init[] = {
        0x8bcc, 0xffffffff, 0x13030040,
        0x5448, 0xffffffff, 0x100,
        0x55e4, 0xffffffff, 0x100,
@@ -414,8 +407,7 @@ static const u32 rv710_mgcg_init[] =
        0x9150, 0xffffffff, 0x4d940000
 };
 
-static const u32 rv730_golden_registers[] =
-{
+static const u32 rv730_golden_registers[] = {
        0x3f90, 0x00ff0000, 0x00f00000,
        0x9148, 0x00ff0000, 0x00f00000,
        0x3f94, 0x00ff0000, 0x00f00000,
@@ -425,8 +417,7 @@ static const u32 rv730_golden_registers[] =
        0xa180, 0xffffffff, 0x00003f3f
 };
 
-static const u32 rv730_mgcg_init[] =
-{
+static const u32 rv730_mgcg_init[] = {
        0x8bcc, 0xffffffff, 0x130300f9,
        0x5448, 0xffffffff, 0x100,
        0x55e4, 0xffffffff, 0x100,
@@ -547,8 +538,7 @@ static const u32 rv730_mgcg_init[] =
        0x92a4, 0xffffffff, 0x00000005
 };
 
-static const u32 rv740_golden_registers[] =
-{
+static const u32 rv740_golden_registers[] = {
        0x88c4, 0xffffffff, 0x00000082,
        0x28a50, 0xfffffffc, 0x00000004,
        0x2650, 0x00040000, 0,
@@ -584,8 +574,7 @@ static const u32 rv740_golden_registers[] =
        0x9698, 0x18000000, 0x18000000
 };
 
-static const u32 rv740_mgcg_init[] =
-{
+static const u32 rv740_mgcg_init[] = {
        0x8bcc, 0xffffffff, 0x13030100,
        0x5448, 0xffffffff, 0x100,
        0x55e4, 0xffffffff, 0x100,
index 45575c0d0a1d71d37f364deb5bdba903190d3ec2..09fa7f5e7c410f6a4eeec11423b6fc7ef499ef58 100644 (file)
@@ -34,8 +34,7 @@
 #define FIRST_SMC_INT_VECT_REG 0xFFD8
 #define FIRST_INT_VECT_S19     0xFFC0
 
-static const u8 rv770_smc_int_vectors[] =
-{
+static const u8 rv770_smc_int_vectors[] = {
        0x08, 0x10, 0x08, 0x10,
        0x08, 0x10, 0x08, 0x10,
        0x08, 0x10, 0x08, 0x10,
@@ -54,8 +53,7 @@ static const u8 rv770_smc_int_vectors[] =
        0x03, 0x51, 0x03, 0x51
 };
 
-static const u8 rv730_smc_int_vectors[] =
-{
+static const u8 rv730_smc_int_vectors[] = {
        0x08, 0x15, 0x08, 0x15,
        0x08, 0x15, 0x08, 0x15,
        0x08, 0x15, 0x08, 0x15,
@@ -74,8 +72,7 @@ static const u8 rv730_smc_int_vectors[] =
        0x03, 0x56, 0x03, 0x56
 };
 
-static const u8 rv710_smc_int_vectors[] =
-{
+static const u8 rv710_smc_int_vectors[] = {
        0x08, 0x04, 0x08, 0x04,
        0x08, 0x04, 0x08, 0x04,
        0x08, 0x04, 0x08, 0x04,
@@ -94,8 +91,7 @@ static const u8 rv710_smc_int_vectors[] =
        0x03, 0x51, 0x03, 0x51
 };
 
-static const u8 rv740_smc_int_vectors[] =
-{
+static const u8 rv740_smc_int_vectors[] = {
        0x08, 0x10, 0x08, 0x10,
        0x08, 0x10, 0x08, 0x10,
        0x08, 0x10, 0x08, 0x10,
@@ -114,8 +110,7 @@ static const u8 rv740_smc_int_vectors[] =
        0x03, 0x51, 0x03, 0x51
 };
 
-static const u8 cedar_smc_int_vectors[] =
-{
+static const u8 cedar_smc_int_vectors[] = {
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
@@ -134,8 +129,7 @@ static const u8 cedar_smc_int_vectors[] =
        0x04, 0xF6, 0x04, 0xF6
 };
 
-static const u8 redwood_smc_int_vectors[] =
-{
+static const u8 redwood_smc_int_vectors[] = {
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
@@ -154,8 +148,7 @@ static const u8 redwood_smc_int_vectors[] =
        0x04, 0xF6, 0x04, 0xF6
 };
 
-static const u8 juniper_smc_int_vectors[] =
-{
+static const u8 juniper_smc_int_vectors[] = {
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
@@ -174,8 +167,7 @@ static const u8 juniper_smc_int_vectors[] =
        0x04, 0xF6, 0x04, 0xF6
 };
 
-static const u8 cypress_smc_int_vectors[] =
-{
+static const u8 cypress_smc_int_vectors[] = {
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
        0x0B, 0x05, 0x0B, 0x05,
@@ -194,8 +186,7 @@ static const u8 cypress_smc_int_vectors[] =
        0x04, 0xF6, 0x04, 0xF6
 };
 
-static const u8 barts_smc_int_vectors[] =
-{
+static const u8 barts_smc_int_vectors[] = {
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
@@ -214,8 +205,7 @@ static const u8 barts_smc_int_vectors[] =
        0x05, 0x0A, 0x05, 0x0A
 };
 
-static const u8 turks_smc_int_vectors[] =
-{
+static const u8 turks_smc_int_vectors[] = {
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
@@ -234,8 +224,7 @@ static const u8 turks_smc_int_vectors[] =
        0x05, 0x0A, 0x05, 0x0A
 };
 
-static const u8 caicos_smc_int_vectors[] =
-{
+static const u8 caicos_smc_int_vectors[] = {
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
        0x0C, 0x14, 0x0C, 0x14,
@@ -254,8 +243,7 @@ static const u8 caicos_smc_int_vectors[] =
        0x05, 0x0A, 0x05, 0x0A
 };
 
-static const u8 cayman_smc_int_vectors[] =
-{
+static const u8 cayman_smc_int_vectors[] = {
        0x12, 0x05, 0x12, 0x05,
        0x12, 0x05, 0x12, 0x05,
        0x12, 0x05, 0x12, 0x05,
index 4ea1cb2e45a3ca98eaa1c544008a0c598a7d2e67..4b7dee3cf58b1ee9f0d0226dff36673b4036fac5 100644 (file)
@@ -89,8 +89,7 @@ struct PP_SIslands_PAPMStatus
 };
 typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
 
-struct PP_SIslands_PAPMParameters
-{
+struct PP_SIslands_PAPMParameters {
     uint32_t    NearTDPLimitTherm;
     uint32_t    NearTDPLimitPAPM;
     uint32_t    PlatformPowerLimit;
@@ -100,8 +99,7 @@ struct PP_SIslands_PAPMParameters
 };
 typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
 
-struct SISLANDS_SMC_SCLK_VALUE
-{
+struct SISLANDS_SMC_SCLK_VALUE {
     uint32_t    vCG_SPLL_FUNC_CNTL;
     uint32_t    vCG_SPLL_FUNC_CNTL_2;
     uint32_t    vCG_SPLL_FUNC_CNTL_3;
@@ -113,8 +111,7 @@ struct SISLANDS_SMC_SCLK_VALUE
 
 typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
 
-struct SISLANDS_SMC_MCLK_VALUE
-{
+struct SISLANDS_SMC_MCLK_VALUE {
     uint32_t    vMPLL_FUNC_CNTL;
     uint32_t    vMPLL_FUNC_CNTL_1;
     uint32_t    vMPLL_FUNC_CNTL_2;
@@ -129,8 +126,7 @@ struct SISLANDS_SMC_MCLK_VALUE
 
 typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
 
-struct SISLANDS_SMC_VOLTAGE_VALUE
-{
+struct SISLANDS_SMC_VOLTAGE_VALUE {
     uint16_t    value;
     uint8_t     index;
     uint8_t     phase_settings;
@@ -138,8 +134,7 @@ struct SISLANDS_SMC_VOLTAGE_VALUE
 
 typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
 
-struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL {
     uint8_t                     ACIndex;
     uint8_t                     displayWatermark;
     uint8_t                     gen2PCIE;
@@ -180,8 +175,7 @@ struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
 
 typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
 
-struct SISLANDS_SMC_SWSTATE
-{
+struct SISLANDS_SMC_SWSTATE {
        uint8_t                             flags;
        uint8_t                             levelCount;
        uint8_t                             padding2;
@@ -205,8 +199,7 @@ struct SISLANDS_SMC_SWSTATE_SINGLE {
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
 #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
 
-struct SISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct SISLANDS_SMC_VOLTAGEMASKTABLE {
     uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
 };
 
@@ -214,8 +207,7 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
 
 #define SISLANDS_MAX_NO_VREG_STEPS 32
 
-struct SISLANDS_SMC_STATETABLE
-{
+struct SISLANDS_SMC_STATETABLE {
        uint8_t                                 thermalProtectType;
        uint8_t                                 systemFlags;
        uint8_t                                 maxVDDCIndexInPPTable;
@@ -254,8 +246,7 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
 #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
 #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
 
-struct PP_SIslands_FanTable
-{
+struct PP_SIslands_FanTable {
        uint8_t  fdo_mode;
        uint8_t  padding;
        int16_t  temp_min;
@@ -285,8 +276,7 @@ typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
 #define SMC_SISLANDS_SCALE_I  7
 #define SMC_SISLANDS_SCALE_R 12
 
-struct PP_SIslands_CacConfig
-{
+struct PP_SIslands_CacConfig {
     uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
     uint32_t   lkge_lut_V0;
     uint32_t   lkge_lut_Vstep;
@@ -308,23 +298,20 @@ typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
 #define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
 #define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
 
-struct SMC_SIslands_MCRegisterAddress
-{
+struct SMC_SIslands_MCRegisterAddress {
     uint16_t s0;
     uint16_t s1;
 };
 
 typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
 
-struct SMC_SIslands_MCRegisterSet
-{
+struct SMC_SIslands_MCRegisterSet {
     uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
 };
 
 typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
 
-struct SMC_SIslands_MCRegisters
-{
+struct SMC_SIslands_MCRegisters {
     uint8_t                             last;
     uint8_t                             reserved[3];
     SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -333,8 +320,7 @@ struct SMC_SIslands_MCRegisters
 
 typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
 
-struct SMC_SIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_SIslands_MCArbDramTimingRegisterSet {
     uint32_t mc_arb_dram_timing;
     uint32_t mc_arb_dram_timing2;
     uint8_t  mc_arb_rfsh_rate;
@@ -344,8 +330,7 @@ struct SMC_SIslands_MCArbDramTimingRegisterSet
 
 typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
 
-struct SMC_SIslands_MCArbDramTimingRegisters
-{
+struct SMC_SIslands_MCArbDramTimingRegisters {
     uint8_t                                     arb_current;
     uint8_t                                     reserved[3];
     SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
@@ -353,8 +338,7 @@ struct SMC_SIslands_MCArbDramTimingRegisters
 
 typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
 
-struct SMC_SISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_SISLANDS_SPLL_DIV_TABLE {
     uint32_t    freq[256];
     uint32_t    ss[256];
 };
@@ -374,8 +358,7 @@ typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
 
 #define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
 
-struct Smc_SIslands_DTE_Configuration
-{
+struct Smc_SIslands_DTE_Configuration {
     uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
     uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
     uint32_t K;
index e2a67dda46584d9627edfd83b3ad0c0cfa9ecf38..26a2f5ad8ee5d4ecc8cb6698e16e373a5045bf3d 100644 (file)
@@ -187,11 +187,9 @@ static int rcar_cmm_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int rcar_cmm_remove(struct platform_device *pdev)
+static void rcar_cmm_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
-
-       return 0;
 }
 
 static const struct of_device_id rcar_cmm_of_table[] = {
@@ -203,7 +201,7 @@ MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
 
 static struct platform_driver rcar_cmm_platform_driver = {
        .probe          = rcar_cmm_probe,
-       .remove         = rcar_cmm_remove,
+       .remove_new     = rcar_cmm_remove,
        .driver         = {
                .name   = "rcar-cmm",
                .of_match_table = rcar_cmm_of_table,
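
rcar_cmm is the first of a long run of drivers in this section converted to the void-returning platform remove callback: the old int return was ignored by the platform core anyway, so the callback now returns void and is registered via .remove_new during the transition period. A hedged self-contained sketch, with the foo_* names hypothetical:

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        static int foo_probe(struct platform_device *pdev)
        {
                pm_runtime_enable(&pdev->dev);
                return 0;
        }

        static void foo_remove(struct platform_device *pdev)
        {
                pm_runtime_disable(&pdev->dev);
        }

        static struct platform_driver foo_driver = {
                .probe          = foo_probe,
                .remove_new     = foo_remove,
                .driver         = {
                        .name   = "foo",
                },
        };
        module_platform_driver(foo_driver);
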
index 1ffde19cb87fe1fb672e4e67586ecc5cb8138db5..1dd722fe8631aa9ffd628fdfc049ea28fc7e01a4 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
@@ -605,10 +605,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
 static const struct drm_driver rcar_du_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .dumb_create            = rcar_du_dumb_create,
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
        .fops                   = &rcar_du_fops,
        .name                   = "rcar-du",
        .desc                   = "Renesas R-Car Display Unit",
@@ -642,7 +639,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(rcar_du_pm_ops,
  * Platform driver
  */
 
-static int rcar_du_remove(struct platform_device *pdev)
+static void rcar_du_remove(struct platform_device *pdev)
 {
        struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
        struct drm_device *ddev = &rcdu->ddev;
@@ -651,8 +648,6 @@ static int rcar_du_remove(struct platform_device *pdev)
        drm_atomic_helper_shutdown(ddev);
 
        drm_kms_helper_poll_fini(ddev);
-
-       return 0;
 }
 
 static void rcar_du_shutdown(struct platform_device *pdev)
@@ -728,7 +723,7 @@ error:
 
 static struct platform_driver rcar_du_platform_driver = {
        .probe          = rcar_du_probe,
-       .remove         = rcar_du_remove,
+       .remove_new     = rcar_du_remove,
        .shutdown       = rcar_du_shutdown,
        .driver         = {
                .name   = "rcar-du",
index adfb36b0e8154546b765e2551f0b09c8d525ac75..9ff4537c26c868ab853ff094722c09ab936e8b8b 100644 (file)
 
 #include <linux/device.h>
 #include <linux/dma-buf.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/wait.h>
 
 #include "rcar_du_crtc.h"
index d759e019218181ce06d052357a450f33e8d04d9a..e445fac8e0b46c21b28d4b01dc1fb7bfd5db4a38 100644 (file)
@@ -600,7 +600,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
        if (!state->crtc) {
                /*
                 * The visible field is not reset by the DRM core but only
-                * updated by drm_plane_helper_check_state(), set it manually.
+                * updated by drm_atomic_helper_check_plane_state(), set it
+                * manually.
                 */
                state->visible = false;
                *format = NULL;
index 45c05d0ffc70f9b0d577733ae8be8aafbff3ba0a..9cbb5e6e2cba7321ef85816b3c0e4fe541fdf584 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/videodev2.h>
index 18ed14911b981b7869dc5e54e1f5a8f9540ffd33..119d69d20b230184c01d5b3fe38a6ac5a2b65375 100644 (file)
@@ -93,13 +93,11 @@ static int rcar_dw_hdmi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int rcar_dw_hdmi_remove(struct platform_device *pdev)
+static void rcar_dw_hdmi_remove(struct platform_device *pdev)
 {
        struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
 
        dw_hdmi_remove(hdmi);
-
-       return 0;
 }
 
 static const struct of_device_id rcar_dw_hdmi_of_table[] = {
@@ -110,7 +108,7 @@ MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
 
 static struct platform_driver rcar_dw_hdmi_platform_driver = {
        .probe          = rcar_dw_hdmi_probe,
-       .remove         = rcar_dw_hdmi_remove,
+       .remove_new     = rcar_dw_hdmi_remove,
        .driver         = {
                .name   = "rcar-dw-hdmi",
                .of_match_table = rcar_dw_hdmi_of_table,
index ca215b588fd7e0917921af3b1187fc772b245baa..92ba43a6fe38753dcbcaff277e8b86ea83d000e2 100644 (file)
@@ -918,15 +918,13 @@ static int rcar_lvds_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int rcar_lvds_remove(struct platform_device *pdev)
+static void rcar_lvds_remove(struct platform_device *pdev)
 {
        struct rcar_lvds *lvds = platform_get_drvdata(pdev);
 
        drm_bridge_remove(&lvds->bridge);
 
        pm_runtime_disable(&pdev->dev);
-
-       return 0;
 }
 
 static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
@@ -1020,7 +1018,7 @@ static const struct dev_pm_ops rcar_lvds_pm_ops = {
 
 static struct platform_driver rcar_lvds_platform_driver = {
        .probe          = rcar_lvds_probe,
-       .remove         = rcar_lvds_remove,
+       .remove_new     = rcar_lvds_remove,
        .driver         = {
                .name   = "rcar-lvds",
                .pm     = &rcar_lvds_pm_ops,
index e10e4d4b89a22bd48e16d0e5120749b6b7d9c46e..586c5c4ebb1430ed8c90a503ae05099e49c3fde5 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
@@ -1044,13 +1043,11 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int rcar_mipi_dsi_remove(struct platform_device *pdev)
+static void rcar_mipi_dsi_remove(struct platform_device *pdev)
 {
        struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev);
 
        mipi_dsi_host_unregister(&dsi->host);
-
-       return 0;
 }
 
 static const struct rcar_mipi_dsi_device_info v3u_data = {
@@ -1093,7 +1090,7 @@ MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
 
 static struct platform_driver rcar_mipi_dsi_platform_driver = {
        .probe          = rcar_mipi_dsi_probe,
-       .remove         = rcar_mipi_dsi_remove,
+       .remove_new     = rcar_mipi_dsi_remove,
        .driver         = {
                .name   = "rcar-mipi-dsi",
                .of_match_table = rcar_mipi_dsi_of_table,
index aa95b85a29643710e5cdad5cf57c37ce5274cdbc..10febea473cde974f2c4f85930cd3521c1eda6bf 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -782,14 +781,12 @@ err_pm_disable:
        return ret;
 }
 
-static int rzg2l_mipi_dsi_remove(struct platform_device *pdev)
+static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
 {
        struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
 
        mipi_dsi_host_unregister(&dsi->host);
        pm_runtime_disable(&pdev->dev);
-
-       return 0;
 }
 
 static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
@@ -801,7 +798,7 @@ MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table);
 
 static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
        .probe  = rzg2l_mipi_dsi_probe,
-       .remove = rzg2l_mipi_dsi_remove,
+       .remove_new = rzg2l_mipi_dsi_remove,
        .driver = {
                .name = "rzg2l-mipi-dsi",
                .pm = &rzg2l_mipi_pm_ops,
index ad2d3ae7e6211f37057bb5754618d8895214520b..84aa811ca1e9cf74b5a1ac7feca225db2c5d0b3a 100644 (file)
@@ -10,8 +10,9 @@
 
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 #include <linux/clk.h>
@@ -419,14 +420,12 @@ err_dp_remove:
        return ret;
 }
 
-static int rockchip_dp_remove(struct platform_device *pdev)
+static void rockchip_dp_remove(struct platform_device *pdev)
 {
        struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
 
        component_del(&pdev->dev, &rockchip_dp_component_ops);
        analogix_dp_remove(dp->adp);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -481,7 +480,7 @@ MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
 
 struct platform_driver rockchip_dp_driver = {
        .probe = rockchip_dp_probe,
-       .remove = rockchip_dp_remove,
+       .remove_new = rockchip_dp_remove,
        .driver = {
                   .name = "rockchip-dp",
                   .pm = &rockchip_dp_pm_ops,
index b6afe3786b742a14642b7704efcf952f480d2a99..a29fbafce39366d7d4f51c23ec64387b94ce6812 100644 (file)
@@ -1222,15 +1222,13 @@ static int cdn_dp_probe(struct platform_device *pdev)
        return component_add(dev, &cdn_dp_component_ops);
 }
 
-static int cdn_dp_remove(struct platform_device *pdev)
+static void cdn_dp_remove(struct platform_device *pdev)
 {
        struct cdn_dp_device *dp = platform_get_drvdata(pdev);
 
        platform_device_unregister(dp->audio_pdev);
        cdn_dp_suspend(dp->dev);
        component_del(&pdev->dev, &cdn_dp_component_ops);
-
-       return 0;
 }
 
 static void cdn_dp_shutdown(struct platform_device *pdev)
@@ -1247,7 +1245,7 @@ static const struct dev_pm_ops cdn_dp_pm_ops = {
 
 struct platform_driver cdn_dp_driver = {
        .probe = cdn_dp_probe,
-       .remove = cdn_dp_remove,
+       .remove_new = cdn_dp_remove,
        .shutdown = cdn_dp_shutdown,
        .driver = {
                   .name = "cdn-dp",
index 917e79951aac283adba00a5dcf6a2418fda4f171..0100162a73b295bb796f3b1b3106136fce3f810d 100644 (file)
@@ -12,7 +12,9 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 
@@ -1463,13 +1465,11 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+static void dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
 {
        struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
 
        dw_mipi_dsi_remove(dsi->dmd);
-
-       return 0;
 }
 
 static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
@@ -1671,7 +1671,7 @@ MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
 
 struct platform_driver dw_mipi_dsi_rockchip_driver = {
        .probe          = dw_mipi_dsi_rockchip_probe,
-       .remove         = dw_mipi_dsi_rockchip_remove,
+       .remove_new     = dw_mipi_dsi_rockchip_remove,
        .driver         = {
                .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
                .pm     = &dw_mipi_dsi_rockchip_pm_ops,
index 112699949db9fbb364bc15177eac8fa1a0c97685..341550199111f9813d53df7a3b1ba8b7fe0c42df 100644 (file)
@@ -684,11 +684,9 @@ static int dw_hdmi_rockchip_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &dw_hdmi_rockchip_ops);
 }
 
-static int dw_hdmi_rockchip_remove(struct platform_device *pdev)
+static void dw_hdmi_rockchip_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &dw_hdmi_rockchip_ops);
-
-       return 0;
 }
 
 static int __maybe_unused dw_hdmi_rockchip_resume(struct device *dev)
@@ -706,7 +704,7 @@ static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
 
 struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
        .probe  = dw_hdmi_rockchip_probe,
-       .remove = dw_hdmi_rockchip_remove,
+       .remove_new = dw_hdmi_rockchip_remove,
        .driver = {
                .name = "dwhdmi-rockchip",
                .pm = &dw_hdmi_rockchip_pm,
index 9afb889963c16ef3505f1a8ceaf342a69df4da43..6e5b922a121e243c733d7e88032f33d095354f15 100644 (file)
 #include <linux/err.h>
 #include <linux/hdmi.h>
 #include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
@@ -919,11 +920,9 @@ static int inno_hdmi_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &inno_hdmi_ops);
 }
 
-static int inno_hdmi_remove(struct platform_device *pdev)
+static void inno_hdmi_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &inno_hdmi_ops);
-
-       return 0;
 }
 
 static const struct of_device_id inno_hdmi_dt_ids[] = {
@@ -935,7 +934,7 @@ MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
 
 struct platform_driver inno_hdmi_driver = {
        .probe  = inno_hdmi_probe,
-       .remove = inno_hdmi_remove,
+       .remove_new = inno_hdmi_remove,
        .driver = {
                .name = "innohdmi-rockchip",
                .of_match_table = inno_hdmi_dt_ids,
index b5d042ee052fd554ab45fb1fbc2701dd7d134416..fa6e592e0276c3458bf815d9d4e27a100a6b65c8 100644 (file)
@@ -858,11 +858,9 @@ static int rk3066_hdmi_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &rk3066_hdmi_ops);
 }
 
-static int rk3066_hdmi_remove(struct platform_device *pdev)
+static void rk3066_hdmi_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &rk3066_hdmi_ops);
-
-       return 0;
 }
 
 static const struct of_device_id rk3066_hdmi_dt_ids[] = {
@@ -873,7 +871,7 @@ MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
 
 struct platform_driver rk3066_hdmi_driver = {
        .probe  = rk3066_hdmi_probe,
-       .remove = rk3066_hdmi_remove,
+       .remove_new = rk3066_hdmi_remove,
        .driver = {
                .name = "rockchip-rk3066-hdmi",
                .of_match_table = rk3066_hdmi_dt_ids,
index d97f2edc646b77bf0f3d6beea10335ce6ea0c62b..ab55d71325500d84461c7bd1c1ceb1db12cd6222 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
@@ -224,10 +225,7 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
 static const struct drm_driver rockchip_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
        .dumb_create            = rockchip_gem_dumb_create,
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table      = rockchip_gem_prime_import_sg_table,
-       .gem_prime_mmap         = drm_gem_prime_mmap,
        .fops                   = &rockchip_drm_driver_fops,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
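
This hunk (and the matching Tegra one further down) drops the explicit PRIME hooks: the DRM core now falls back to drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle() when a driver leaves them unset, and PRIME mmap goes through the core's common path without a per-driver .gem_prime_mmap hook, which is presumably also why the Tegra GEM hunk below loses its reservation-lock assertion. A sketch of a driver struct relying on the defaults; every name here is hypothetical:

    #include <drm/drm_drv.h>
    #include <drm/drm_gem.h>

    DEFINE_DRM_GEM_FOPS(bar_fops);

    static const struct drm_driver bar_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            /*
             * No .prime_handle_to_fd / .prime_fd_to_handle here: with
             * the hooks left unset, the core uses its GEM PRIME
             * defaults.
             */
            .fops  = &bar_fops,
            .name  = "bar",
            .desc  = "PRIME-defaults sketch",
            .date  = "20230815",
            .major = 1,
    };
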
@@ -450,13 +448,11 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int rockchip_drm_platform_remove(struct platform_device *pdev)
+static void rockchip_drm_platform_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &rockchip_drm_ops);
 
        rockchip_drm_match_remove(&pdev->dev);
-
-       return 0;
 }
 
 static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
@@ -475,7 +471,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 
 static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
-       .remove = rockchip_drm_platform_remove,
+       .remove_new = rockchip_drm_platform_remove,
        .shutdown = rockchip_drm_platform_shutdown,
        .driver = {
                .name = "rockchip-drm",
index a530ecc4d207c66eb621d1065d418a5cd63a9707..86fd9f51c6928d89f67c848e8d6f3a13d3fbead8 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/overflow.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
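
The include churn in this and the surrounding hunks appears to be part of the tree-wide cleanup to make DT-related includes explicit: of_device.h used to drag in of_platform.h and platform_device.h, so many drivers only compiled by accident. Each file now names exactly what it uses, along the lines of:

    #include <linux/mod_devicetable.h>  /* struct of_device_id */
    #include <linux/of.h>               /* of_* devicetree accessors */
    #include <linux/platform_device.h>  /* struct platform_device */
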
index ca73b8ccc29f43e2112445f56b1fe27439209a6c..583df4d22f7e9014e96de40af9166dad94bb7356 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
index 68f6ebb33460bc7992eb31831073ea66901bf943..5828593877923f2157ee6444c3fb17a8cd13e044 100644 (file)
@@ -739,19 +739,17 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int rockchip_lvds_remove(struct platform_device *pdev)
+static void rockchip_lvds_remove(struct platform_device *pdev)
 {
        struct rockchip_lvds *lvds = platform_get_drvdata(pdev);
 
        component_del(&pdev->dev, &rockchip_lvds_component_ops);
        clk_unprepare(lvds->pclk);
-
-       return 0;
 }
 
 struct platform_driver rockchip_lvds_driver = {
        .probe = rockchip_lvds_probe,
-       .remove = rockchip_lvds_remove,
+       .remove_new = rockchip_lvds_remove,
        .driver = {
                   .name = "rockchip-lvds",
                   .of_match_table = of_match_ptr(rockchip_lvds_dt_ids),
index 9d30aa73b54222595ae7aad417bbb74cedf25ce5..62b573f282a7ae00bb72ded614e86a0946f0b42b 100644 (file)
@@ -264,16 +264,14 @@ static int vop2_probe(struct platform_device *pdev)
        return component_add(dev, &vop2_component_ops);
 }
 
-static int vop2_remove(struct platform_device *pdev)
+static void vop2_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vop2_component_ops);
-
-       return 0;
 }
 
 struct platform_driver vop2_platform_driver = {
        .probe = vop2_probe,
-       .remove = vop2_remove,
+       .remove_new = vop2_remove,
        .driver = {
                .name = "rockchip-vop2",
                .of_match_table = of_match_ptr(vop2_dt_match),
index 20ac7811c5eb74d793b88e48f46d2a8fb6142cb1..7b28050067769d96baac6c8e6d05a944c61ed676 100644 (file)
@@ -1163,16 +1163,14 @@ static int vop_probe(struct platform_device *pdev)
        return component_add(dev, &vop_component_ops);
 }
 
-static int vop_remove(struct platform_device *pdev)
+static void vop_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vop_component_ops);
-
-       return 0;
 }
 
 struct platform_driver vop_platform_driver = {
        .probe = vop_probe,
-       .remove = vop_remove,
+       .remove_new = vop_remove,
        .driver = {
                .name = "rockchip-vop",
                .of_match_table = vop_driver_dt_match,
index b2bbc8a68b30562d14b53cf8fc2e5bcf3bb3c75d..a42763e1429dc16ea653e62023174348e0002b5a 100644 (file)
@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 {
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
-       int r;
+       unsigned long index;
 
        dma_fence_put(f);
 
        /* Wait for all dependencies to avoid data corruption */
-       while (!xa_empty(&job->dependencies)) {
-               f = xa_erase(&job->dependencies, job->last_dependency++);
-               r = dma_fence_add_callback(f, &job->finish_cb,
-                                          drm_sched_entity_kill_jobs_cb);
-               if (!r)
+       xa_for_each(&job->dependencies, index, f) {
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+               if (s_fence && f == &s_fence->scheduled) {
+                       /* The dependencies array had a reference on the scheduled
+                        * fence, and the finished fence refcount might have
+                        * dropped to zero. Use dma_fence_get_rcu() so we get
+                        * a NULL fence in that case.
+                        */
+                       f = dma_fence_get_rcu(&s_fence->finished);
+
+                       /* Now that we have a reference on the finished fence,
+                        * we can release the reference the dependencies array
+                        * had on the scheduled fence.
+                        */
+                       dma_fence_put(&s_fence->scheduled);
+               }
+
+               xa_erase(&job->dependencies, index);
+               if (f && !dma_fence_add_callback(f, &job->finish_cb,
+                                                drm_sched_entity_kill_jobs_cb))
                        return;
 
                dma_fence_put(f);
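
The rewrite above stops consuming the dependency array via last_dependency and instead walks every remaining entry with xa_for_each(). For a scheduler fence the wait is retargeted from the scheduled fence (which the array holds a reference on) to the finished fence, whose refcount may already have dropped to zero; both fences live in the same RCU-freed drm_sched_fence, so dma_fence_get_rcu() either takes a fresh reference or reports NULL. Condensed, the per-entry dance from the hunk (names as in the patch) is:

    f = dma_fence_get_rcu(&s_fence->finished); /* NULL once refcount==0 */
    dma_fence_put(&s_fence->scheduled);        /* drop the array's ref */
    xa_erase(&job->dependencies, index);       /* slot is now dead */
    if (f && !dma_fence_add_callback(f, &job->finish_cb,
                                     drm_sched_entity_kill_jobs_cb))
            return;                            /* re-armed; wait for f */
    dma_fence_put(f);                          /* already signaled */
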
@@ -415,8 +431,17 @@ static struct dma_fence *
 drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
 {
-       if (!xa_empty(&job->dependencies))
-               return xa_erase(&job->dependencies, job->last_dependency++);
+       struct dma_fence *f;
+
+       /* We keep the fence around, so we can iterate over all dependencies
+        * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+        * before killing the job.
+        */
+       f = xa_load(&job->dependencies, job->last_dependency);
+       if (f) {
+               job->last_dependency++;
+               return dma_fence_get(f);
+       }
 
        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);
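
The counterpart change: the run path no longer erases dependencies as it consumes them. xa_load() peeks at the cursor position and dma_fence_get() hands the caller its own reference, while the xarray keeps the entry (and its reference) alive so the kill path above can still iterate the full set; the array's references are finally dropped when the job is cleaned up. In short:

    /* peek-and-reference instead of erase, names as in the patch */
    f = xa_load(&job->dependencies, job->last_dependency);
    if (f) {
            job->last_dependency++;    /* advance the cursor only */
            return dma_fence_get(f);   /* caller's ref; the array keeps
                                        * its own until job cleanup */
    }
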
index ef120475e7c618c0a7de2266e2f5bb0d940f179a..06cedfe4b4867a7c2d42622f7a143512e665d812 100644 (file)
@@ -48,8 +48,32 @@ static void __exit drm_sched_fence_slab_fini(void)
        kmem_cache_destroy(sched_fence_slab);
 }
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+                                      struct dma_fence *fence)
 {
+       /*
+        * smp_store_release() to ensure another thread racing us
+        * in drm_sched_fence_set_deadline_finished() sees the
+        * fence's parent set before test_bit()
+        */
+       smp_store_release(&s_fence->parent, dma_fence_get(fence));
+       if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+                    &s_fence->finished.flags))
+               dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                              struct dma_fence *parent)
+{
+       /* Set the parent before signaling the scheduled fence, such that
+        * any waiter expecting the parent to be filled after the job has
+        * been scheduled (which is the case for drivers delegating waits
+        * to some firmware) doesn't have to busy wait for parent to show
+        * up.
+        */
+       if (!IS_ERR_OR_NULL(parent))
+               drm_sched_fence_set_parent(fence, parent);
+
        dma_fence_signal(&fence->scheduled);
 }
 
@@ -181,20 +205,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
 }
 EXPORT_SYMBOL(to_drm_sched_fence);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-                               struct dma_fence *fence)
-{
-       /*
-        * smp_store_release() to ensure another thread racing us
-        * in drm_sched_fence_set_deadline_finished() sees the
-        * fence's parent set before test_bit()
-        */
-       smp_store_release(&s_fence->parent, dma_fence_get(fence));
-       if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
-                    &s_fence->finished.flags))
-               dma_fence_set_deadline(fence, s_fence->deadline);
-}
-
 struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
                                              void *owner)
 {
index 7b2bfc10c1a54c497020451841a5ffafef512399..506371c427451a07f8b57662f33a5115b09bc062 100644 (file)
@@ -1043,10 +1043,9 @@ static int drm_sched_main(void *param)
                trace_drm_run_job(sched_job, entity);
                fence = sched->ops->run_job(sched_job);
                complete_all(&entity->entity_idle);
-               drm_sched_fence_scheduled(s_fence);
+               drm_sched_fence_scheduled(s_fence, fence);
 
                if (!IS_ERR_OR_NULL(fence)) {
-                       drm_sched_fence_set_parent(s_fence, fence);
                        /* Drop for original kref_init of the fence */
                        dma_fence_put(fence);
 
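
These three hunks change the drm_sched_fence_scheduled() contract: the backend's hardware fence is passed in and published as s_fence->parent (still via smp_store_release(), paired with the acquire in drm_sched_fence_set_deadline_finished()) before dma_fence_signal(&fence->scheduled) runs. A waiter woken by the scheduled fence can therefore rely on parent being visible instead of busy-waiting on it. Seen from a backend, with a hypothetical driver; baz_submit() is a stand-in for queueing work to hardware:

    static struct dma_fence *baz_run_job(struct drm_sched_job *sched_job)
    {
            struct dma_fence *hw_fence = baz_submit(sched_job);

            /*
             * The scheduler core now does, in this order:
             *   drm_sched_fence_scheduled(s_fence, hw_fence);
             *     -> smp_store_release(&s_fence->parent, ...);
             *     -> dma_fence_signal(&s_fence->scheduled);
             */
            return hw_fence;
    }
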
index 8cbf5aa66e191bb9d62793ef82e9f754d13b0f7c..5a80b228d18cae33e49d233d9b867b23b3db0b12 100644 (file)
@@ -99,33 +99,61 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
                .default_vcomh = 0x40,
                .default_dclk_div = 1,
                .default_dclk_frq = 5,
+               .default_width = 132,
+               .default_height = 64,
                .page_mode_only = 1,
+               .page_height = 8,
        },
        [SSD1305_ID] = {
                .default_vcomh = 0x34,
                .default_dclk_div = 1,
                .default_dclk_frq = 7,
+               .default_width = 132,
+               .default_height = 64,
+               .page_height = 8,
        },
        [SSD1306_ID] = {
                .default_vcomh = 0x20,
                .default_dclk_div = 1,
                .default_dclk_frq = 8,
                .need_chargepump = 1,
+               .default_width = 128,
+               .default_height = 64,
+               .page_height = 8,
        },
        [SSD1307_ID] = {
                .default_vcomh = 0x20,
                .default_dclk_div = 2,
                .default_dclk_frq = 12,
                .need_pwm = 1,
+               .default_width = 128,
+               .default_height = 39,
+               .page_height = 8,
        },
        [SSD1309_ID] = {
                .default_vcomh = 0x34,
                .default_dclk_div = 1,
                .default_dclk_frq = 10,
+               .default_width = 128,
+               .default_height = 64,
+               .page_height = 8,
        }
 };
 EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
 
+struct ssd130x_plane_state {
+       struct drm_shadow_plane_state base;
+       /* Intermediate buffer to convert pixels from XRGB8888 to HW format */
+       u8 *buffer;
+       /* Buffer to store pixels in HW format and written to the panel */
+       u8 *data_array;
+};
+
+static inline struct ssd130x_plane_state *to_ssd130x_plane_state(struct drm_plane_state *state)
+{
+       return container_of(state, struct ssd130x_plane_state, base.base);
+}
+
 static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm)
 {
        return container_of(drm, struct ssd130x_device, drm);
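
Two additions in this hunk. The variant table grows per-controller default_width/default_height/page_height values, so ssd130x_parse_properties() (further down) no longer falls back to a hardcoded 96x16 when the DT omits solomon,width/height. And ssd130x_plane_state subclasses drm_shadow_plane_state to carry two per-commit buffers, recovered from the core's drm_plane_state via container_of() through base.base, since drm_shadow_plane_state embeds drm_plane_state as its first member. The generic form of the subclassing trick, with hypothetical names:

    /* base.base is the drm_plane_state the core passes around */
    struct my_plane_state {
            struct drm_shadow_plane_state base;
            u8 *staging;
    };

    static inline struct my_plane_state *
    to_my_plane_state(struct drm_plane_state *state)
    {
            return container_of(state, struct my_plane_state, base.base);
    }
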
@@ -419,26 +447,25 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
                                 SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
 }
 
-static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
+static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+                              struct ssd130x_plane_state *ssd130x_state,
                               struct drm_rect *rect)
 {
        unsigned int x = rect->x1;
        unsigned int y = rect->y1;
+       u8 *buf = ssd130x_state->buffer;
+       u8 *data_array = ssd130x_state->data_array;
        unsigned int width = drm_rect_width(rect);
        unsigned int height = drm_rect_height(rect);
        unsigned int line_length = DIV_ROUND_UP(width, 8);
-       unsigned int pages = DIV_ROUND_UP(height, 8);
+       unsigned int page_height = ssd130x->device_info->page_height;
+       unsigned int pages = DIV_ROUND_UP(height, page_height);
        struct drm_device *drm = &ssd130x->drm;
        u32 array_idx = 0;
        int ret, i, j, k;
-       u8 *data_array = NULL;
 
        drm_WARN_ONCE(drm, y % 8 != 0, "y must be aligned to screen page\n");
 
-       data_array = kcalloc(width, pages, GFP_KERNEL);
-       if (!data_array)
-               return -ENOMEM;
-
        /*
         * The screen is divided in pages, each having a height of 8
         * pixels, and the width of the screen. When sending a byte of
@@ -472,11 +499,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
                /* Set address range for horizontal addressing mode */
                ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
                if (ret < 0)
-                       goto out_free;
+                       return ret;
 
                ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
                if (ret < 0)
-                       goto out_free;
+                       return ret;
        }
 
        for (i = 0; i < pages; i++) {
@@ -506,11 +533,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
                                                   ssd130x->page_offset + i,
                                                   ssd130x->col_offset + x);
                        if (ret < 0)
-                               goto out_free;
+                               return ret;
 
                        ret = ssd130x_write_data(ssd130x, data_array, width);
                        if (ret < 0)
-                               goto out_free;
+                               return ret;
 
                        array_idx = 0;
                }
@@ -520,14 +547,12 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
        if (!ssd130x->page_address_mode)
                ret = ssd130x_write_data(ssd130x, data_array, width * pages);
 
-out_free:
-       kfree(data_array);
        return ret;
 }
 
-static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
+static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+                                struct ssd130x_plane_state *ssd130x_state)
 {
-       u8 *buf = NULL;
        struct drm_rect fullscreen = {
                .x1 = 0,
                .x2 = ssd130x->width,
@@ -535,51 +560,80 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
                .y2 = ssd130x->height,
        };
 
-       buf = kcalloc(DIV_ROUND_UP(ssd130x->width, 8), ssd130x->height,
-                     GFP_KERNEL);
-       if (!buf)
-               return;
-
-       ssd130x_update_rect(ssd130x, buf, &fullscreen);
-
-       kfree(buf);
+       ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
 }
 
-static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap,
+static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
+                               const struct iosys_map *vmap,
                                struct drm_rect *rect)
 {
+       struct drm_framebuffer *fb = state->fb;
        struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+       unsigned int page_height = ssd130x->device_info->page_height;
+       struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+       u8 *buf = ssd130x_state->buffer;
        struct iosys_map dst;
        unsigned int dst_pitch;
        int ret = 0;
-       u8 *buf = NULL;
 
        /* Align y to display page boundaries */
-       rect->y1 = round_down(rect->y1, 8);
-       rect->y2 = min_t(unsigned int, round_up(rect->y2, 8), ssd130x->height);
+       rect->y1 = round_down(rect->y1, page_height);
+       rect->y2 = min_t(unsigned int, round_up(rect->y2, page_height), ssd130x->height);
 
        dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
-       buf = kcalloc(dst_pitch, drm_rect_height(rect), GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
 
        ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
        if (ret)
-               goto out_free;
+               return ret;
 
        iosys_map_set_vaddr(&dst, buf);
        drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
 
        drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
 
-       ssd130x_update_rect(ssd130x, buf, rect);
-
-out_free:
-       kfree(buf);
+       ssd130x_update_rect(ssd130x, ssd130x_state, rect);
 
        return ret;
 }
 
+static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
+                                                    struct drm_atomic_state *state)
+{
+       struct drm_device *drm = plane->dev;
+       struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+       struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+       unsigned int page_height = ssd130x->device_info->page_height;
+       unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
+       const struct drm_format_info *fi;
+       unsigned int pitch;
+       int ret;
+
+       ret = drm_plane_helper_atomic_check(plane, state);
+       if (ret)
+               return ret;
+
+       fi = drm_format_info(DRM_FORMAT_R1);
+       if (!fi)
+               return -EINVAL;
+
+       pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+       ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
+       if (!ssd130x_state->buffer)
+               return -ENOMEM;
+
+       ssd130x_state->data_array = kcalloc(ssd130x->width, pages, GFP_KERNEL);
+       if (!ssd130x_state->data_array) {
+               kfree(ssd130x_state->buffer);
+               /* Set to prevent a double free in .atomic_destroy_state() */
+               ssd130x_state->buffer = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
                                                       struct drm_atomic_state *state)
 {
@@ -602,7 +656,7 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
                if (!drm_rect_intersect(&dst_clip, &damage))
                        continue;
 
-               ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+               ssd130x_fb_blit_rect(plane_state, &shadow_plane_state->data[0], &dst_clip);
        }
 
        drm_dev_exit(idx);
@@ -613,19 +667,72 @@ static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
 {
        struct drm_device *drm = plane->dev;
        struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+       struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane->state);
        int idx;
 
        if (!drm_dev_enter(drm, &idx))
                return;
 
-       ssd130x_clear_screen(ssd130x);
+       ssd130x_clear_screen(ssd130x, ssd130x_state);
 
        drm_dev_exit(idx);
 }
 
+/* Called during init to allocate the plane's atomic state. */
+static void ssd130x_primary_plane_reset(struct drm_plane *plane)
+{
+       struct ssd130x_plane_state *ssd130x_state;
+
+       WARN_ON(plane->state);
+
+       ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
+       if (!ssd130x_state)
+               return;
+
+       __drm_gem_reset_shadow_plane(plane, &ssd130x_state->base);
+}
+
+static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_plane *plane)
+{
+       struct drm_shadow_plane_state *new_shadow_plane_state;
+       struct ssd130x_plane_state *old_ssd130x_state;
+       struct ssd130x_plane_state *ssd130x_state;
+
+       if (WARN_ON(!plane->state))
+               return NULL;
+
+       old_ssd130x_state = to_ssd130x_plane_state(plane->state);
+       ssd130x_state = kmemdup(old_ssd130x_state, sizeof(*ssd130x_state), GFP_KERNEL);
+       if (!ssd130x_state)
+               return NULL;
+
+       /* The buffers are not duplicated and are allocated in .atomic_check */
+       ssd130x_state->buffer = NULL;
+       ssd130x_state->data_array = NULL;
+
+       new_shadow_plane_state = &ssd130x_state->base;
+
+       __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+
+       return &new_shadow_plane_state->base;
+}
+
+static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
+                                               struct drm_plane_state *state)
+{
+       struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+
+       kfree(ssd130x_state->data_array);
+       kfree(ssd130x_state->buffer);
+
+       __drm_gem_destroy_shadow_plane_state(&ssd130x_state->base);
+
+       kfree(ssd130x_state);
+}
+
 static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
        DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
-       .atomic_check = drm_plane_helper_atomic_check,
+       .atomic_check = ssd130x_primary_plane_helper_atomic_check,
        .atomic_update = ssd130x_primary_plane_helper_atomic_update,
        .atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
 };
@@ -633,8 +740,10 @@ static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs =
 static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
+       .reset = ssd130x_primary_plane_reset,
+       .atomic_duplicate_state = ssd130x_primary_plane_duplicate_state,
+       .atomic_destroy_state = ssd130x_primary_plane_destroy_state,
        .destroy = drm_plane_cleanup,
-       DRM_GEM_SHADOW_PLANE_FUNCS,
 };
 
 static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
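
The point of the plane-state buffers shows across the last few hunks: atomic_update and atomic_disable are not allowed to fail, so the kcalloc() calls move out of ssd130x_update_rect()/ssd130x_fb_blit_rect() and into .atomic_check, where -ENOMEM can still be reported to userspace. That in turn forces the driver to replace the stock DRM_GEM_SHADOW_PLANE_FUNCS with its own reset/duplicate/destroy trio: duplicate deliberately NULLs the buffer pointers so each new state gets fresh allocations in check, destroy frees them, and the error path in atomic_check resets ->buffer to NULL after freeing it to guard against a double free. The duplicate-side idiom, sketched with hypothetical names:

    /* copy the struct, then disown what states must not share */
    new = kmemdup(old, sizeof(*new), GFP_KERNEL);
    if (!new)
            return NULL;
    new->staging = NULL;    /* reallocated in .atomic_check; never
                             * shared, never double-freed */
    __drm_gem_duplicate_shadow_plane_state(plane, &new->base);
    return &new->base.base;
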
@@ -684,14 +793,18 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
                return;
 
        ret = ssd130x_init(ssd130x);
-       if (ret) {
-               ssd130x_power_off(ssd130x);
-               return;
-       }
+       if (ret)
+               goto power_off;
 
        ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
 
        backlight_enable(ssd130x->bl_dev);
+
+       return;
+
+power_off:
+       ssd130x_power_off(ssd130x);
+       return;
 }
 
 static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
@@ -798,10 +911,10 @@ static void ssd130x_parse_properties(struct ssd130x_device *ssd130x)
        struct device *dev = ssd130x->dev;
 
        if (device_property_read_u32(dev, "solomon,width", &ssd130x->width))
-               ssd130x->width = 96;
+               ssd130x->width = ssd130x->device_info->default_width;
 
        if (device_property_read_u32(dev, "solomon,height", &ssd130x->height))
-               ssd130x->height = 16;
+               ssd130x->height = ssd130x->device_info->default_height;
 
        if (device_property_read_u32(dev, "solomon,page-offset", &ssd130x->page_offset))
                ssd130x->page_offset = 1;
index db03ee5db3921ba08a7b26186c1a3dd6ac9c69b6..87968b3e7fb8279dc7afa03c3864d4e0f0c2d51a 100644 (file)
@@ -37,6 +37,9 @@ struct ssd130x_deviceinfo {
        u32 default_vcomh;
        u32 default_dclk_div;
        u32 default_dclk_frq;
+       u32 default_width;
+       u32 default_height;
+       u32 page_height;
        int need_pwm;
        int need_chargepump;
        bool page_mode_only;
index b96fc6837b0d20ce1d2f955d23b2787cd256af7e..48183bbd0590dd53f711aa690c6b6dd8195f7b75 100644 (file)
@@ -9,10 +9,8 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
index be60c0d546a34f805eabe02bb846c29d66739898..0aa39156f2fabc2c387003a5b18708366090a3db 100644 (file)
@@ -5,10 +5,11 @@
 
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
index ab0e5cce7adb634583fe33cf46f7854bccad6508..d7b143a7560157beb391b619b23e0ac7b7bbd46c 100644 (file)
@@ -5,10 +5,8 @@
 
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_graph.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <video/mipi_display.h>
 
 #include <drm/drm_atomic_helper.h>
index 142a8e1b44368e70b9c14f95474f6808606031d3..33487a1fed8f57f311398e5984c43b6629aa2fc3 100644 (file)
@@ -258,10 +258,9 @@ static int sti_compositor_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sti_compositor_ops);
 }
 
-static int sti_compositor_remove(struct platform_device *pdev)
+static void sti_compositor_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sti_compositor_ops);
-       return 0;
 }
 
 struct platform_driver sti_compositor_driver = {
@@ -270,7 +269,7 @@ struct platform_driver sti_compositor_driver = {
                .of_match_table = compositor_of_match,
        },
        .probe = sti_compositor_probe,
-       .remove = sti_compositor_remove,
+       .remove_new = sti_compositor_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index 1b87b5899f9eed798c0192cef2e8a6544fe34923..2390c1bb6596f0f0a00e436b42a66ff60aee3fca 100644 (file)
@@ -8,7 +8,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -246,11 +248,9 @@ static int sti_platform_probe(struct platform_device *pdev)
        return component_master_add_with_match(dev, &sti_ops, match);
 }
 
-static int sti_platform_remove(struct platform_device *pdev)
+static void sti_platform_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &sti_ops);
-
-       return 0;
 }
 
 static const struct of_device_id sti_dt_ids[] = {
@@ -261,7 +261,7 @@ MODULE_DEVICE_TABLE(of, sti_dt_ids);
 
 static struct platform_driver sti_platform_driver = {
        .probe = sti_platform_probe,
-       .remove = sti_platform_remove,
+       .remove_new = sti_platform_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = sti_dt_ids,
index 0c6679e361c82732bca7f96cbbaababd9e017143..fd1df4ce385264ec2b4c5dc3f9bb009b12f0045b 100644 (file)
@@ -567,10 +567,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sti_dvo_ops);
 }
 
-static int sti_dvo_remove(struct platform_device *pdev)
+static void sti_dvo_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sti_dvo_ops);
-       return 0;
 }
 
 static const struct of_device_id dvo_of_match[] = {
@@ -586,7 +585,7 @@ struct platform_driver sti_dvo_driver = {
                .of_match_table = dvo_of_match,
        },
        .probe = sti_dvo_probe,
-       .remove = sti_dvo_remove,
+       .remove_new = sti_dvo_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index 15097ac6793146bf4be75dc21da4d8303ada30b9..6ee35612a14e58f4232df3c47d7037abed55f41c 100644 (file)
@@ -792,10 +792,9 @@ static int sti_hda_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sti_hda_ops);
 }
 
-static int sti_hda_remove(struct platform_device *pdev)
+static void sti_hda_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sti_hda_ops);
-       return 0;
 }
 
 static const struct of_device_id hda_of_match[] = {
@@ -812,7 +811,7 @@ struct platform_driver sti_hda_driver = {
                .of_match_table = hda_of_match,
        },
        .probe = sti_hda_probe,
-       .remove = sti_hda_remove,
+       .remove_new = sti_hda_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index dc1562f14ceb04f4be586dfd0114baf965baa86e..500936d5743c5278f218336a65212fb5b8384f7f 100644 (file)
@@ -1472,7 +1472,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int sti_hdmi_remove(struct platform_device *pdev)
+static void sti_hdmi_remove(struct platform_device *pdev)
 {
        struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
 
@@ -1480,8 +1480,6 @@ static int sti_hdmi_remove(struct platform_device *pdev)
        if (hdmi->audio_pdev)
                platform_device_unregister(hdmi->audio_pdev);
        component_del(&pdev->dev, &sti_hdmi_ops);
-
-       return 0;
 }
 
 struct platform_driver sti_hdmi_driver = {
@@ -1491,7 +1489,7 @@ struct platform_driver sti_hdmi_driver = {
                .of_match_table = hdmi_of_match,
        },
        .probe = sti_hdmi_probe,
-       .remove = sti_hdmi_remove,
+       .remove_new = sti_hdmi_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index 02b77279f6e4c44d718f0356d26ca8040c2e2511..0fb48ac044d8a003e48f93164593387ecb9d77a6 100644 (file)
@@ -1400,10 +1400,9 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sti_hqvdp_ops);
 }
 
-static int sti_hqvdp_remove(struct platform_device *pdev)
+static void sti_hqvdp_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sti_hqvdp_ops);
-       return 0;
 }
 
 static const struct of_device_id hqvdp_of_match[] = {
@@ -1419,7 +1418,7 @@ struct platform_driver sti_hqvdp_driver = {
                .of_match_table = hqvdp_of_match,
        },
        .probe = sti_hqvdp_probe,
-       .remove = sti_hqvdp_remove,
+       .remove_new = sti_hqvdp_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index 2499715a69b79e45fdc75dc47e438eb18be16329..64615638b79a28a79e1e03effa162cbaf8c7a5a3 100644 (file)
@@ -872,10 +872,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
        return component_add(dev, &sti_tvout_ops);
 }
 
-static int sti_tvout_remove(struct platform_device *pdev)
+static void sti_tvout_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sti_tvout_ops);
-       return 0;
 }
 
 static const struct of_device_id tvout_of_match[] = {
@@ -891,7 +890,7 @@ struct platform_driver sti_tvout_driver = {
                .of_match_table = tvout_of_match,
        },
        .probe = sti_tvout_probe,
-       .remove = sti_tvout_remove,
+       .remove_new = sti_tvout_remove,
 };
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
index cb4404b3ce62ceacd407a3d31c2fc22e23a67882..c68c831136c9b02b48adf56654a538e2e81fcf43 100644 (file)
@@ -10,8 +10,9 @@
 
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_aperture.h>
@@ -213,7 +214,7 @@ err_put:
        return ret;
 }
 
-static int stm_drm_platform_remove(struct platform_device *pdev)
+static void stm_drm_platform_remove(struct platform_device *pdev)
 {
        struct drm_device *ddev = platform_get_drvdata(pdev);
 
@@ -222,8 +223,6 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
        drm_dev_unregister(ddev);
        drv_unload(ddev);
        drm_dev_put(ddev);
-
-       return 0;
 }
 
 static const struct of_device_id drv_dt_ids[] = {
@@ -234,7 +233,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
 
 static struct platform_driver stm_drm_platform_driver = {
        .probe = stm_drm_platform_probe,
-       .remove = stm_drm_platform_remove,
+       .remove_new = stm_drm_platform_remove,
        .driver = {
                .name = "stm32-display",
                .of_match_table = drv_dt_ids,
index 1750b6a25e8713823243ac32e919bb7f4c3b1754..d5f8c923d7bc745004d0d9ab150c963c8fbe0faa 100644 (file)
@@ -535,15 +535,13 @@ err_clk_get:
        return ret;
 }
 
-static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
+static void dw_mipi_dsi_stm_remove(struct platform_device *pdev)
 {
        struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
 
        dw_mipi_dsi_remove(dsi->dsi);
        clk_disable_unprepare(dsi->pllref_clk);
        regulator_disable(dsi->vdd_supply);
-
-       return 0;
 }
 
 static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
@@ -588,7 +586,7 @@ static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
 
 static struct platform_driver dw_mipi_dsi_stm_driver = {
        .probe          = dw_mipi_dsi_stm_probe,
-       .remove         = dw_mipi_dsi_stm_remove,
+       .remove_new     = dw_mipi_dsi_stm_remove,
        .driver         = {
                .of_match_table = dw_mipi_dsi_stm_dt_ids,
                .name   = "stm32-display-dsi",
index b8be4c1db4235233fe4e7d23a5649410a667fbee..5576fdae4962335e46dedaaeb320c302fb770c3b 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/interrupt.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
 #include <linux/of_graph.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
index b11dbd50d73e98fbcc3434dfbec08fea9d7531c2..335fd0edb904cfc536a1871ade229109c15ebbf9 100644 (file)
@@ -965,11 +965,9 @@ static int sun4i_backend_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun4i_backend_ops);
 }
 
-static int sun4i_backend_remove(struct platform_device *pdev)
+static void sun4i_backend_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun4i_backend_ops);
-
-       return 0;
 }
 
 static const struct sun4i_backend_quirks sun4i_backend_quirks = {
@@ -1028,7 +1026,7 @@ MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
 
 static struct platform_driver sun4i_backend_platform_driver = {
        .probe          = sun4i_backend_probe,
-       .remove         = sun4i_backend_remove,
+       .remove_new     = sun4i_backend_remove,
        .driver         = {
                .name           = "sun4i-backend",
                .of_match_table = sun4i_backend_of_table,
index daa7faf72a4b785876d11928f1e54b44d2f7580d..6a8dfc022d3c1cc21e1ac48fba28a96eacb70a87 100644 (file)
@@ -408,11 +408,9 @@ static int sun4i_drv_probe(struct platform_device *pdev)
                return 0;
 }
 
-static int sun4i_drv_remove(struct platform_device *pdev)
+static void sun4i_drv_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &sun4i_drv_master_ops);
-
-       return 0;
 }
 
 static const struct of_device_id sun4i_drv_of_table[] = {
@@ -438,7 +436,7 @@ MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
 
 static struct platform_driver sun4i_drv_platform_driver = {
        .probe          = sun4i_drv_probe,
-       .remove         = sun4i_drv_remove,
+       .remove_new     = sun4i_drv_remove,
        .driver         = {
                .name           = "sun4i-drm",
                .of_match_table = sun4i_drv_of_table,
index 799ab7460ae5427901bc7e018f2d61c95aac8be4..280d444dbb6686c06dd3fa65cfdce6dbd85d5101 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -634,11 +634,9 @@ static int sun4i_frontend_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun4i_frontend_ops);
 }
 
-static int sun4i_frontend_remove(struct platform_device *pdev)
+static void sun4i_frontend_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun4i_frontend_ops);
-
-       return 0;
 }
 
 static int sun4i_frontend_runtime_resume(struct device *dev)
@@ -719,7 +717,7 @@ MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table);
 
 static struct platform_driver sun4i_frontend_driver = {
        .probe          = sun4i_frontend_probe,
-       .remove         = sun4i_frontend_remove,
+       .remove_new     = sun4i_frontend_remove,
        .driver         = {
                .name           = "sun4i-frontend",
                .of_match_table = sun4i_frontend_of_table,
index 2e7b76e50c2ba21942fa16b4c5d42f33c6f15326..61c24088772c66581f7d0a3407078b4b3be95d82 100644 (file)
@@ -8,6 +8,7 @@
 #define _SUN4I_FRONTEND_H_
 
 #include <linux/list.h>
+#include <linux/mod_devicetable.h>
 
 #define SUN4I_FRONTEND_EN_REG                  0x000
 #define SUN4I_FRONTEND_EN_EN                           BIT(0)
index c0df5e892fa7e0b251d892fc8a0b974448b3ae96..152375f3de2e2929c105d56255d8c5fe474fb7d0 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/i2c.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -693,11 +693,9 @@ static int sun4i_hdmi_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun4i_hdmi_ops);
 }
 
-static int sun4i_hdmi_remove(struct platform_device *pdev)
+static void sun4i_hdmi_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun4i_hdmi_ops);
-
-       return 0;
 }
 
 static const struct of_device_id sun4i_hdmi_of_table[] = {
@@ -710,7 +708,7 @@ MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
 
 static struct platform_driver sun4i_hdmi_driver = {
        .probe          = sun4i_hdmi_probe,
-       .remove         = sun4i_hdmi_remove,
+       .remove_new     = sun4i_hdmi_remove,
        .driver         = {
                .name           = "sun4i-hdmi",
                .of_match_table = sun4i_hdmi_of_table,
index 6a52fb12cbfbc903b1d71d472a40c8e1f2a412c0..a1a2c845ade0ca465e48116d6954d82d8fc3e256 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/ioport.h>
 #include <linux/media-bus-format.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
@@ -1331,11 +1331,9 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun4i_tcon_ops);
 }
 
-static int sun4i_tcon_remove(struct platform_device *pdev)
+static void sun4i_tcon_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun4i_tcon_ops);
-
-       return 0;
 }
 
 /* platform specific TCON muxing callbacks */
@@ -1570,7 +1568,7 @@ EXPORT_SYMBOL(sun4i_tcon_of_table);
 
 static struct platform_driver sun4i_tcon_platform_driver = {
        .probe          = sun4i_tcon_probe,
-       .remove         = sun4i_tcon_remove,
+       .remove_new     = sun4i_tcon_remove,
        .driver         = {
                .name           = "sun4i-tcon",
                .of_match_table = sun4i_tcon_of_table,
index 9625a00a48ba145da2ecb43070833d9b45437b6c..ec65d9d59de7dc1a44e039743f95362c423070f3 100644 (file)
@@ -546,11 +546,9 @@ static int sun4i_tv_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun4i_tv_ops);
 }
 
-static int sun4i_tv_remove(struct platform_device *pdev)
+static void sun4i_tv_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun4i_tv_ops);
-
-       return 0;
 }
 
 static const struct of_device_id sun4i_tv_of_table[] = {
@@ -561,7 +559,7 @@ MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
 
 static struct platform_driver sun4i_tv_platform_driver = {
        .probe          = sun4i_tv_probe,
-       .remove         = sun4i_tv_remove,
+       .remove_new     = sun4i_tv_remove,
        .driver         = {
                .name           = "sun4i-tve",
                .of_match_table = sun4i_tv_of_table,
index 4fbe9a6b5182c48c9c0f0870706ca801df5d34c1..0d342f43fa93e460eb4fac628eb1e50c9140eac7 100644 (file)
@@ -95,11 +95,9 @@ static int sun6i_drc_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun6i_drc_ops);
 }
 
-static int sun6i_drc_remove(struct platform_device *pdev)
+static void sun6i_drc_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun6i_drc_ops);
-
-       return 0;
 }
 
 static const struct of_device_id sun6i_drc_of_table[] = {
@@ -114,7 +112,7 @@ MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
 
 static struct platform_driver sun6i_drc_platform_driver = {
        .probe          = sun6i_drc_probe,
-       .remove         = sun6i_drc_remove,
+       .remove_new     = sun6i_drc_remove,
        .driver         = {
                .name           = "sun6i-drc",
                .of_match_table = sun6i_drc_of_table,
index 760ff05eabf4076ec522dc17ab7a2632f4d141c8..4abf4f1020074e0d259a6a31d2f3d2c40a101130 100644 (file)
@@ -1200,7 +1200,7 @@ err_attach_clk:
        return ret;
 }
 
-static int sun6i_dsi_remove(struct platform_device *pdev)
+static void sun6i_dsi_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1211,8 +1211,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
                clk_rate_exclusive_put(dsi->mod_clk);
 
        regmap_mmio_detach_clk(dsi->regs);
-
-       return 0;
 }
 
 static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = {
@@ -1246,7 +1244,7 @@ MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
 
 static struct platform_driver sun6i_dsi_platform_driver = {
        .probe          = sun6i_dsi_probe,
-       .remove         = sun6i_dsi_remove,
+       .remove_new     = sun6i_dsi_remove,
        .driver         = {
                .name           = "sun6i-mipi-dsi",
                .of_match_table = sun6i_dsi_of_table,
index 7cab4213a6808ba09c0d80711507f1dc0c3d21d5..4727dfaa8fb98c096c35d277824ab87921754630 100644 (file)
@@ -5,7 +5,7 @@
 
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 
 #include <drm/drm_modeset_helper_vtables.h>
@@ -235,11 +235,9 @@ static int sun8i_dw_hdmi_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun8i_dw_hdmi_ops);
 }
 
-static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
+static void sun8i_dw_hdmi_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun8i_dw_hdmi_ops);
-
-       return 0;
 }
 
 static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
@@ -266,7 +264,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
 
 static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
        .probe  = sun8i_dw_hdmi_probe,
-       .remove = sun8i_dw_hdmi_remove,
+       .remove_new = sun8i_dw_hdmi_remove,
        .driver = {
                .name = "sun8i-dw-hdmi",
                .of_match_table = sun8i_dw_hdmi_dt_ids,
index ca53b5e9fffca77d058ad07f1f01516f1da6ce0c..4fa69c463dc4636d2cf8c75494da5a5c4ee98065 100644 (file)
@@ -4,8 +4,9 @@
  */
 
 #include <linux/delay.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 
 #include "sun8i_dw_hdmi.h"
 
index 11d5244a5aa5fc656f8e2ad8fda6419f7d1559c1..01382860aaeea1b0233befdb52acc9ae378ffe46 100644 (file)
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
 #include <linux/reset.h>
 
 #include <drm/drm_atomic_helper.h>
@@ -555,11 +557,9 @@ static int sun8i_mixer_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun8i_mixer_ops);
 }
 
-static int sun8i_mixer_remove(struct platform_device *pdev)
+static void sun8i_mixer_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun8i_mixer_ops);
-
-       return 0;
 }
 
 static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
@@ -711,7 +711,7 @@ MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
 
 static struct platform_driver sun8i_mixer_platform_driver = {
        .probe          = sun8i_mixer_probe,
-       .remove         = sun8i_mixer_remove,
+       .remove_new     = sun8i_mixer_remove,
        .driver         = {
                .name           = "sun8i-mixer",
                .of_match_table = sun8i_mixer_of_table,
index da97682b68351431a6f5df53403d10a87228114b..6f076cf4b40370641a00e936c21e8af4a954197e 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
 
@@ -261,11 +261,9 @@ static int sun8i_tcon_top_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &sun8i_tcon_top_ops);
 }
 
-static int sun8i_tcon_top_remove(struct platform_device *pdev)
+static void sun8i_tcon_top_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &sun8i_tcon_top_ops);
-
-       return 0;
 }
 
 static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
@@ -302,7 +300,7 @@ EXPORT_SYMBOL(sun8i_tcon_top_of_table);
 
 static struct platform_driver sun8i_tcon_top_platform_driver = {
        .probe          = sun8i_tcon_top_probe,
-       .remove         = sun8i_tcon_top_remove,
+       .remove_new     = sun8i_tcon_top_remove,
        .driver         = {
                .name           = "sun8i-tcon-top",
                .of_match_table = sun8i_tcon_top_of_table,
index 4983137781751a424a458019290ab3037bbc0eed..84e7e6bc3a0c18e53b90dac29a4d69c568c65875 100644 (file)
@@ -12,7 +12,7 @@ config DRM_TEGRA
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
-       select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+       select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
        select TEGRA_HOST1X
        select INTERCONNECT
        select IOMMU_IOVA
index 6e78416e64b0cdc25b4c25e2a227116d1d33deaf..13b182ab905fb0b46287465a67eb6bd9d1093d1c 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/iommu.h>
 #include <linux/interconnect.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
 #include <linux/pm_runtime.h>
index 4d2677dcd83158a454c30d984d184346b6c2ed18..ef02d530f78d751a51b6c1a7681ee8e6246287e6 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
@@ -447,7 +447,6 @@ static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
 static int tegra_dpaux_probe(struct platform_device *pdev)
 {
        struct tegra_dpaux *dpaux;
-       struct resource *regs;
        u32 value;
        int err;
 
@@ -461,14 +460,13 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&dpaux->list);
        dpaux->dev = &pdev->dev;
 
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
+       dpaux->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dpaux->regs))
                return PTR_ERR(dpaux->regs);
 
        dpaux->irq = platform_get_irq(pdev, 0);
        if (dpaux->irq < 0)
-               return -ENXIO;
+               return dpaux->irq;
 
        if (!pdev->dev.pm_domain) {
                dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
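
Two probe cleanups in one hunk: devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into one call, and the interrupt lookup now propagates the errno from platform_get_irq() instead of flattening it to -ENXIO. platform_get_irq() returns either a positive IRQ number or a negative error, never 0, so the negative check is the only one needed. The resulting probe shape, on a hypothetical driver:

    static int qux_probe(struct platform_device *pdev)
    {
            void __iomem *regs;
            int irq;

            regs = devm_platform_ioremap_resource(pdev, 0); /* get + map */
            if (IS_ERR(regs))
                    return PTR_ERR(regs);

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* propagate; no -ENXIO remap */

            return 0;
    }
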
index 35ff303c6674f19968fe46e96813b23086f1a85a..ff36171c8fb700bae9967961220ea7cbb262d193 100644 (file)
@@ -887,8 +887,6 @@ static const struct drm_driver tegra_drm_driver = {
        .debugfs_init = tegra_debugfs_init,
 #endif
 
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = tegra_gem_prime_import,
 
        .dumb_create = tegra_bo_dumb_create,
index e74d9be981c7949344690c235fc876be10baa00a..db6eaac3d30e63cf39c0cb072083b7e09ba85a1c 100644 (file)
@@ -59,9 +59,9 @@ static void tegra_fbdev_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops tegra_fb_ops = {
        .owner = THIS_MODULE,
-       __FB_DEFAULT_SYS_OPS_RDWR,
+       __FB_DEFAULT_DMAMEM_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
-       __FB_DEFAULT_SYS_OPS_DRAW,
+       __FB_DEFAULT_DMAMEM_OPS_DRAW,
        .fb_mmap = tegra_fb_mmap,
        .fb_destroy = tegra_fbdev_fb_destroy,
 };
@@ -132,7 +132,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
                }
        }
 
-       info->screen_base = (void __iomem *)bo->vaddr + offset;
+       info->flags |= FBINFO_VIRTFB;
+       info->screen_buffer = bo->vaddr + offset;
        info->screen_size = size;
        info->fix.smem_start = (unsigned long)(bo->iova + offset);
        info->fix.smem_len = size;
@@ -225,10 +226,6 @@ void tegra_fbdev_setup(struct drm_device *dev)
        if (ret)
                goto err_drm_client_init;
 
-       ret = tegra_fbdev_client_hotplug(&helper->client);
-       if (ret)
-               drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
        drm_client_register(&helper->client);
 
        return;
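
The Tegra fbdev emulation switches from the "system memory" fb helpers to the DMA-memory ones (the Kconfig hunk above flips FB_SYS_HELPERS to FB_DMAMEM_HELPERS): the framebuffer lives in DMA-coherent kernel memory rather than I/O memory, so it is published through screen_buffer with FBINFO_VIRTFB set instead of through the __iomem-typed screen_base. The dropped tegra_fbdev_client_hotplug() call removes the manual initial hotplug; drm_client_register() now appears to generate the client's first hotplug event itself. The helper pairing, sketched with hypothetical mmap/destroy callbacks:

    static const struct fb_ops example_fb_ops = {
            .owner = THIS_MODULE,
            __FB_DEFAULT_DMAMEM_OPS_RDWR,   /* read/write on
                                             * screen_buffer */
            DRM_FB_HELPER_DEFAULT_OPS,
            __FB_DEFAULT_DMAMEM_OPS_DRAW,   /* drawing via the sys_*()
                                             * helpers on directly
                                             * addressable memory */
            .fb_mmap = example_fb_mmap,         /* hypothetical */
            .fb_destroy = example_fb_destroy,   /* hypothetical */
    };
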
index dea38892d6e66e1d7c14bdae00a354a68e402aa7..a4023163493dca2a2f8c42a58184cbcbc656c9ae 100644 (file)
@@ -694,8 +694,6 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
        struct drm_gem_object *gem = buf->priv;
        int err;
 
-       dma_resv_assert_held(buf->resv);
-
        err = drm_gem_mmap_obj(gem, gem->size, vma);
        if (err < 0)
                return err;
index 50f77fddda54613d4cbd69e109351ae1b3e05e59..a160d01f26e1db485704cfac31d216b3df55445e 100644 (file)
@@ -7,7 +7,8 @@
 #include <linux/delay.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
@@ -70,22 +71,15 @@ static int gr2d_init(struct host1x_client *client)
                goto free;
        }
 
-       pm_runtime_enable(client->dev);
-       pm_runtime_use_autosuspend(client->dev);
-       pm_runtime_set_autosuspend_delay(client->dev, 200);
-
        err = tegra_drm_register_client(dev->dev_private, drm);
        if (err < 0) {
                dev_err(client->dev, "failed to register client: %d\n", err);
-               goto disable_rpm;
+               goto detach_iommu;
        }
 
        return 0;
 
-disable_rpm:
-       pm_runtime_dont_use_autosuspend(client->dev);
-       pm_runtime_force_suspend(client->dev);
-
+detach_iommu:
        host1x_client_iommu_detach(client);
 free:
        host1x_syncpt_put(client->syncpts[0]);
@@ -299,6 +293,7 @@ static void gr2d_remove(struct platform_device *pdev)
 {
        struct gr2d *gr2d = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
        host1x_client_unregister(&gr2d->client.base);
 }
 
@@ -372,6 +367,10 @@ static int __maybe_unused gr2d_runtime_resume(struct device *dev)
                goto disable_clk;
        }
 
+       pm_runtime_enable(dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_autosuspend_delay(dev, 500);
+
        return 0;
 
 disable_clk:
index c026c2c916c13a7e8423db0e4b9dd8e944dc0df3..00c8564520e70818c2814b9c30261f928a4755b6 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/host1x.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -80,22 +80,15 @@ static int gr3d_init(struct host1x_client *client)
                goto free;
        }
 
-       pm_runtime_enable(client->dev);
-       pm_runtime_use_autosuspend(client->dev);
-       pm_runtime_set_autosuspend_delay(client->dev, 200);
-
        err = tegra_drm_register_client(dev->dev_private, drm);
        if (err < 0) {
                dev_err(client->dev, "failed to register client: %d\n", err);
-               goto disable_rpm;
+               goto detach_iommu;
        }
 
        return 0;
 
-disable_rpm:
-       pm_runtime_dont_use_autosuspend(client->dev);
-       pm_runtime_force_suspend(client->dev);
-
+detach_iommu:
        host1x_client_iommu_detach(client);
 free:
        host1x_syncpt_put(client->syncpts[0]);
@@ -554,6 +547,7 @@ static void gr3d_remove(struct platform_device *pdev)
 {
        struct gr3d *gr3d = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
        host1x_client_unregister(&gr3d->client.base);
 }
 
@@ -607,6 +601,10 @@ static int __maybe_unused gr3d_runtime_resume(struct device *dev)
                goto disable_clk;
        }
 
+       pm_runtime_enable(dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_autosuspend_delay(dev, 500);
+
        return 0;
 
 disable_clk:
index 6eac54ae12053ac80b09f1863e2211d29e24a8f5..80c760986d9e984a3b655ae5864cc26aa6db718d 100644 (file)
@@ -10,7 +10,8 @@
 #include <linux/hdmi.h>
 #include <linux/math64.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
@@ -19,6 +20,7 @@
 #include <soc/tegra/common.h>
 #include <sound/hdmi-codec.h>
 
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_debugfs.h>
@@ -1544,26 +1546,47 @@ static int tegra_hdmi_init(struct host1x_client *client)
 {
        struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
        struct drm_device *drm = dev_get_drvdata(client->host);
+       struct drm_connector *connector;
        int err;
 
        hdmi->output.dev = client->dev;
 
-       drm_connector_init_with_ddc(drm, &hdmi->output.connector,
-                                   &tegra_hdmi_connector_funcs,
-                                   DRM_MODE_CONNECTOR_HDMIA,
-                                   hdmi->output.ddc);
-       drm_connector_helper_add(&hdmi->output.connector,
-                                &tegra_hdmi_connector_helper_funcs);
-       hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
-
        drm_simple_encoder_init(drm, &hdmi->output.encoder,
                                DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&hdmi->output.encoder,
                               &tegra_hdmi_encoder_helper_funcs);
 
-       drm_connector_attach_encoder(&hdmi->output.connector,
-                                         &hdmi->output.encoder);
-       drm_connector_register(&hdmi->output.connector);
+       if (hdmi->output.bridge) {
+               err = drm_bridge_attach(&hdmi->output.encoder, hdmi->output.bridge,
+                                       NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+               if (err) {
+                       dev_err(client->dev, "failed to attach bridge: %d\n",
+                               err);
+                       return err;
+               }
+
+               connector = drm_bridge_connector_init(drm, &hdmi->output.encoder);
+               if (IS_ERR(connector)) {
+                       dev_err(client->dev,
+                               "failed to initialize bridge connector: %pe\n",
+                               connector);
+                       return PTR_ERR(connector);
+               }
+
+               drm_connector_attach_encoder(connector, &hdmi->output.encoder);
+       } else {
+               drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+                                           &tegra_hdmi_connector_funcs,
+                                           DRM_MODE_CONNECTOR_HDMIA,
+                                           hdmi->output.ddc);
+               drm_connector_helper_add(&hdmi->output.connector,
+                                        &tegra_hdmi_connector_helper_funcs);
+               hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+               drm_connector_attach_encoder(&hdmi->output.connector,
+                                            &hdmi->output.encoder);
+               drm_connector_register(&hdmi->output.connector);
+       }
 
        err = tegra_output_init(drm, &hdmi->output);
        if (err < 0) {
@@ -1769,7 +1792,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
        struct tegra_hdmi *hdmi;
-       struct resource *regs;
        int err;
 
        hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -1831,8 +1853,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+       hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdmi->regs))
                return PTR_ERR(hdmi->regs);
 
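The HDMI path above now follows the standard bridge-connector pattern: with DRM_BRIDGE_ATTACH_NO_CONNECTOR the attached bridge chain is told not to create a connector of its own, and drm_bridge_connector_init() synthesizes one from the chain, while the legacy drm_connector_init_with_ddc() path is kept as a fallback when no bridge is present. Boiled down (a sketch with error handling trimmed; drm, encoder and bridge stand for the objects used above):

    err = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
    if (err)
            return err;

    /* Build one connector that represents the whole bridge chain */
    connector = drm_bridge_connector_init(drm, encoder);
    if (IS_ERR(connector))
            return PTR_ERR(connector);

    drm_connector_attach_encoder(connector, encoder);
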
index 916857361a91c771baf8aff3d1ed987a71a76487..1af5f8318d914679ee1b32a87d004af82cbd1c6a 100644 (file)
@@ -9,8 +9,8 @@
 #include <linux/host1x.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
index ae78a81e5eef2c140bcd05d1dfc878dd31e9a454..4860790666af51de98a34942c61856bf3c766c8b 100644 (file)
@@ -11,8 +11,6 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
@@ -177,13 +175,9 @@ static int nvdec_init(struct host1x_client *client)
                goto free_channel;
        }
 
-       pm_runtime_enable(client->dev);
-       pm_runtime_use_autosuspend(client->dev);
-       pm_runtime_set_autosuspend_delay(client->dev, 500);
-
        err = tegra_drm_register_client(tegra, drm);
        if (err < 0)
-               goto disable_rpm;
+               goto free_syncpt;
 
        /*
         * Inherit the DMA parameters (such as maximum segment size) from the
@@ -193,10 +187,7 @@ static int nvdec_init(struct host1x_client *client)
 
        return 0;
 
-disable_rpm:
-       pm_runtime_dont_use_autosuspend(client->dev);
-       pm_runtime_force_suspend(client->dev);
-
+free_syncpt:
        host1x_syncpt_put(client->syncpts[0]);
 free_channel:
        host1x_channel_put(nvdec->channel);
@@ -276,6 +267,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
                        return err;
        } else {
                virt = tegra_drm_alloc(tegra, size, &iova);
+               if (IS_ERR(virt))
+                       return PTR_ERR(virt);
        }
 
        nvdec->falcon.firmware.virt = virt;
@@ -539,6 +532,10 @@ static int nvdec_probe(struct platform_device *pdev)
                goto exit_falcon;
        }
 
+       pm_runtime_enable(dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_autosuspend_delay(dev, 500);
+
        return 0;
 
 exit_falcon:
@@ -551,8 +548,8 @@ static void nvdec_remove(struct platform_device *pdev)
 {
        struct nvdec *nvdec = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
        host1x_client_unregister(&nvdec->client.base);
-
        falcon_exit(&nvdec->falcon);
 }
 
index abd6e3b92293bb0e55d939d239d369378f15db51..61b437a84806edab1c449118ac921fd14f1ce8df 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/debugfs.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
@@ -3708,7 +3708,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
 {
        struct device_node *np;
        struct tegra_sor *sor;
-       struct resource *regs;
        int err;
 
        sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
@@ -3781,8 +3780,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
                }
        }
 
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sor->regs = devm_ioremap_resource(&pdev->dev, regs);
+       sor->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sor->regs)) {
                err = PTR_ERR(sor->regs);
                goto remove;
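
The sor (and hdmi) conversion to devm_platform_ioremap_resource() is mechanical; the helper folds the two-step lookup-and-map sequence it replaces. As a reference sketch:

    /* Open-coded equivalent of devm_platform_ioremap_resource(pdev, 0) */
    struct resource *regs;
    void __iomem *base;

    regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);  /* MMIO window 0 */
    base = devm_ioremap_resource(&pdev->dev, regs);         /* request + map, devm-managed */
    if (IS_ERR(base))
            return PTR_ERR(base);
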
index da7a038dca20aa570566a1a94a3fc06d93bb0f98..73c356f1c90123d0053b2a4778adb1f870e379b7 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
@@ -143,13 +141,9 @@ static int vic_init(struct host1x_client *client)
                goto free_channel;
        }
 
-       pm_runtime_enable(client->dev);
-       pm_runtime_use_autosuspend(client->dev);
-       pm_runtime_set_autosuspend_delay(client->dev, 500);
-
        err = tegra_drm_register_client(tegra, drm);
        if (err < 0)
-               goto disable_rpm;
+               goto free_syncpt;
 
        /*
         * Inherit the DMA parameters (such as maximum segment size) from the
@@ -159,10 +153,7 @@ static int vic_init(struct host1x_client *client)
 
        return 0;
 
-disable_rpm:
-       pm_runtime_dont_use_autosuspend(client->dev);
-       pm_runtime_force_suspend(client->dev);
-
+free_syncpt:
        host1x_syncpt_put(client->syncpts[0]);
 free_channel:
        host1x_channel_put(vic->channel);
@@ -529,6 +520,10 @@ static int vic_probe(struct platform_device *pdev)
                goto exit_falcon;
        }
 
+       pm_runtime_enable(dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_autosuspend_delay(dev, 500);
+
        return 0;
 
 exit_falcon:
@@ -541,8 +536,8 @@ static void vic_remove(struct platform_device *pdev)
 {
        struct vic *vic = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
        host1x_client_unregister(&vic->client.base);
-
        falcon_exit(&vic->falcon);
 }
 
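The same reshuffle repeats across gr2d, gr3d, nvdec and vic: runtime PM setup moves out of the host1x client init path, which can run more than once over the device's lifetime, and is armed once at the end of probe, balanced by a pm_runtime_disable() at the top of remove. Condensed to the resulting pairing (a sketch; the driver name is a placeholder):

    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /* ... clocks, resets, host1x client registration ... */

            pm_runtime_enable(&pdev->dev);
            pm_runtime_use_autosuspend(&pdev->dev);
            pm_runtime_set_autosuspend_delay(&pdev->dev, 500);

            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            pm_runtime_disable(&pdev->dev);   /* balances the enable in probe */
            /* ... unregister the host1x client, tear down the rest ... */
    }
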
index bca726a8f483241c4ee305e75a403e1978f199c0..ba7baa6226751978b30b7a06896d859b88d9d80c 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
        drm_modes_test.o \
        drm_plane_helper_test.o \
        drm_probe_helper_test.o \
-       drm_rect_test.o
+       drm_rect_test.o \
+       drm_exec_test.o
 
 CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
index 416a279b6daeba1354904b3a0c9cad6b1e0ab3e9..7516f6cb36e4e3a1ed3a655de6f6a4479a0efade 100644 (file)
@@ -82,13 +82,6 @@ static int drm_client_modeset_test_init(struct kunit *test)
        return 0;
 }
 
-static void drm_client_modeset_test_exit(struct kunit *test)
-{
-       struct drm_client_modeset_test_priv *priv = test->priv;
-
-       drm_kunit_helper_free_device(test, priv->dev);
-}
-
 static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
 {
        struct drm_client_modeset_test_priv *priv = test->priv;
@@ -188,7 +181,6 @@ static struct kunit_case drm_test_pick_cmdline_tests[] = {
 static struct kunit_suite drm_test_pick_cmdline_test_suite = {
        .name = "drm_test_pick_cmdline",
        .init = drm_client_modeset_test_init,
-       .exit = drm_client_modeset_test_exit,
        .test_cases = drm_test_pick_cmdline_tests
 };
 
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
new file mode 100644 (file)
index 0000000..563949d
--- /dev/null
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#define pr_fmt(fmt) "drm_exec: " fmt
+
+#include <kunit/test.h>
+
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../lib/drm_random.h"
+
+struct drm_exec_priv {
+       struct device *dev;
+       struct drm_device *drm;
+};
+
+static int drm_exec_test_init(struct kunit *test)
+{
+       struct drm_exec_priv *priv;
+
+       priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+       test->priv = priv;
+
+       priv->dev = drm_kunit_helper_alloc_device(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+       priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev, sizeof(*priv->drm), 0,
+                                                       DRIVER_MODESET);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+       return 0;
+}
+
+static void sanitycheck(struct kunit *test)
+{
+       struct drm_exec exec;
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_fini(&exec);
+       KUNIT_SUCCEED(test);
+}
+
+static void test_lock(struct kunit *test)
+{
+       struct drm_exec_priv *priv = test->priv;
+       struct drm_gem_object gobj = { };
+       struct drm_exec exec;
+       int ret;
+
+       drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               ret = drm_exec_lock_obj(&exec, &gobj);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+       }
+       drm_exec_fini(&exec);
+}
+
+static void test_lock_unlock(struct kunit *test)
+{
+       struct drm_exec_priv *priv = test->priv;
+       struct drm_gem_object gobj = { };
+       struct drm_exec exec;
+       int ret;
+
+       drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               ret = drm_exec_lock_obj(&exec, &gobj);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+
+               drm_exec_unlock_obj(&exec, &gobj);
+               ret = drm_exec_lock_obj(&exec, &gobj);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+       }
+       drm_exec_fini(&exec);
+}
+
+static void test_duplicates(struct kunit *test)
+{
+       struct drm_exec_priv *priv = test->priv;
+       struct drm_gem_object gobj = { };
+       struct drm_exec exec;
+       int ret;
+
+       drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_until_all_locked(&exec) {
+               ret = drm_exec_lock_obj(&exec, &gobj);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+
+               ret = drm_exec_lock_obj(&exec, &gobj);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+       }
+       drm_exec_unlock_obj(&exec, &gobj);
+       drm_exec_fini(&exec);
+}
+
+static void test_prepare(struct kunit *test)
+{
+       struct drm_exec_priv *priv = test->priv;
+       struct drm_gem_object gobj = { };
+       struct drm_exec exec;
+       int ret;
+
+       drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               ret = drm_exec_prepare_obj(&exec, &gobj, 1);
+               drm_exec_retry_on_contention(&exec);
+               KUNIT_EXPECT_EQ(test, ret, 0);
+               if (ret)
+                       break;
+       }
+       drm_exec_fini(&exec);
+
+       drm_gem_private_object_fini(&gobj);
+}
+
+static void test_prepare_array(struct kunit *test)
+{
+       struct drm_exec_priv *priv = test->priv;
+       struct drm_gem_object gobj1 = { };
+       struct drm_gem_object gobj2 = { };
+       struct drm_gem_object *array[] = { &gobj1, &gobj2 };
+       struct drm_exec exec;
+       int ret;
+
+       drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
+       drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec)
+               ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
+                                            1);
+       KUNIT_EXPECT_EQ(test, ret, 0);
+       drm_exec_fini(&exec);
+
+       drm_gem_private_object_fini(&gobj1);
+       drm_gem_private_object_fini(&gobj2);
+}
+
+static void test_multiple_loops(struct kunit *test)
+{
+       struct drm_exec exec;
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               break;
+       }
+       drm_exec_fini(&exec);
+
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_until_all_locked(&exec) {
+               break;
+       }
+       drm_exec_fini(&exec);
+       KUNIT_SUCCEED(test);
+}
+
+static struct kunit_case drm_exec_tests[] = {
+       KUNIT_CASE(sanitycheck),
+       KUNIT_CASE(test_lock),
+       KUNIT_CASE(test_lock_unlock),
+       KUNIT_CASE(test_duplicates),
+       KUNIT_CASE(test_prepare),
+       KUNIT_CASE(test_prepare_array),
+       KUNIT_CASE(test_multiple_loops),
+       {}
+};
+
+static struct kunit_suite drm_exec_test_suite = {
+       .name = "drm_exec",
+       .init = drm_exec_test_init,
+       .test_cases = drm_exec_tests,
+};
+
+kunit_test_suite(drm_exec_test_suite);
+
+MODULE_AUTHOR("AMD");
+MODULE_LICENSE("GPL and additional rights");
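
For orientation, the loop shape the new suite exercises is the one drivers are expected to use: drm_exec_until_all_locked() re-runs its body after drm_exec_retry_on_contention() detects a ww-mutex backoff (all locks are dropped and the loop restarts), so every lock or prepare call must be safe to repeat. A minimal driver-side sketch (gobj is an assumed struct drm_gem_object pointer):

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
            /* Lock the object and reserve one dma-fence slot */
            ret = drm_exec_prepare_obj(&exec, gobj, 1);
            drm_exec_retry_on_contention(&exec);  /* restarts the loop on contention */
            if (ret)
                    break;
    }

    if (!ret) {
            /* ... all objects locked: submit work, add fences ... */
    }

    drm_exec_fini(&exec);
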
index df235b7fdaa5cc12c93fc40757f24ad19c25feec..f759d9f3b76e5cf8cd29dd25c444db61ef5df0bf 100644 (file)
@@ -178,13 +178,13 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
                 .handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
        }
 },
-{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
        .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
                 .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
                 .pitches = { 600, 300, 300 },
        }
 },
-{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
        .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
                 .handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
        }
index 4df47071dc8892927d0f857f82aebffae597a0b0..3d624ff2f6517ae477fb9812cd93c6be0c4ce4e4 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_kunit_helpers.h>
 #include <drm/drm_managed.h>
@@ -26,6 +27,28 @@ static struct platform_driver fake_platform_driver = {
        },
 };
 
+static void kunit_action_platform_driver_unregister(void *ptr)
+{
+       struct platform_driver *drv = ptr;
+
+       platform_driver_unregister(drv);
+}
+
+static void kunit_action_platform_device_put(void *ptr)
+{
+       struct platform_device *pdev = ptr;
+
+       platform_device_put(pdev);
+}
+
+static void kunit_action_platform_device_del(void *ptr)
+{
+       struct platform_device *pdev = ptr;
+
+       platform_device_del(pdev);
+}
+
 /**
  * drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
  * @test: The test context object
@@ -35,8 +58,8 @@ static struct platform_driver fake_platform_driver = {
  * able to leverage the usual infrastructure and most notably the
  * device-managed resources just like a "real" device.
  *
- * Callers need to make sure drm_kunit_helper_free_device() on the
- * device when done.
+ * Resources will be cleaned up automatically, but the removal can be
+ * forced using drm_kunit_helper_free_device().
  *
  * Returns:
  * A pointer to the new device, or an ERR_PTR() otherwise.
@@ -49,12 +72,27 @@ struct device *drm_kunit_helper_alloc_device(struct kunit *test)
        ret = platform_driver_register(&fake_platform_driver);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
+       ret = kunit_add_action_or_reset(test,
+                                       kunit_action_platform_driver_unregister,
+                                       &fake_platform_driver);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+
        pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
 
+       ret = kunit_add_action_or_reset(test,
+                                       kunit_action_platform_device_put,
+                                       pdev);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+
        ret = platform_device_add(pdev);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
+       ret = kunit_add_action_or_reset(test,
+                                       kunit_action_platform_device_del,
+                                       pdev);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+
        return &pdev->dev;
 }
 EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
@@ -70,8 +108,17 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       platform_device_unregister(pdev);
-       platform_driver_unregister(&fake_platform_driver);
+       kunit_release_action(test,
+                            kunit_action_platform_device_del,
+                            pdev);
+
+       kunit_release_action(test,
+                            kunit_action_platform_device_put,
+                            pdev);
+
+       kunit_release_action(test,
+                            kunit_action_platform_driver_unregister,
+                            &fake_platform_driver);
 }
 EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
 
@@ -100,5 +147,91 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
 }
 EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
 
+static void action_drm_release_context(void *ptr)
+{
+       struct drm_modeset_acquire_ctx *ctx = ptr;
+
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+}
+
+/**
+ * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
+ * @test: The test context object
+ *
+ * Allocates and initializes a modeset acquire context.
+ *
+ * The context is tied to the kunit test context, so we must not call
+ * drm_modeset_acquire_fini() on it; this is done automatically.
+ *
+ * Returns:
+ * An ERR_PTR on error, a pointer to the newly allocated context otherwise
+ */
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test)
+{
+       struct drm_modeset_acquire_ctx *ctx;
+       int ret;
+
+       ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ctx);
+
+       drm_modeset_acquire_init(ctx, 0);
+
+       ret = kunit_add_action_or_reset(test,
+                                       action_drm_release_context,
+                                       ctx);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return ctx;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc);
+
+static void kunit_action_drm_atomic_state_put(void *ptr)
+{
+       struct drm_atomic_state *state = ptr;
+
+       drm_atomic_state_put(state);
+}
+
+/**
+ * drm_kunit_helper_atomic_state_alloc - Allocates an atomic state
+ * @test: The test context object
+ * @drm: The device to alloc the state for
+ * @ctx: Locking context for that atomic update
+ *
+ * Allocates an empty atomic state.
+ *
+ * The state is tied to the kunit test context, so we must not call
+ * drm_atomic_state_put() on it; this is done automatically.
+ *
+ * Returns:
+ * An ERR_PTR on error, a pointer to the newly allocated state otherwise
+ */
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+                                   struct drm_device *drm,
+                                   struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_atomic_state *state;
+       int ret;
+
+       state = drm_atomic_state_alloc(drm);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       ret = kunit_add_action_or_reset(test,
+                                       kunit_action_drm_atomic_state_put,
+                                       state);
+       if (ret)
+               return ERR_PTR(ret);
+
+       state->acquire_ctx = ctx;
+
+       return state;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc);
+
 MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
 MODULE_LICENSE("GPL");
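
The rework above is built on KUnit's deferred actions: kunit_add_action_or_reset() registers a cleanup callback that runs automatically, in reverse registration order, when the test exits (or immediately, if registration itself fails), and kunit_release_action() triggers one early for callers such as drm_kunit_helper_free_device() that need deterministic teardown. An illustrative sketch (the foo resource and its functions are made up):

    static void release_foo(void *ctx)
    {
            foo_destroy(ctx);   /* hypothetical cleanup */
    }

    static void example_test(struct kunit *test)
    {
            struct foo *foo = foo_create();   /* hypothetical resource */
            int ret;

            /* Queued to run at test exit, LIFO relative to other actions */
            ret = kunit_add_action_or_reset(test, release_foo, foo);
            KUNIT_ASSERT_EQ(test, ret, 0);

            /* ... test body ... */

            /* Optional: run (and unqueue) the cleanup right now */
            kunit_release_action(test, release_foo, foo);
    }
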
index bc4aa2ce78beb97d9c6645c211595c4b17f40c2d..1e9f63fbfead353f67a2ee1ccee4b6064b4ab0c2 100644 (file)
@@ -36,13 +36,6 @@ static int drm_test_modes_init(struct kunit *test)
        return 0;
 }
 
-static void drm_test_modes_exit(struct kunit *test)
-{
-       struct drm_test_modes_priv *priv = test->priv;
-
-       drm_kunit_helper_free_device(test, priv->dev);
-}
-
 static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
 {
        struct drm_test_modes_priv *priv = test->priv;
@@ -148,7 +141,6 @@ static struct kunit_case drm_modes_analog_tv_tests[] = {
 static struct kunit_suite drm_modes_analog_tv_test_suite = {
        .name = "drm_modes_analog_tv",
        .init = drm_test_modes_init,
-       .exit = drm_test_modes_exit,
        .test_cases = drm_modes_analog_tv_tests,
 };
 
index 0ee65828623eed144cd074498c1ce1f58be87471..1a2044070a6cb8ddeafd96414c0a407645ff7bc8 100644 (file)
@@ -60,13 +60,6 @@ static int drm_probe_helper_test_init(struct kunit *test)
        return 0;
 }
 
-static void drm_probe_helper_test_exit(struct kunit *test)
-{
-       struct drm_probe_helper_test_priv *priv = test->priv;
-
-       drm_kunit_helper_free_device(test, priv->dev);
-}
-
 typedef struct drm_display_mode *(*expected_mode_func_t)(struct drm_device *);
 
 struct drm_connector_helper_tv_get_modes_test {
@@ -208,7 +201,6 @@ static struct kunit_case drm_test_connector_helper_tv_get_modes_tests[] = {
 static struct kunit_suite drm_test_connector_helper_tv_get_modes_suite = {
        .name = "drm_connector_helper_tv_get_modes",
        .init = drm_probe_helper_test_init,
-       .exit = drm_probe_helper_test_exit,
        .test_cases = drm_test_connector_helper_tv_get_modes_tests,
 };
 
index dca077411f77ccbcb5d110f699b7018bdd7331bc..9d9dee7abaefddaa7d45ebc001651758a20035d5 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/module.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -275,6 +273,55 @@ const struct dispc_features dispc_j721e_feats = {
        .vid_order = { 1, 3, 0, 2 },
 };
 
+const struct dispc_features dispc_am625_feats = {
+       .max_pclk_khz = {
+               [DISPC_VP_DPI] = 165000,
+               [DISPC_VP_INTERNAL] = 170000,
+       },
+
+       .scaling = {
+               .in_width_max_5tap_rgb = 1280,
+               .in_width_max_3tap_rgb = 2560,
+               .in_width_max_5tap_yuv = 2560,
+               .in_width_max_3tap_yuv = 4096,
+               .upscale_limit = 16,
+               .downscale_limit_5tap = 4,
+               .downscale_limit_3tap = 2,
+               /*
+                * The max supported pixel inc value is 255. The value
+                * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+                * The maximum bpp of all formats supported by the HW
+                * is 8. So the maximum supported xinc value is 32,
+                * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+                */
+               .xinc_max = 32,
+       },
+
+       .subrev = DISPC_AM625,
+
+       .common = "common",
+       .common_regs = tidss_am65x_common_regs,
+
+       .num_vps = 2,
+       .vp_name = { "vp1", "vp2" },
+       .ovr_name = { "ovr1", "ovr2" },
+       .vpclk_name =  { "vp1", "vp2" },
+       .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI },
+
+       .vp_feat = { .color = {
+                       .has_ctm = true,
+                       .gamma_size = 256,
+                       .gamma_type = TIDSS_GAMMA_8BIT,
+               },
+       },
+
+       .num_planes = 2,
+       /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+       .vid_name = { "vid", "vidl1" },
+       .vid_lite = { false, true, },
+       .vid_order = { 1, 0 },
+};
+
 static const u16 *dispc_common_regmap;
 
 struct dss_vp_data {
@@ -776,6 +823,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
        switch (dispc->feat->subrev) {
        case DISPC_K2G:
                return dispc_k2g_read_and_clear_irqstatus(dispc);
+       case DISPC_AM625:
        case DISPC_AM65X:
        case DISPC_J721E:
                return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -791,6 +839,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
        case DISPC_K2G:
                dispc_k2g_set_irqenable(dispc, mask);
                break;
+       case DISPC_AM625:
        case DISPC_AM65X:
        case DISPC_J721E:
                dispc_k3_set_irqenable(dispc, mask);
@@ -1281,6 +1330,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
                dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
                                        x, y, layer);
                break;
+       case DISPC_AM625:
        case DISPC_AM65X:
                dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
                                          x, y, layer);
@@ -2199,6 +2249,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
        case DISPC_K2G:
                dispc_k2g_plane_init(dispc);
                break;
+       case DISPC_AM625:
        case DISPC_AM65X:
        case DISPC_J721E:
                dispc_k3_plane_init(dispc);
@@ -2305,6 +2356,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
        case DISPC_K2G:
                dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
                break;
+       case DISPC_AM625:
        case DISPC_AM65X:
                dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
                break;
@@ -2579,7 +2631,8 @@ int dispc_runtime_resume(struct dispc_device *dispc)
                REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
                REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
 
-       if (dispc->feat->subrev == DISPC_AM65X)
+       if (dispc->feat->subrev == DISPC_AM625 ||
+           dispc->feat->subrev == DISPC_AM65X)
                dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
                        REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
                        REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
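
Plugging numbers into the xinc comment in dispc_am625_feats confirms the bound, with bpp = 8 as the largest bytes-per-pixel the hardware supports:

    pixel_inc(xinc) = 1 + (xinc - 1) * bpp
    pixel_inc(32)   = 1 + 31 * 8 = 249  <= 255  (fits the 8-bit field)
    pixel_inc(33)   = 1 + 32 * 8 = 257  >  255  (overflows), so xinc_max = 32
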
index 946ed769caaf6a275be87d5a43ba337cea24331b..33ac5ad7a423d54a6bfc653747ab2738d6a3b429 100644 (file)
@@ -59,6 +59,7 @@ enum dispc_vp_bus_type {
 
 enum dispc_dss_subrevision {
        DISPC_K2G,
+       DISPC_AM625,
        DISPC_AM65X,
        DISPC_J721E,
 };
@@ -86,6 +87,7 @@ struct dispc_features {
 };
 
 extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am625_feats;
 extern const struct dispc_features dispc_am65x_feats;
 extern const struct dispc_features dispc_j721e_feats;
 
index 3f5f27fb6ebc86935cec0ffe5b624dc5ade2e6da..4d063eb9cd0b747229e0797c037a586dc63aa5e0 100644 (file)
@@ -5,7 +5,7 @@
  */
 
 #include <linux/console.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 
@@ -197,7 +197,7 @@ err_runtime_suspend:
        return ret;
 }
 
-static int tidss_remove(struct platform_device *pdev)
+static void tidss_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct tidss_device *tidss = platform_get_drvdata(pdev);
@@ -221,8 +221,6 @@ static int tidss_remove(struct platform_device *pdev)
        dispc_remove(tidss);
 
        dev_dbg(dev, "%s done\n", __func__);
-
-       return 0;
 }
 
 static void tidss_shutdown(struct platform_device *pdev)
@@ -232,6 +230,7 @@ static void tidss_shutdown(struct platform_device *pdev)
 
 static const struct of_device_id tidss_of_table[] = {
        { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+       { .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
        { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
        { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
        { }
@@ -241,7 +240,7 @@ MODULE_DEVICE_TABLE(of, tidss_of_table);
 
 static struct platform_driver tidss_platform_driver = {
        .probe          = tidss_probe,
-       .remove         = tidss_remove,
+       .remove_new     = tidss_remove,
        .shutdown       = tidss_shutdown,
        .driver         = {
                .name   = "tidss",
index 0d4865e9c03d63d6d7d558429462056cdd2ca54c..17a86bed805481c6cf054f6cb3176d60faa0351c 100644 (file)
 
 #include <linux/export.h>
 
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "tidss_crtc.h"
 #include "tidss_drv.h"
 #include "tidss_encoder.h"
 
-static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
-                                     struct drm_crtc_state *crtc_state,
-                                     struct drm_connector_state *conn_state)
+struct tidss_encoder {
+       struct drm_bridge bridge;
+       struct drm_encoder encoder;
+       struct drm_connector *connector;
+       struct drm_bridge *next_bridge;
+       struct tidss_device *tidss;
+};
+
+static inline struct tidss_encoder
+*bridge_to_tidss_encoder(struct drm_bridge *b)
+{
+       return container_of(b, struct tidss_encoder, bridge);
+}
+
+static int tidss_bridge_attach(struct drm_bridge *bridge,
+                              enum drm_bridge_attach_flags flags)
+{
+       struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
+
+       return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+                                bridge, flags);
+}
+
+static int tidss_bridge_atomic_check(struct drm_bridge *bridge,
+                                    struct drm_bridge_state *bridge_state,
+                                    struct drm_crtc_state *crtc_state,
+                                    struct drm_connector_state *conn_state)
 {
-       struct drm_device *ddev = encoder->dev;
+       struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
+       struct tidss_device *tidss = t_enc->tidss;
        struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
        struct drm_display_info *di = &conn_state->connector->display_info;
-       struct drm_bridge *bridge;
-       bool bus_flags_set = false;
-
-       dev_dbg(ddev->dev, "%s\n", __func__);
-
-       /*
-        * Take the bus_flags from the first bridge that defines
-        * bridge timings, or from the connector's display_info if no
-        * bridge defines the timings.
-        */
-       drm_for_each_bridge_in_chain(encoder, bridge) {
-               if (!bridge->timings)
-                       continue;
-
-               tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
-               bus_flags_set = true;
-               break;
-       }
+       struct drm_bridge_state *next_bridge_state = NULL;
+
+       if (t_enc->next_bridge)
+               next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+                                                                   t_enc->next_bridge);
 
-       if (!di->bus_formats || di->num_bus_formats == 0)  {
-               dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+       if (next_bridge_state) {
+               tcrtc_state->bus_flags = next_bridge_state->input_bus_cfg.flags;
+               tcrtc_state->bus_format = next_bridge_state->input_bus_cfg.format;
+       } else if (di->num_bus_formats) {
+               tcrtc_state->bus_format = di->bus_formats[0];
+               tcrtc_state->bus_flags = di->bus_flags;
+       } else {
+               dev_err(tidss->dev, "%s: No bus_formats in connected display\n",
                        __func__);
                return -EINVAL;
        }
 
-       // XXX any cleaner way to set bus format and flags?
-       tcrtc_state->bus_format = di->bus_formats[0];
-       if (!bus_flags_set)
-               tcrtc_state->bus_flags = di->bus_flags;
-
        return 0;
 }
 
-static void tidss_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .atomic_check = tidss_encoder_atomic_check,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = tidss_encoder_destroy,
+static const struct drm_bridge_funcs tidss_bridge_funcs = {
+       .attach                         = tidss_bridge_attach,
+       .atomic_check                   = tidss_bridge_atomic_check,
+       .atomic_reset                   = drm_atomic_helper_bridge_reset,
+       .atomic_duplicate_state         = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state           = drm_atomic_helper_bridge_destroy_state,
 };
 
-struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
-                                        u32 encoder_type, u32 possible_crtcs)
+int tidss_encoder_create(struct tidss_device *tidss,
+                        struct drm_bridge *next_bridge,
+                        u32 encoder_type, u32 possible_crtcs)
 {
+       struct tidss_encoder *t_enc;
        struct drm_encoder *enc;
+       struct drm_connector *connector;
        int ret;
 
-       enc = kzalloc(sizeof(*enc), GFP_KERNEL);
-       if (!enc)
-               return ERR_PTR(-ENOMEM);
+       t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
+                                         encoder, encoder_type);
+       if (IS_ERR(t_enc))
+               return PTR_ERR(t_enc);
+
+       t_enc->tidss = tidss;
+       t_enc->next_bridge = next_bridge;
+       t_enc->bridge.funcs = &tidss_bridge_funcs;
 
+       enc = &t_enc->encoder;
        enc->possible_crtcs = possible_crtcs;
 
-       ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
-                              encoder_type, NULL);
-       if (ret < 0) {
-               kfree(enc);
-               return ERR_PTR(ret);
+       /* Attach the first bridge to the encoder */
+       ret = drm_bridge_attach(enc, &t_enc->bridge, NULL,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret) {
+               dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+               return ret;
+       }
+
+       /* Initialize the connector at the end of the bridge chain */
+       connector = drm_bridge_connector_init(&tidss->ddev, enc);
+       if (IS_ERR(connector)) {
+               dev_err(tidss->dev, "bridge_connector create failed\n");
+               return PTR_ERR(connector);
+       }
+
+       ret = drm_connector_attach_encoder(connector, enc);
+       if (ret) {
+               dev_err(tidss->dev, "attaching encoder to connector failed\n");
+               return ret;
        }
 
-       drm_encoder_helper_add(enc, &encoder_helper_funcs);
+       t_enc->connector = connector;
 
        dev_dbg(tidss->dev, "Encoder create done\n");
 
-       return enc;
+       return ret;
 }
index ace877c0e0fdfc197d0c5cc8b5be4b1cfa68cba2..3e561d6b1e8324fab7484bed30a9637d6cb79221 100644 (file)
@@ -11,7 +11,8 @@
 
 struct tidss_device;
 
-struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
-                                        u32 encoder_type, u32 possible_crtcs);
+int tidss_encoder_create(struct tidss_device *tidss,
+                        struct drm_bridge *next_bridge,
+                        u32 encoder_type, u32 possible_crtcs);
 
 #endif
index ad2fa3c3d4a7110ccf901021b73ff89b7b52df2d..c979ad1af2366019a3905deb6990c8e28638a368 100644 (file)
@@ -193,7 +193,6 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
        for (i = 0; i < num_pipes; ++i) {
                struct tidss_plane *tplane;
                struct tidss_crtc *tcrtc;
-               struct drm_encoder *enc;
                u32 hw_plane_id = feat->vid_order[tidss->num_planes];
                int ret;
 
@@ -216,16 +215,13 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
 
                tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
 
-               enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+               ret = tidss_encoder_create(tidss, pipes[i].bridge,
+                                          pipes[i].enc_type,
                                           1 << tcrtc->crtc.index);
-               if (IS_ERR(enc)) {
+               if (ret) {
                        dev_err(tidss->dev, "encoder create failed\n");
-                       return PTR_ERR(enc);
-               }
-
-               ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
-               if (ret)
                        return ret;
+               }
        }
 
        /* create overlay planes of the leftover planes */
index 6bdd6e4a955ab3cca15f81d9e931616f3bea453c..e1c0ef0c3894c8552328fdfcd2da71ba5c629c69 100644 (file)
@@ -38,7 +38,8 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
        if (!new_plane_state->crtc) {
                /*
                 * The visible field is not reset by the DRM core but only
-                * updated by drm_plane_helper_check_state(), set it manually.
+                * updated by drm_atomic_helper_check_plane_state(), set it
+                * manually.
                 */
                new_plane_state->visible = false;
                return 0;
index 2729e16bc05367c263291e09bf68f7acb46381a0..9aefd010acde13d6838a5da40e62469f02a45321 100644 (file)
@@ -374,7 +374,7 @@ fail_backlight:
        return ret;
 }
 
-static int panel_remove(struct platform_device *pdev)
+static void panel_remove(struct platform_device *pdev)
 {
        struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
        struct panel_module *panel_mod = to_panel_module(mod);
@@ -387,8 +387,6 @@ static int panel_remove(struct platform_device *pdev)
 
        tilcdc_module_cleanup(mod);
        kfree(panel_mod->info);
-
-       return 0;
 }
 
 static const struct of_device_id panel_of_match[] = {
@@ -398,7 +396,7 @@ static const struct of_device_id panel_of_match[] = {
 
 static struct platform_driver panel_driver = {
        .probe = panel_probe,
-       .remove = panel_remove,
+       .remove_new = panel_remove,
        .driver = {
                .name = "tilcdc-panel",
                .of_match_table = panel_of_match,
index 077c6ff5a2e1703bd273c40a0bee41d6df77d1ff..4ceb68ffac4bed53ed80202abeb5733d417dc246 100644 (file)
@@ -316,19 +316,24 @@ static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
        u32 speed_hz;
        int ret;
 
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 0);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
        ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
+       spi_bus_unlock(spi->controller);
        if (ret || !num)
                return ret;
 
        if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !dbi->swap_bytes)
                bpw = 16;
 
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 1);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+       ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+       spi_bus_unlock(spi->controller);
 
-       return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+       return ret;
 }
 
 static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
index 02265c898816d1d3b07af298b8818e3758e93200..938bceed599985b84a0fd5d8f5c67b5e3fe08a51 100644 (file)
@@ -59,9 +59,11 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
         * before being transferred as 8-bit on the big endian SPI bus.
         */
        buf[0] = cpu_to_be16(*cmd);
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(mipi->dc, 0);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
        ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+       spi_bus_unlock(spi->controller);
        if (ret || !num)
                goto free;
 
@@ -79,9 +81,11 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
        if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
                bpw = 16;
 
+       spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(mipi->dc, 1);
        speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
        ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
+       spi_bus_unlock(spi->controller);
  free:
        kfree(buf);
 
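Both DBI drivers gain the same guard: on a bus shared by several panels, another device's transfer could otherwise slip in between driving the D/C line and the transfer that level qualifies, corrupting the command stream. Reduced to its essence (a sketch; dc and msg are stand-ins, and it assumes the transfer path issues spi_sync_locked() so it does not re-take the bus lock held here):

    spi_bus_lock(spi->controller);
    gpiod_set_value_cansleep(dc, 0);   /* 0 = the following bytes are a command */
    ret = spi_sync_locked(spi, &msg);  /* transfer runs while the bus is held */
    spi_bus_unlock(spi->controller);
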
index 76cd7f515bab999c4484e0fb1ab24bdabcc86d90..2d999a0facdee2537ec9369875c7867275b4fbf9 100644 (file)
@@ -1369,13 +1369,11 @@ static int ofdrm_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ofdrm_remove(struct platform_device *pdev)
+static void ofdrm_remove(struct platform_device *pdev)
 {
        struct drm_device *dev = platform_get_drvdata(pdev);
 
        drm_dev_unplug(dev);
-
-       return 0;
 }
 
 static const struct of_device_id ofdrm_of_match_display[] = {
@@ -1390,7 +1388,7 @@ static struct platform_driver ofdrm_platform_driver = {
                .of_match_table = ofdrm_of_match_display,
        },
        .probe = ofdrm_probe,
-       .remove = ofdrm_remove,
+       .remove_new = ofdrm_remove,
 };
 
 module_platform_driver(ofdrm_platform_driver);
index eb9f13f18a025416dc0cd697b842d19ebda08da1..f80a141fcf365a9c5715046c650505af00dacfaa 100644 (file)
@@ -307,7 +307,8 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
        if (IS_ERR(dbi->reset))
                return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
 
-       dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+       /* Multiple panels can share the "dc" GPIO, but only if they are on the same SPI bus! */
+       dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(dc))
                return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
 
index c2677d081a7b6b41e03229678cc32b4a059edb7b..13ae148f59b9b5079ab6a91162a48684593bb1ee 100644 (file)
@@ -533,7 +533,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
        DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
                  epd->factored_stage_time);
 
-       buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
+       buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_exit;
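
The repaper change rightsizes the staging buffer: the panel transfer format packs one monochrome bit per pixel, so width * height / 8 bytes suffice, where kmalloc_array(width, height) was reserving a full byte per pixel. For a 264x176 panel, for example:

    264 * 176 = 46464 pixels  ->  46464 / 8 = 5808 bytes at 1 bpp (was 46464)
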
index 25e11ef11c4ce3c2d3fbdc5f67231771530c7975..ff86ba1ae1b8b07b609b7e5567f3400d34c79010 100644 (file)
@@ -888,14 +888,12 @@ static int simpledrm_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int simpledrm_remove(struct platform_device *pdev)
+static void simpledrm_remove(struct platform_device *pdev)
 {
        struct simpledrm_device *sdev = platform_get_drvdata(pdev);
        struct drm_device *dev = &sdev->dev;
 
        drm_dev_unplug(dev);
-
-       return 0;
 }
 
 static const struct of_device_id simpledrm_of_match_table[] = {
@@ -910,7 +908,7 @@ static struct platform_driver simpledrm_platform_driver = {
                .of_match_table = simpledrm_of_match_table,
        },
        .probe = simpledrm_probe,
-       .remove = simpledrm_remove,
+       .remove_new = simpledrm_remove,
 };
 
 module_platform_driver(simpledrm_platform_driver);
index f906b22959cfb4957d212d199f8e5c1312579a74..dad298127226c3d2a8fef014c8ae35fe4405466a 100644 (file)
@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644 (file)
index 0000000..75fdce0
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST_HELPERS=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644 (file)
index 0000000..ec87c4f
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+        ttm_device_test.o \
+        ttm_pool_test.o \
+        ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644 (file)
index 0000000..b1b423b
--- /dev/null
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_test_case {
+       const char *description;
+       bool use_dma_alloc;
+       bool use_dma32;
+       bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+       struct ttm_test_devices *priv = test->priv;
+       struct ttm_device *ttm_dev;
+       struct ttm_resource_manager *ttm_sys_man;
+       int err;
+
+       ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+       err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+       ttm_sys_man = &ttm_dev->sysman;
+       KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+       KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+       KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+       KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+                           priv->drm->anon_inode->i_mapping);
+
+       ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+       struct ttm_test_devices *priv = test->priv;
+       struct ttm_device *ttm_devs;
+       unsigned int i, num_dev = 3;
+       int err;
+
+       ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+       for (i = 0; i < num_dev; i++) {
+               err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+               KUNIT_ASSERT_EQ(test, err, 0);
+
+               KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+                                   priv->drm->anon_inode->i_mapping);
+               KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+               KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+               KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+       }
+
+       KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+       for (i = 0; i < num_dev; i++)
+               ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+       struct ttm_test_devices *priv = test->priv;
+       struct ttm_device *ttm_dev;
+       struct ttm_resource_manager *man;
+       int err;
+
+       ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+       err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+       KUNIT_ASSERT_NOT_NULL(test, man);
+
+       ttm_device_fini(ttm_dev);
+
+       KUNIT_ASSERT_FALSE(test, man->use_type);
+       KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+       KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+       struct ttm_test_devices *priv = test->priv;
+       struct drm_device *drm = priv->drm;
+       struct ttm_device *ttm_dev;
+       struct drm_vma_offset_manager *vma_man;
+       int err;
+
+       ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+       /* Let's pretend there's no VMA manager allocated */
+       vma_man = drm->vma_offset_manager;
+       drm->vma_offset_manager = NULL;
+
+       err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+       KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+       /* Bring the manager back for a graceful cleanup */
+       drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+       {
+               .description = "No DMA allocations, no DMA32 required",
+               .use_dma_alloc = false,
+               .use_dma32 = false,
+               .pools_init_expected = false,
+       },
+       {
+               .description = "DMA allocations, DMA32 required",
+               .use_dma_alloc = true,
+               .use_dma32 = true,
+               .pools_init_expected = true,
+       },
+       {
+               .description = "No DMA allocations, DMA32 required",
+               .use_dma_alloc = false,
+               .use_dma32 = true,
+               .pools_init_expected = false,
+       },
+       {
+               .description = "DMA allocations, no DMA32 required",
+               .use_dma_alloc = true,
+               .use_dma32 = false,
+               .pools_init_expected = true,
+       },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+       strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+       struct ttm_test_devices *priv = test->priv;
+       const struct ttm_device_test_case *params = test->param_value;
+       struct ttm_device *ttm_dev;
+       struct ttm_pool *pool;
+       struct ttm_pool_type pt;
+       int err;
+
+       ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+       err = ttm_device_kunit_init(priv, ttm_dev,
+                                   params->use_dma_alloc,
+                                   params->use_dma32);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       pool = &ttm_dev->pool;
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+       KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+       KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+       KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+
+       if (params->pools_init_expected) {
+               for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+                       for (int j = 0; j <= MAX_ORDER; ++j) {
+                               pt = pool->caching[i].orders[j];
+                               KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+                               KUNIT_EXPECT_EQ(test, pt.caching, i);
+                               KUNIT_EXPECT_EQ(test, pt.order, j);
+
+                               if (params->use_dma_alloc)
+                                       KUNIT_ASSERT_FALSE(test,
+                                                          list_empty(&pt.pages));
+                       }
+               }
+       }
+
+       ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+       KUNIT_CASE(ttm_device_init_basic),
+       KUNIT_CASE(ttm_device_init_multiple),
+       KUNIT_CASE(ttm_device_fini_basic),
+       KUNIT_CASE(ttm_device_init_no_vma_man),
+       KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+       {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+       .name = "ttm_device",
+       .init = ttm_test_devices_init,
+       .exit = ttm_test_devices_fini,
+       .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_LICENSE("GPL");
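
The pools test above is table-driven: KUNIT_ARRAY_PARAM(name, array, desc_fn) emits a name_gen_params() generator, and KUNIT_CASE_PARAM() runs the test once per array entry with test->param_value pointing at the current row. The wiring, stripped down (a sketch with a made-up case struct):

    struct my_case { const char *description; bool flag; };

    static const struct my_case my_cases[] = {
            { .description = "flag off", .flag = false },
            { .description = "flag on",  .flag = true  },
    };

    static void my_case_desc(const struct my_case *c, char *desc)
    {
            strscpy(desc, c->description, KUNIT_PARAM_DESC_SIZE);
    }

    KUNIT_ARRAY_PARAM(my, my_cases, my_case_desc);  /* generates my_gen_params() */

    static void my_test(struct kunit *test)
    {
            const struct my_case *params = test->param_value;

            KUNIT_EXPECT_TRUE(test, params->flag || !params->flag);
    }

    static struct kunit_case my_test_cases[] = {
            KUNIT_CASE_PARAM(my_test, my_gen_params),
            {}
    };
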
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644 (file)
index 0000000..81661d8
--- /dev/null
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_funcs ttm_dev_funcs = {
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+                         struct ttm_device *ttm,
+                         bool use_dma_alloc,
+                         bool use_dma32)
+{
+       struct drm_device *drm = priv->drm;
+       int err;
+
+       err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
+                             drm->anon_inode->i_mapping,
+                             drm->vma_offset_manager,
+                             use_dma_alloc, use_dma32);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+                                           struct ttm_test_devices *devs,
+                                           size_t size)
+{
+       struct drm_gem_object gem_obj = { .size = size };
+       struct ttm_buffer_object *bo;
+
+       bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, bo);
+
+       bo->base = gem_obj;
+       bo->bdev = devs->ttm_dev;
+
+       return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+       struct ttm_test_devices *devs;
+
+       devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, devs);
+
+       devs->dev = drm_kunit_helper_alloc_device(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+       devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+                                                       sizeof(*devs->drm), 0,
+                                                       DRIVER_GEM);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+       return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+       struct ttm_test_devices *devs;
+       struct ttm_device *ttm_dev;
+       int err;
+
+       devs = ttm_test_devices_basic(test);
+
+       ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+       err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       devs->ttm_dev = ttm_dev;
+
+       return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
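+/* Tear down whatever the helpers above created; the TTM device is optional. */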
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+       if (devs->ttm_dev)
+               ttm_device_fini(devs->ttm_dev);
+
+       drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+       struct ttm_test_devices *priv;
+
+       priv = ttm_test_devices_basic(test);
+       test->priv = priv;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+       ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644 (file)
index 0000000..e261e36
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+
+struct ttm_test_devices {
+       struct drm_device *drm;
+       struct device *dev;
+       struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+                         struct ttm_device *ttm,
+                         bool use_dma_alloc,
+                         bool use_dma32);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+                                           struct ttm_test_devices *devs,
+                                           size_t size);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif // TTM_KUNIT_HELPERS_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644 (file)
index 0000000..8d90870
--- /dev/null
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_pool_test_case {
+       const char *description;
+       unsigned int order;
+       bool use_dma_alloc;
+};
+
+struct ttm_pool_test_priv {
+       struct ttm_test_devices *devs;
+
+       /* Used to create mock ttm_tts */
+       struct ttm_buffer_object *mock_bo;
+};
+
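+/* Default operation context: interruptible, allowed to wait for the GPU. */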
+static struct ttm_operation_ctx simple_ctx = {
+       .interruptible = true,
+       .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv;
+
+       priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, priv);
+
+       priv->devs = ttm_test_devices_basic(test);
+       test->priv = priv;
+
+       return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+
+       ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+                                       uint32_t page_flags,
+                                       enum ttm_caching caching,
+                                       size_t size)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_buffer_object *bo;
+       struct ttm_tt *tt;
+       int err;
+
+       bo = ttm_bo_kunit_init(test, priv->devs, size);
+       KUNIT_ASSERT_NOT_NULL(test, bo);
+       priv->mock_bo = bo;
+
+       tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       return tt;
+}
+
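+/*
+ * Allocate from the pool once and free straight back: the freed pages stay
+ * cached in the matching caching/order list, pre-populating the pool.
+ */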
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+                                              size_t size,
+                                              enum ttm_caching caching)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_test_devices *devs = priv->devs;
+       struct ttm_pool *pool;
+       struct ttm_tt *tt;
+       int err;
+
+       tt = ttm_tt_kunit_init(test, 0, caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+
+       return pool;
+}
+
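+/*
+ * Orders above MAX_ORDER are expected to be satisfied with one
+ * maximum-order block followed by lower-order blocks.
+ */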
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+       {
+               .description = "One page",
+               .order = 0,
+       },
+       {
+               .description = "More than one page",
+               .order = 2,
+       },
+       {
+               .description = "Above the allocation limit",
+               .order = MAX_ORDER + 1,
+       },
+       {
+               .description = "One page, with coherent DMA mappings enabled",
+               .order = 0,
+               .use_dma_alloc = true,
+       },
+       {
+               .description = "Above the allocation limit, with coherent DMA mappings enabled",
+               .order = MAX_ORDER + 1,
+               .use_dma_alloc = true,
+       },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+                                    char *desc)
+{
+       strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+                 ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_test_devices *devs = priv->devs;
+       const struct ttm_pool_test_case *params = test->param_value;
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct page *fst_page, *last_page;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int expected_num_pages = 1 << params->order;
+       size_t size = expected_num_pages * PAGE_SIZE;
+       int err;
+
+       tt = ttm_tt_kunit_init(test, 0, caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
+                     false);
+
+       KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+       KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+       KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+       KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+       fst_page = tt->pages[0];
+       last_page = tt->pages[tt->num_pages - 1];
+
+       if (params->order <= MAX_ORDER) {
+               if (params->use_dma_alloc) {
+                       KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+                       KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+               } else {
+                       KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+               }
+       } else {
+               if (params->use_dma_alloc) {
+                       KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+                       KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+               } else {
+                       /*
+                        * We expect one maximum-order block, followed by
+                        * order-0 blocks.
+                        */
+                       KUNIT_ASSERT_EQ(test, fst_page->private,
+                                       min_t(unsigned int, MAX_ORDER,
+                                             params->order));
+                       KUNIT_ASSERT_EQ(test, last_page->private, 0);
+               }
+       }
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_test_devices *devs = priv->devs;
+       const struct ttm_pool_test_case *params = test->param_value;
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_buffer_object *bo;
+       dma_addr_t dma1, dma2;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int expected_num_pages = 1 << params->order;
+       size_t size = expected_num_pages * PAGE_SIZE;
+       int err;
+
+       tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       bo = ttm_bo_kunit_init(test, devs, size);
+       KUNIT_ASSERT_NOT_NULL(test, bo);
+
+       err = ttm_sg_tt_init(tt, bo, 0, caching);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+       KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+       dma1 = tt->dma_address[0];
+       dma2 = tt->dma_address[tt->num_pages - 1];
+
+       KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
+       KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int order = 0;
+       size_t size = PAGE_SIZE;
+       int err;
+
+       pool = ttm_pool_pre_populated(test, size, caching);
+
+       pt = &pool->caching[caching].orders[order];
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+       tt = ttm_tt_kunit_init(test, 0, caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt_pool, *pt_tt;
+       enum ttm_caching tt_caching = ttm_uncached;
+       enum ttm_caching pool_caching = ttm_cached;
+       size_t size = PAGE_SIZE;
+       unsigned int order = 0;
+       int err;
+
+       pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+       pt_pool = &pool->caching[pool_caching].orders[order];
+       pt_tt = &pool->caching[tt_caching].orders[order];
+
+       tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+       KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt_pool, *pt_tt;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int order = 2;
+       size_t fst_size = (1 << order) * PAGE_SIZE;
+       size_t snd_size = PAGE_SIZE;
+       int err;
+
+       pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+       pt_pool = &pool->caching[caching].orders[order];
+       pt_tt = &pool->caching[caching].orders[0];
+
+       tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+       KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+       err = ttm_pool_alloc(pool, tt, &simple_ctx);
+       KUNIT_ASSERT_EQ(test, err, 0);
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_test_devices *devs = priv->devs;
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int order = 2;
+       size_t size = (1 << order) * PAGE_SIZE;
+
+       tt = ttm_tt_kunit_init(test, 0, caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+       ttm_pool_alloc(pool, tt, &simple_ctx);
+
+       pt = &pool->caching[caching].orders[order];
+       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+       struct ttm_pool_test_priv *priv = test->priv;
+       struct ttm_test_devices *devs = priv->devs;
+       struct ttm_tt *tt;
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int order = 2;
+       size_t size = (1 << order) * PAGE_SIZE;
+
+       tt = ttm_tt_kunit_init(test, 0, caching, size);
+       KUNIT_ASSERT_NOT_NULL(test, tt);
+
+       pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, pool);
+
+       ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+       ttm_pool_alloc(pool, tt, &simple_ctx);
+
+       pt = &pool->caching[caching].orders[order];
+       KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+       ttm_pool_free(pool, tt);
+       ttm_tt_fini(tt);
+
+       KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+       ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+       struct ttm_pool *pool;
+       struct ttm_pool_type *pt;
+       enum ttm_caching caching = ttm_uncached;
+       unsigned int order = 0;
+       size_t size = PAGE_SIZE;
+
+       pool = ttm_pool_pre_populated(test, size, caching);
+       pt = &pool->caching[caching].orders[order];
+
+       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+       ttm_pool_fini(pool);
+
+       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+       KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+       KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+                        ttm_pool_alloc_basic_gen_params),
+       KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+       KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+       KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+       KUNIT_CASE(ttm_pool_free_dma_alloc),
+       KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+       KUNIT_CASE(ttm_pool_fini_basic),
+       {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+       .name = "ttm_pool",
+       .init = ttm_pool_test_init,
+       .exit = ttm_pool_test_fini,
+       .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_LICENSE("GPL");
index bd5dae4d16249faf6a8c8edd93a4bbf6ff82cf66..aa8ab1a418afd02992d91a4a9aeff0e4734c356e 100644 (file)
@@ -345,6 +345,7 @@ static void ttm_bo_release(struct kref *kref)
 
                if (!dma_resv_test_signaled(bo->base.resv,
                                            DMA_RESV_USAGE_BOOKKEEP) ||
+                   (want_init_on_free() && (bo->ttm != NULL)) ||
                    !dma_resv_trylock(bo->base.resv)) {
                        /* The BO is not idle, resurrect it for delayed destroy */
                        ttm_bo_flush_all_fences(bo);
@@ -458,18 +459,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
                goto out;
        }
 
-bounce:
-       ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
-       if (ret == -EMULTIHOP) {
+       do {
+               ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+               if (ret != -EMULTIHOP)
+                       break;
+
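+               /* Bounce through a temporary placement, then retry the move. */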
                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
-               if (ret) {
-                       if (ret != -ERESTARTSYS && ret != -EINTR)
-                               pr_err("Buffer eviction failed\n");
-                       ttm_resource_free(bo, &evict_mem);
-                       goto out;
-               }
-               /* try and move to final place now. */
-               goto bounce;
+       } while (!ret);
+
+       if (ret) {
+               ttm_resource_free(bo, &evict_mem);
+               if (ret != -ERESTARTSYS && ret != -EINTR)
+                       pr_err("Buffer eviction failed\n");
        }
 out:
        return ret;
@@ -517,6 +518,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 {
        bool ret = false;
 
+       if (bo->pin_count) {
+               *locked = false;
+               *busy = false;
+               return false;
+       }
+
        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
@@ -1154,7 +1161,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         * Move to system cached
         */
        if (bo->resource->mem_type != TTM_PL_SYSTEM) {
-               struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource *evict_mem;
                struct ttm_place hop;
 
@@ -1164,9 +1170,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                if (unlikely(ret))
                        goto out;
 
-               ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
+               ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
                if (unlikely(ret != 0)) {
                WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
+                       ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
        }
index 7333f7a87a2fbfc7c93e50624650cf732da15fbe..46ff9c75bb124ae73cc45439ddd33cf9e02e305f 100644 (file)
@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
                                       struct ttm_resource *res)
 {
        if (pos->last != res) {
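+               /* If res heads the bulk range, advance the head before moving it. */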
+               if (pos->first == res)
+                       pos->first = list_next_entry(res, lru);
                list_move(&res->lru, &pos->last->lru);
                pos->last = res;
        }
@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
 {
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
-       if (unlikely(pos->first == res && pos->last == res)) {
+       if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+                    (pos->first == res && pos->last == res))) {
                pos->first = NULL;
                pos->last = NULL;
        } else if (pos->first == res) {
index 40b1168ad671fd953b340b61596284dee19e28ec..0bb56d0635366602d10d8e812688d0c6d2c218b0 100644 (file)
@@ -236,7 +236,7 @@ dev_unref:
        return ret;
 }
 
-static int tve200_remove(struct platform_device *pdev)
+static void tve200_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
        struct tve200_drm_dev_private *priv = drm->dev_private;
@@ -247,8 +247,6 @@ static int tve200_remove(struct platform_device *pdev)
        drm_mode_config_cleanup(drm);
        clk_disable_unprepare(priv->pclk);
        drm_dev_put(drm);
-
-       return 0;
 }
 
 static const struct of_device_id tve200_of_match[] = {
@@ -261,10 +259,10 @@ static const struct of_device_id tve200_of_match[] = {
 static struct platform_driver tve200_driver = {
        .driver = {
                .name           = "tve200",
-               .of_match_table = of_match_ptr(tve200_of_match),
+               .of_match_table = tve200_of_match,
        },
        .probe = tve200_probe,
-       .remove = tve200_remove,
+       .remove_new = tve200_remove,
 };
 drm_module_platform_driver(tve200_driver);
 
index aa02fd2789c3f885946e2c1bb85a0611bc10e556..40876bcdd79a47ac24898ccf08b38014b1e6cd14 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
@@ -310,16 +311,6 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
  * CRTC
  */
 
-static int udl_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
-{
-       struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
-       if (!new_crtc_state->enable)
-               return 0;
-
-       return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
-}
-
 static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
        struct drm_device *dev = crtc->dev;
@@ -381,7 +372,7 @@ out:
 }
 
 static const struct drm_crtc_helper_funcs udl_crtc_helper_funcs = {
-       .atomic_check = udl_crtc_helper_atomic_check,
+       .atomic_check = drm_crtc_helper_atomic_check,
        .atomic_enable = udl_crtc_helper_atomic_enable,
        .atomic_disable = udl_crtc_helper_atomic_disable,
 };
index 478f1f0f60dec2d4d5b911abf3b1b4da2b930cfb..ffbbe9d527d324fca46f63656b12d0684fcd64bd 100644 (file)
@@ -171,10 +171,7 @@ static const struct drm_driver v3d_drm_driver = {
 #endif
 
        .gem_create_object = v3d_create_object,
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = v3d_prime_import_sg_table,
-       .gem_prime_mmap = drm_gem_prime_mmap,
 
        .ioctls = v3d_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -295,7 +292,7 @@ dma_free:
        return ret;
 }
 
-static int v3d_platform_drm_remove(struct platform_device *pdev)
+static void v3d_platform_drm_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
        struct v3d_dev *v3d = to_v3d_dev(drm);
@@ -306,13 +303,11 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
 
        dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
                    v3d->mmu_scratch_paddr);
-
-       return 0;
 }
 
 static struct platform_driver v3d_platform_driver = {
        .probe          = v3d_platform_drm_probe,
-       .remove         = v3d_platform_drm_remove,
+       .remove_new     = v3d_platform_drm_remove,
        .driver         = {
                .name   = "v3d",
                .of_match_table = v3d_of_match,
index b74b1351bfc836c504d87ad33ce086170fd8cdb2..7f664a4b2a752156a13ef0e667b535e0b77afc8e 100644 (file)
@@ -340,7 +340,7 @@ struct v3d_submit_ext {
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 {
        /* nsecs_to_jiffies64() does not guard against overflow */
-       if (NSEC_PER_SEC % HZ &&
+       if ((NSEC_PER_SEC % HZ) != 0 &&
            div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
                return MAX_JIFFY_OFFSET;
 
index a4bed26af32f8e9183fc3738ce65363e0a3d1e6a..63ca46f4cb350551288d6f80e85cda1a223eb1d4 100644 (file)
@@ -153,6 +153,13 @@ static int __build_mock(struct kunit *test, struct drm_device *drm,
        return 0;
 }
 
+static void kunit_action_drm_dev_unregister(void *ptr)
+{
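+       /* Deferred action: unregister the mock DRM device on test teardown. */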
+       struct drm_device *drm = ptr;
+
+       drm_dev_unregister(drm);
+}
+
 static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
 {
        struct drm_device *drm;
@@ -186,6 +193,11 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
        ret = drm_dev_register(drm, 0);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
+       ret = kunit_add_action_or_reset(test,
+                                       kunit_action_drm_dev_unregister,
+                                       drm);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+
        return vc4;
 }
 
index ae0bd0f81698c5a0569ae97ae1fd2cad403e0cb2..61622e95103120247d03510f173a77f1461781c7 100644 (file)
@@ -20,7 +20,6 @@
 
 struct pv_muxing_priv {
        struct vc4_dev *vc4;
-       struct drm_modeset_acquire_ctx ctx;
        struct drm_atomic_state *state;
 };
 
@@ -725,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
 static int vc4_pv_muxing_test_init(struct kunit *test)
 {
        const struct pv_muxing_param *params = test->param_value;
-       struct drm_atomic_state *state;
+       struct drm_modeset_acquire_ctx *ctx;
        struct pv_muxing_priv *priv;
        struct drm_device *drm;
        struct vc4_dev *vc4;
@@ -738,33 +737,16 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
        priv->vc4 = vc4;
 
-       drm_modeset_acquire_init(&priv->ctx, 0);
+       ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
 
        drm = &vc4->base;
-       state = drm_atomic_state_alloc(drm);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-       state->acquire_ctx = &priv->ctx;
-
-       priv->state = state;
+       priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
 
        return 0;
 }
 
-static void vc4_pv_muxing_test_exit(struct kunit *test)
-{
-       struct pv_muxing_priv *priv = test->priv;
-       struct vc4_dev *vc4 = priv->vc4;
-       struct drm_device *drm = &vc4->base;
-       struct drm_atomic_state *state = priv->state;
-
-       drm_atomic_state_put(state);
-       drm_modeset_drop_locks(&priv->ctx);
-       drm_modeset_acquire_fini(&priv->ctx);
-       drm_dev_unregister(drm);
-       drm_kunit_helper_free_device(test, vc4->dev);
-}
-
 static struct kunit_case vc4_pv_muxing_tests[] = {
        KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
                         vc4_test_pv_muxing_gen_params),
@@ -776,7 +758,6 @@ static struct kunit_case vc4_pv_muxing_tests[] = {
 static struct kunit_suite vc4_pv_muxing_test_suite = {
        .name = "vc4-pv-muxing-combinations",
        .init = vc4_pv_muxing_test_init,
-       .exit = vc4_pv_muxing_test_exit,
        .test_cases = vc4_pv_muxing_tests,
 };
 
@@ -791,7 +772,6 @@ static struct kunit_case vc5_pv_muxing_tests[] = {
 static struct kunit_suite vc5_pv_muxing_test_suite = {
        .name = "vc5-pv-muxing-combinations",
        .init = vc4_pv_muxing_test_init,
-       .exit = vc4_pv_muxing_test_exit,
        .test_cases = vc5_pv_muxing_tests,
 };
 
@@ -802,7 +782,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = {
  */
 static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
 {
-       struct drm_modeset_acquire_ctx ctx;
+       struct drm_modeset_acquire_ctx *ctx;
        struct drm_atomic_state *state;
        struct vc4_crtc_state *new_vc4_crtc_state;
        struct vc4_hvs_state *new_hvs_state;
@@ -815,14 +795,13 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
        vc4 = vc5_mock_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-       drm_modeset_acquire_init(&ctx, 0);
+       ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
 
        drm = &vc4->base;
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -843,13 +822,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
        ret = drm_atomic_helper_swap_state(state, false);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
-       drm_atomic_state_put(state);
-
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -868,17 +843,18 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
        KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
 
        KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
-
-       drm_atomic_state_put(state);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-       drm_dev_unregister(drm);
-       drm_kunit_helper_free_device(test, vc4->dev);
 }
 
+/*
+ * This test makes sure that we never change the FIFO of an active HVS
+ * channel if we disable a FIFO with a lower index.
+ *
+ * Doing so would result in a FIFO stall and disrupt an output that is
+ * supposed to be unaffected by the commit.
+ */
 static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 {
-       struct drm_modeset_acquire_ctx ctx;
+       struct drm_modeset_acquire_ctx *ctx;
        struct drm_atomic_state *state;
        struct vc4_crtc_state *new_vc4_crtc_state;
        struct vc4_hvs_state *new_hvs_state;
@@ -891,14 +867,13 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
        vc4 = vc5_mock_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-       drm_modeset_acquire_init(&ctx, 0);
+       ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
 
        drm = &vc4->base;
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -930,13 +905,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
        ret = drm_atomic_helper_swap_state(state, false);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
-       drm_atomic_state_put(state);
-
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -958,18 +929,27 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 
                KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
        }
-
-       drm_atomic_state_put(state);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-       drm_dev_unregister(drm);
-       drm_kunit_helper_free_device(test, vc4->dev);
 }
 
+/*
+ * Test that if we affect a single output, only the CRTC state of that
+ * output is pulled into the global atomic state.
+ *
+ * This is relevant for two things:
+ *
+ *   - If we don't have that state at all, we are unlikely to affect the
+ *     FIFO muxing. This is somewhat redundant with
+ *     drm_test_vc5_pv_muxing_bugs_stable_fifo().
+ *
+ *   - KMS waits for page flips to occur on all the CRTCs found in the
+ *     CRTC state. Since the CRTC is unaffected, we would over-wait and,
+ *     most importantly, run into corner cases like waiting on an
+ *     inactive CRTC that never completes.
+ */
 static void
 drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
 {
-       struct drm_modeset_acquire_ctx ctx;
+       struct drm_modeset_acquire_ctx *ctx;
        struct drm_atomic_state *state;
        struct vc4_crtc_state *new_vc4_crtc_state;
        struct drm_device *drm;
@@ -979,14 +959,13 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
        vc4 = vc5_mock_device(test);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-       drm_modeset_acquire_init(&ctx, 0);
+       ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
 
        drm = &vc4->base;
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -996,13 +975,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
        ret = drm_atomic_helper_swap_state(state, false);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
-       drm_atomic_state_put(state);
-
-       state = drm_atomic_state_alloc(drm);
+       state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
-       state->acquire_ctx = &ctx;
-
        ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
        KUNIT_ASSERT_EQ(test, ret, 0);
 
@@ -1012,12 +987,6 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
        new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
                                                            VC4_ENCODER_TYPE_HDMI0);
        KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
-
-       drm_atomic_state_put(state);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-       drm_dev_unregister(drm);
-       drm_kunit_helper_free_device(test, vc4->dev);
 }
 
 static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
index bef9d45ef1df6c6a344de7232ba1f6e6c7b175cd..8b5a7e5eb1466c27604afdbd49b4b67b99e01262 100644 (file)
@@ -31,7 +31,8 @@
 
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_atomic.h>
@@ -1450,15 +1451,14 @@ static int vc4_crtc_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_crtc_ops);
 }
 
-static int vc4_crtc_dev_remove(struct platform_device *pdev)
+static void vc4_crtc_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_crtc_ops);
-       return 0;
 }
 
 struct platform_driver vc4_crtc_driver = {
        .probe = vc4_crtc_dev_probe,
-       .remove = vc4_crtc_dev_remove,
+       .remove_new = vc4_crtc_dev_remove,
        .driver = {
                .name = "vc4_crtc",
                .of_match_table = vc4_crtc_dt_match,
index e68c07d86040d940f4cd0b0f17a748db8477ed04..39152e755a13e415dc3659c06d64e39a7e555905 100644 (file)
@@ -22,8 +22,8 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/media-bus-format.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
@@ -388,15 +388,14 @@ static int vc4_dpi_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_dpi_ops);
 }
 
-static int vc4_dpi_dev_remove(struct platform_device *pdev)
+static void vc4_dpi_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_dpi_ops);
-       return 0;
 }
 
 struct platform_driver vc4_dpi_driver = {
        .probe = vc4_dpi_dev_probe,
-       .remove = vc4_dpi_dev_remove,
+       .remove_new = vc4_dpi_dev_remove,
        .driver = {
                .name = "vc4_dpi",
                .of_match_table = vc4_dpi_dt_match,
index 823395c23cc308fac53ce61147a452e39bc03c4c..1b3531374967ef0256c6106a3f44318caf443a2e 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
@@ -439,11 +439,9 @@ static int vc4_platform_drm_probe(struct platform_device *pdev)
        return component_master_add_with_match(dev, &vc4_drm_ops, match);
 }
 
-static int vc4_platform_drm_remove(struct platform_device *pdev)
+static void vc4_platform_drm_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &vc4_drm_ops);
-
-       return 0;
 }
 
 static const struct of_device_id vc4_of_match[] = {
@@ -456,7 +454,7 @@ MODULE_DEVICE_TABLE(of, vc4_of_match);
 
 static struct platform_driver vc4_platform_driver = {
        .probe          = vc4_platform_drm_probe,
-       .remove         = vc4_platform_drm_remove,
+       .remove_new     = vc4_platform_drm_remove,
        .driver         = {
                .name   = "vc4-drm",
                .of_match_table = vc4_of_match,
index 9e0c355b236f521854d699407d61029e41594f1b..46f6c4ce61c5291b66efb2fbbb6c9f70ad06e3b0 100644 (file)
@@ -25,8 +25,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_atomic_helper.h>
@@ -1825,20 +1826,18 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int vc4_dsi_dev_remove(struct platform_device *pdev)
+static void vc4_dsi_dev_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct vc4_dsi *dsi = dev_get_drvdata(dev);
 
        mipi_dsi_host_unregister(&dsi->dsi_host);
        vc4_dsi_put(dsi);
-
-       return 0;
 }
 
 struct platform_driver vc4_dsi_driver = {
        .probe = vc4_dsi_dev_probe,
-       .remove = vc4_dsi_dev_remove,
+       .remove_new = vc4_dsi_dev_remove,
        .driver = {
                .name = "vc4_dsi",
                .of_match_table = vc4_dsi_dt_match,
index 5261526d286f5100fb62971450e78e793e241a39..a488625773dc5a18efcf208306852a4566d2f449 100644 (file)
@@ -41,8 +41,8 @@
 #include <linux/component.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
 #include <linux/rational.h>
 #include <linux/reset.h>
@@ -3770,10 +3770,9 @@ static int vc4_hdmi_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_hdmi_ops);
 }
 
-static int vc4_hdmi_dev_remove(struct platform_device *pdev)
+static void vc4_hdmi_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_hdmi_ops);
-       return 0;
 }
 
 static const struct vc4_hdmi_variant bcm2835_variant = {
@@ -3869,7 +3868,7 @@ static const struct dev_pm_ops vc4_hdmi_pm_ops = {
 
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
-       .remove = vc4_hdmi_dev_remove,
+       .remove_new = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
index 4da66ef9678354ffa6645a035efa5a8fc0f4ddac..04af672caacb1b4c869a953f9d25d3ed7c8e7b9e 100644 (file)
@@ -1061,10 +1061,9 @@ static int vc4_hvs_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_hvs_ops);
 }
 
-static int vc4_hvs_dev_remove(struct platform_device *pdev)
+static void vc4_hvs_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_hvs_ops);
-       return 0;
 }
 
 static const struct of_device_id vc4_hvs_dt_match[] = {
@@ -1075,7 +1074,7 @@ static const struct of_device_id vc4_hvs_dt_match[] = {
 
 struct platform_driver vc4_hvs_driver = {
        .probe = vc4_hvs_dev_probe,
-       .remove = vc4_hvs_dev_remove,
+       .remove_new = vc4_hvs_dev_remove,
        .driver = {
                .name = "vc4_hvs",
                .of_match_table = vc4_hvs_dt_match,
index c5abdec0310399e0cc40285ef911f0dd3d5acc5e..ffe1f7d1b911d35b0b3d50f4ae55634786b8bebd 100644 (file)
@@ -9,8 +9,8 @@
 
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_atomic.h>
@@ -573,10 +573,9 @@ static int vc4_txp_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_txp_ops);
 }
 
-static int vc4_txp_remove(struct platform_device *pdev)
+static void vc4_txp_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_txp_ops);
-       return 0;
 }
 
 static const struct of_device_id vc4_txp_dt_match[] = {
@@ -586,7 +585,7 @@ static const struct of_device_id vc4_txp_dt_match[] = {
 
 struct platform_driver vc4_txp_driver = {
        .probe = vc4_txp_probe,
-       .remove = vc4_txp_remove,
+       .remove_new = vc4_txp_remove,
        .driver = {
                .name = "vc4_txp",
                .of_match_table = vc4_txp_dt_match,
index 29a664c8bf44a1ebb86aa60b1382dbb33c4230d1..04ac7805e6d5fe3d218460039f5d336f79b33e40 100644 (file)
@@ -532,10 +532,9 @@ static int vc4_v3d_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_v3d_ops);
 }
 
-static int vc4_v3d_dev_remove(struct platform_device *pdev)
+static void vc4_v3d_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_v3d_ops);
-       return 0;
 }
 
 const struct of_device_id vc4_v3d_dt_match[] = {
@@ -547,7 +546,7 @@ const struct of_device_id vc4_v3d_dt_match[] = {
 
 struct platform_driver vc4_v3d_driver = {
        .probe = vc4_v3d_dev_probe,
-       .remove = vc4_v3d_dev_remove,
+       .remove_new = vc4_v3d_dev_remove,
        .driver = {
                .name = "vc4_v3d",
                .of_match_table = vc4_v3d_dt_match,
index d6e6a1a22eba78d4a2e2313957435f434e68dd3a..268f18b10ee0145b5165433dacf8c2c88da6a26c 100644 (file)
@@ -21,8 +21,8 @@
 #include <drm/drm_simple_kms_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
 #include "vc4_drv.h"
@@ -812,15 +812,14 @@ static int vc4_vec_dev_probe(struct platform_device *pdev)
        return component_add(&pdev->dev, &vc4_vec_ops);
 }
 
-static int vc4_vec_dev_remove(struct platform_device *pdev)
+static void vc4_vec_dev_remove(struct platform_device *pdev)
 {
        component_del(&pdev->dev, &vc4_vec_ops);
-       return 0;
 }
 
 struct platform_driver vc4_vec_driver = {
        .probe = vc4_vec_dev_probe,
-       .remove = vc4_vec_dev_remove,
+       .remove_new = vc4_vec_dev_remove,
        .driver = {
                .name = "vc4_vec",
                .of_match_table = vc4_vec_dt_match,
index add075681e18f80b3b42afc6cb31ff38e8fa937b..644b8ee51009bf92bf6263c098251fcb9b79b42b 100644 (file)
@@ -176,7 +176,8 @@ static const struct drm_driver driver = {
         * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
         * out via drm_device::driver_features:
         */
-       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+                          DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
        .open = virtio_gpu_driver_open,
        .postclose = virtio_gpu_driver_postclose,
 
@@ -186,9 +187,6 @@ static const struct drm_driver driver = {
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = virtio_gpu_debugfs_init,
 #endif
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_mmap = drm_gem_prime_mmap,
        .gem_prime_import = virtgpu_gem_prime_import,
        .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
 
index cf3c04b16a7a8f7a813f1ccb91fc58d1d9767a02..3c00135ead45af7bd5f9b9c64236750a0cdd4ad7 100644 (file)
 #include <linux/uaccess.h>
 
 #include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
 #include <drm/virtgpu_drm.h>
 
 #include "virtgpu_drv.h"
 
+struct virtio_gpu_submit_post_dep {
+       struct drm_syncobj *syncobj;
+       struct dma_fence_chain *chain;
+       u64 point;
+};
+
 struct virtio_gpu_submit {
+       struct virtio_gpu_submit_post_dep *post_deps;
+       unsigned int num_out_syncobjs;
+
+       struct drm_syncobj **in_syncobjs;
+       unsigned int num_in_syncobjs;
+
        struct virtio_gpu_object_array *buflist;
        struct drm_virtgpu_execbuffer *exbuf;
        struct virtio_gpu_fence *out_fence;
@@ -59,18 +72,211 @@ static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
        return 0;
 }
 
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+                                    u32 nr_syncobjs)
+{
+       u32 i = nr_syncobjs;
+
+       while (i--) {
+               if (syncobjs[i])
+                       drm_syncobj_put(syncobjs[i]);
+       }
+
+       kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+       struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+       struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+       size_t syncobj_stride = exbuf->syncobj_stride;
+       u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+       struct drm_syncobj **syncobjs;
+       int ret = 0, i;
+
+       if (!num_in_syncobjs)
+               return 0;
+
+       /*
+        * kvcalloc() first tries to allocate memory using kmalloc and
+        * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+        * internally for allocations larger than a page, preventing a
+        * storm of kmsg warnings.
+        */
+       syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+       if (!syncobjs)
+               return -ENOMEM;
+
+       for (i = 0; i < num_in_syncobjs; i++) {
+               u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+               struct dma_fence *fence;
+
+               memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+               if (copy_from_user(&syncobj_desc,
+                                  u64_to_user_ptr(address),
+                                  min(syncobj_stride, sizeof(syncobj_desc)))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+                                            syncobj_desc.point, 0, &fence);
+               if (ret)
+                       break;
+
+               ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+               dma_fence_put(fence);
+               if (ret)
+                       break;
+
+               if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+                       syncobjs[i] = drm_syncobj_find(submit->file,
+                                                      syncobj_desc.handle);
+                       if (!syncobjs[i]) {
+                               ret = -EINVAL;
+                               break;
+                       }
+               }
+       }
+
+       if (ret) {
+               virtio_gpu_free_syncobjs(syncobjs, i);
+               return ret;
+       }
+
+       submit->num_in_syncobjs = num_in_syncobjs;
+       submit->in_syncobjs = syncobjs;
+
+       return ret;
+}
+
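+/*
+ * In-syncobjs are only stored when flagged with VIRTGPU_EXECBUF_SYNCOBJ_RESET,
+ * so this drops fences from exactly those entries.
+ */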
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+                                     u32 nr_syncobjs)
+{
+       u32 i;
+
+       for (i = 0; i < nr_syncobjs; i++) {
+               if (syncobjs[i])
+                       drm_syncobj_replace_fence(syncobjs[i], NULL);
+       }
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+                         u32 nr_syncobjs)
+{
+       u32 i = nr_syncobjs;
+
+       while (i--) {
+               kfree(post_deps[i].chain);
+               drm_syncobj_put(post_deps[i].syncobj);
+       }
+
+       kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+       struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+       struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+       struct virtio_gpu_submit_post_dep *post_deps;
+       u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+       size_t syncobj_stride = exbuf->syncobj_stride;
+       int ret = 0, i;
+
+       if (!num_out_syncobjs)
+               return 0;
+
+       post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+       if (!post_deps)
+               return -ENOMEM;
+
+       for (i = 0; i < num_out_syncobjs; i++) {
+               u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+               memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+               if (copy_from_user(&syncobj_desc,
+                                  u64_to_user_ptr(address),
+                                  min(syncobj_stride, sizeof(syncobj_desc)))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               post_deps[i].point = syncobj_desc.point;
+
+               if (syncobj_desc.flags) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (syncobj_desc.point) {
+                       post_deps[i].chain = dma_fence_chain_alloc();
+                       if (!post_deps[i].chain) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+               }
+
+               post_deps[i].syncobj = drm_syncobj_find(submit->file,
+                                                       syncobj_desc.handle);
+               if (!post_deps[i].syncobj) {
+                       kfree(post_deps[i].chain);
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+
+       if (ret) {
+               virtio_gpu_free_post_deps(post_deps, i);
+               return ret;
+       }
+
+       submit->num_out_syncobjs = num_out_syncobjs;
+       submit->post_deps = post_deps;
+
+       return 0;
+}
+
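+/*
+ * Attach the job's out-fence to the out-syncobjs: timeline points go through
+ * the pre-allocated fence chains, binary syncobjs take the fence directly.
+ */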
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+       struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+       if (post_deps) {
+               struct dma_fence *fence = &submit->out_fence->f;
+               u32 i;
+
+               for (i = 0; i < submit->num_out_syncobjs; i++) {
+                       if (post_deps[i].chain) {
+                               drm_syncobj_add_point(post_deps[i].syncobj,
+                                                     post_deps[i].chain,
+                                                     fence, post_deps[i].point);
+                               post_deps[i].chain = NULL;
+                       } else {
+                               drm_syncobj_replace_fence(post_deps[i].syncobj,
+                                                         fence);
+                       }
+               }
+       }
+}
+
 static int virtio_gpu_fence_event_create(struct drm_device *dev,
                                         struct drm_file *file,
                                         struct virtio_gpu_fence *fence,
                                         u32 ring_idx)
 {
-       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence_event *e = NULL;
        int ret;
 
-       if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
-               return 0;
-
        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;
@@ -122,6 +328,10 @@ static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
 
 static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
 {
+       virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+       virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+       virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
        if (!IS_ERR(submit->buf))
                kvfree(submit->buf);
 
@@ -164,18 +374,31 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fence *out_fence;
+       bool drm_fence_event;
        int err;
 
        memset(submit, 0, sizeof(*submit));
 
-       out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
-       if (!out_fence)
-               return -ENOMEM;
-
-       err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
-       if (err) {
-               dma_fence_put(&out_fence->f);
-               return err;
+       if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+           (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+               drm_fence_event = true;
+       else
+               drm_fence_event = false;
+
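+       /* Allocate an out-fence only if something will consume or wait on it. */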
+       if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+           exbuf->num_out_syncobjs ||
+           exbuf->num_bo_handles ||
+           drm_fence_event)
+               out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+       else
+               out_fence = NULL;
+
+       if (drm_fence_event) {
+               err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+               if (err) {
+                       dma_fence_put(&out_fence->f);
+                       return err;
+               }
        }
 
        submit->out_fence = out_fence;
@@ -283,6 +506,14 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto cleanup;
 
+       ret = virtio_gpu_parse_post_deps(&submit);
+       if (ret)
+               goto cleanup;
+
+       ret = virtio_gpu_parse_deps(&submit);
+       if (ret)
+               goto cleanup;
+
        /*
         * Await in-fences in the end of the job submission path to
         * optimize the path by proceeding directly to the submission
@@ -303,6 +534,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
         * the job submission path.
         */
        virtio_gpu_install_out_fence_fd(&submit);
+       virtio_gpu_process_post_deps(&submit);
        virtio_gpu_complete_submit(&submit);
 cleanup:
        virtio_gpu_cleanup_submit(&submit);
index 906d3df40cdbe4e0af10755d92369e8b864e5cbb..d5d4f642d3678077c7053dad9c5026e04b9f2055 100644 (file)
@@ -6,6 +6,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_fixed.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_vblank.h>
 #include <linux/minmax.h>
@@ -23,7 +24,7 @@ static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
 
 /**
  * pre_mul_alpha_blend - alpha blending equation
- * @src_frame_info: source framebuffer's metadata
+ * @frame_info: Source framebuffer's metadata
  * @stage_buffer: The line with the pixels from src_plane
  * @output_buffer: A line buffer that receives all the blends output
  *
@@ -89,12 +90,81 @@ static void fill_background(const struct pixel_argb_u16 *background_color,
                output_buffer->pixels[i] = *background_color;
 }
 
+// lerp(a, b, t) = a + (b - a) * t
+static u16 lerp_u16(u16 a, u16 b, s64 t)
+{
+       s64 a_fp = drm_int2fixp(a);
+       s64 b_fp = drm_int2fixp(b);
+
+       s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+
+       return drm_fixp2int(a_fp + delta);
+}
+
+static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
+{
+       s64 color_channel_fp = drm_int2fixp(channel_value);
+
+       return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
+}
+
+/*
+ * This enum is related to the positions of the variables inside
+ * `struct drm_color_lut`, so the order of both needs to be the same.
+ */
+enum lut_channel {
+       LUT_RED = 0,
+       LUT_GREEN,
+       LUT_BLUE,
+       LUT_RESERVED
+};
+
+static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
+                                     enum lut_channel channel)
+{
+       s64 lut_index = get_lut_index(lut, channel_value);
+
+       /*
+        * This checks if `struct drm_color_lut` has any gap added by the compiler
+        * between the struct fields.
+        */
+       static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
+
+       u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+       u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+
+       u16 floor_channel_value = floor_lut_value[channel];
+       u16 ceil_channel_value = ceil_lut_value[channel];
+
+       return lerp_u16(floor_channel_value, ceil_channel_value,
+                       lut_index & DRM_FIXED_DECIMAL_MASK);
+}
+
+static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
+{
+       if (!crtc_state->gamma_lut.base)
+               return;
+
+       if (!crtc_state->gamma_lut.lut_length)
+               return;
+
+       for (size_t x = 0; x < output_buffer->n_pixels; x++) {
+               struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];
+
+               pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
+               pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
+               pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
+       }
+}
+
 /**
- * @wb_frame_info: The writeback frame buffer metadata
+ * blend - blend the pixels from all planes and compute crc
+ * @wb: The writeback frame buffer metadata
  * @crtc_state: The crtc state
  * @crc32: The crc output of the final frame
  * @output_buffer: A buffer of a row that will receive the result of the blend(s)
  * @stage_buffer: The line with the pixels from plane being blend to the output
+ * @row_size: The size, in bytes, of a single row
  *
  * This function blends the pixels (Using the `pre_mul_alpha_blend`)
  * from all planes, calculates the crc32 of the output from the former step,
@@ -128,10 +198,12 @@ static void blend(struct vkms_writeback_job *wb,
                                            output_buffer);
                }
 
+               apply_lut(crtc_state, output_buffer);
+
                *crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
 
                if (wb)
-                       wb->wb_write(&wb->wb_frame_info, output_buffer, y_pos);
+                       vkms_writeback_row(wb, output_buffer, y_pos);
        }
 }
 
@@ -145,7 +217,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
                if (!planes[i]->pixel_read)
                        return -1;
 
-       if (active_wb && !active_wb->wb_write)
+       if (active_wb && !active_wb->pixel_write)
                return -1;
 
        return 0;
@@ -242,6 +314,22 @@ void vkms_composer_worker(struct work_struct *work)
        crtc_state->frame_start = 0;
        crtc_state->frame_end = 0;
        crtc_state->crc_pending = false;
+
+       if (crtc->state->gamma_lut) {
+               s64 max_lut_index_fp;
+               s64 u16_max_fp = drm_int2fixp(0xffff);
+
+               crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+               crtc_state->gamma_lut.lut_length =
+                       crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
+               max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+               crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
+                                                                              u16_max_fp);
+
+       } else {
+               crtc_state->gamma_lut.base = NULL;
+       }
+
        spin_unlock_irq(&out->composer_lock);
 
        /*
@@ -320,10 +408,15 @@ void vkms_set_composer(struct vkms_output *out, bool enabled)
        if (enabled)
                drm_crtc_vblank_get(&out->crtc);
 
-       spin_lock_irq(&out->lock);
+       mutex_lock(&out->enabled_lock);
        old_enabled = out->composer_enabled;
        out->composer_enabled = enabled;
-       spin_unlock_irq(&out->lock);
+
+       /* if the composer is not enabled, release the lock here so it stays
+        * balanced even when a commit fails; otherwise the vblank handler
+        * releases it once the composer job has been queued
+        */
+       if (!out->composer_enabled)
+               mutex_unlock(&out->enabled_lock);
 
        if (old_enabled)
                drm_crtc_vblank_put(&out->crtc);
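
For reference, the LUT interpolation above runs entirely in the 32.32
fixed-point format from <drm/drm_fixed.h>. A standalone user-space
re-creation of the math with one worked value (same semantics as
drm_int2fixp()/drm_fixp_mul()/drm_fixp2int(); the kernel versions
additionally guard against overflow):

#include <stdint.h>
#include <stdio.h>

static int64_t int2fixp(int a) { return (int64_t)a << 32; }
static int64_t fixp_mul(int64_t a, int64_t b)
{
	return (int64_t)(((__int128)a * b) >> 32);
}
static int fixp2int(int64_t a) { return (int)(a >> 32); }

static uint16_t lerp_u16(uint16_t a, uint16_t b, int64_t t)
{
	int64_t delta = fixp_mul(int2fixp(b) - int2fixp(a), t);

	return (uint16_t)fixp2int(int2fixp(a) + delta);
}

int main(void)
{
	int64_t t = int2fixp(1) / 4;	/* t = 0.25 in 32.32 */

	/* a quarter of the way from 0x0000 to 0x1000 is 0x0400 */
	printf("0x%04x\n", lerp_u16(0x0000, 0x1000, t));
	return 0;
}
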
index 515f6772b8663d1b02ab9736fe49bbb8ab1e24e1..3c5ebf106b66de958d78518ffecb7e6a343d9fba 100644 (file)
@@ -16,7 +16,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
        struct drm_crtc *crtc = &output->crtc;
        struct vkms_crtc_state *state;
        u64 ret_overrun;
-       bool ret, fence_cookie;
+       bool ret, fence_cookie, composer_enabled;
 
        fence_cookie = dma_fence_begin_signalling();
 
@@ -25,15 +25,15 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
        if (ret_overrun != 1)
                pr_warn("%s: vblank timer overrun\n", __func__);
 
-       spin_lock(&output->lock);
        ret = drm_crtc_handle_vblank(crtc);
        if (!ret)
                DRM_ERROR("vkms failure on handling vblank");
 
        state = output->composer_state;
-       spin_unlock(&output->lock);
+       composer_enabled = output->composer_enabled;
+       mutex_unlock(&output->enabled_lock);
 
-       if (state && output->composer_enabled) {
+       if (state && composer_enabled) {
                u64 frame = drm_crtc_accurate_vblank_count(crtc);
 
                /* update frame_start only if a queued vkms_composer_worker()
@@ -290,8 +290,12 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 
        drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
 
+       drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
+       drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
+
        spin_lock_init(&vkms_out->lock);
        spin_lock_init(&vkms_out->composer_lock);
+       mutex_init(&vkms_out->enabled_lock);
 
        vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
        if (!vkms_out->composer_workq)
index e3c9c9571c8d6fa58a99e345e9ca83b8a82caf73..dd0af086e7fa943a11a5b6867c997533b75dfa34 100644 (file)
@@ -120,9 +120,27 @@ static const struct drm_driver vkms_driver = {
        .minor                  = DRIVER_MINOR,
 };
 
+static int vkms_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *new_crtc_state;
+       int i;
+
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+               if (!new_crtc_state->gamma_lut || !new_crtc_state->color_mgmt_changed)
+                       continue;
+
+               if (new_crtc_state->gamma_lut->length / sizeof(struct drm_color_lut *)
+                   > VKMS_LUT_SIZE)
+                       return -EINVAL;
+       }
+
+       return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs vkms_mode_funcs = {
        .fb_create = drm_gem_fb_create,
-       .atomic_check = drm_atomic_helper_check,
+       .atomic_check = vkms_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
 
index 5f1a0a44a78cfb9162cc4a79271228cf1fd311cb..c7ae6c2ba1df08fe903a8ff8be3782918e70efbb 100644 (file)
@@ -23,6 +23,8 @@
 
 #define NUM_OVERLAY_PLANES 8
 
+#define VKMS_LUT_SIZE 256
+
 struct vkms_frame_info {
        struct drm_framebuffer *fb;
        struct drm_rect src, dst;
@@ -46,8 +48,7 @@ struct line_buffer {
 struct vkms_writeback_job {
        struct iosys_map data[DRM_FORMAT_MAX_PLANES];
        struct vkms_frame_info wb_frame_info;
-       void (*wb_write)(struct vkms_frame_info *frame_info,
-                        const struct line_buffer *buffer, int y);
+       void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
 };
 
 /**
@@ -65,6 +66,12 @@ struct vkms_plane {
        struct drm_plane base;
 };
 
+struct vkms_color_lut {
+       struct drm_color_lut *base;
+       size_t lut_length;
+       s64 channel_value2index_ratio;
+};
+
 /**
  * vkms_crtc_state - Driver specific CRTC state
  * @base: base CRTC state
@@ -80,6 +87,7 @@ struct vkms_crtc_state {
        /* stack of active planes for crc computation, should be in z order */
        struct vkms_plane_state **active_planes;
        struct vkms_writeback_job *active_writeback;
+       struct vkms_color_lut gamma_lut;
 
        /* below four are protected by vkms_output.composer_lock */
        bool crc_pending;
@@ -100,8 +108,10 @@ struct vkms_output {
        struct workqueue_struct *composer_workq;
        /* protects concurrent access to composer */
        spinlock_t lock;
+       /* guarantees that if the composer is enabled, a job will be queued */
+       struct mutex enabled_lock;
 
-       /* protected by @lock */
+       /* protected by @enabled_lock */
        bool composer_enabled;
        struct vkms_crtc_state *composer_state;
 
@@ -157,6 +167,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
 void vkms_composer_worker(struct work_struct *work);
 void vkms_set_composer(struct vkms_output *out, bool enabled);
 void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
+void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
 
 /* Writeback */
 int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
index 5945da0beba6f08c56c1eb22d6178ae09496b426..36046b12f29686ecb30831ec3709fa8b8996b2c9 100644 (file)
@@ -111,6 +111,19 @@ static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
        out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
 }
 
+/**
+ * vkms_compose_row - compose a single row of a plane
+ * @stage_buffer: output line with the composed pixels
+ * @plane: state of the plane that is being composed
+ * @y: y coordinate of the row
+ *
+ * This function composes a single row of a plane. It gets the source pixels
+ * through the y coordinate (see get_packed_src_addr()) and walks linearly
+ * through the source pixels, reading each pixel and converting it to
+ * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270
+ * the source pixels are not traversed linearly: they are queried on each
+ * iteration so that the buffer is walked vertically instead.
+ */
 void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
 {
        struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
@@ -137,107 +150,81 @@ void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state
  * They are used in the `compose_active_planes` to convert and store a line
  * from the src_buffer to the writeback buffer.
  */
-static void argb_u16_to_ARGB8888(struct vkms_frame_info *frame_info,
-                                const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
-       int x_dst = frame_info->dst.x1;
-       u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
-       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
-       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
-                           src_buffer->n_pixels);
-
-       for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
-               /*
-                * This sequence below is important because the format's byte order is
-                * in little-endian. In the case of the ARGB8888 the memory is
-                * organized this way:
-                *
-                * | Addr     | = blue channel
-                * | Addr + 1 | = green channel
-                * | Addr + 2 | = Red channel
-                * | Addr + 3 | = Alpha channel
-                */
-               dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixels[x].a, 257);
-               dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
-               dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
-               dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
-       }
+       /*
+        * This sequence below is important because the format's byte order is
+        * in little-endian. In the case of the ARGB8888 the memory is
+        * organized this way:
+        *
+        * | Addr     | = blue channel
+        * | Addr + 1 | = green channel
+        * | Addr + 2 | = Red channel
+        * | Addr + 3 | = Alpha channel
+        */
+       dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+       dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+       dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+       dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
 }
 
-static void argb_u16_to_XRGB8888(struct vkms_frame_info *frame_info,
-                                const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
-       int x_dst = frame_info->dst.x1;
-       u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
-       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
-       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
-                           src_buffer->n_pixels);
-
-       for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
-               dst_pixels[3] = 0xff;
-               dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
-               dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
-               dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
-       }
+       dst_pixels[3] = 0xff;
+       dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+       dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+       dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
 }
 
-static void argb_u16_to_ARGB16161616(struct vkms_frame_info *frame_info,
-                                    const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
-       int x_dst = frame_info->dst.x1;
-       u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
-       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
-       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
-                           src_buffer->n_pixels);
-
-       for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
-               dst_pixels[3] = cpu_to_le16(in_pixels[x].a);
-               dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
-               dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
-               dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
-       }
+       u16 *pixels = (u16 *)dst_pixels;
+
+       pixels[3] = cpu_to_le16(in_pixel->a);
+       pixels[2] = cpu_to_le16(in_pixel->r);
+       pixels[1] = cpu_to_le16(in_pixel->g);
+       pixels[0] = cpu_to_le16(in_pixel->b);
 }
 
-static void argb_u16_to_XRGB16161616(struct vkms_frame_info *frame_info,
-                                    const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
-       int x_dst = frame_info->dst.x1;
-       u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
-       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
-       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
-                           src_buffer->n_pixels);
-
-       for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
-               dst_pixels[3] = 0xffff;
-               dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
-               dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
-               dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
-       }
+       u16 *pixels = (u16 *)dst_pixels;
+
+       pixels[3] = 0xffff;
+       pixels[2] = cpu_to_le16(in_pixel->r);
+       pixels[1] = cpu_to_le16(in_pixel->g);
+       pixels[0] = cpu_to_le16(in_pixel->b);
 }
 
-static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
-                              const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
 {
-       int x_dst = frame_info->dst.x1;
-       u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
-       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
-       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
-                           src_buffer->n_pixels);
+       u16 *pixels = (u16 *)dst_pixels;
 
        s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
        s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
 
-       for (size_t x = 0; x < x_limit; x++, dst_pixels++) {
-               s64 fp_r = drm_int2fixp(in_pixels[x].r);
-               s64 fp_g = drm_int2fixp(in_pixels[x].g);
-               s64 fp_b = drm_int2fixp(in_pixels[x].b);
+       s64 fp_r = drm_int2fixp(in_pixel->r);
+       s64 fp_g = drm_int2fixp(in_pixel->g);
+       s64 fp_b = drm_int2fixp(in_pixel->b);
 
-               u16 r = drm_fixp2int_round(drm_fixp_div(fp_r, fp_rb_ratio));
-               u16 g = drm_fixp2int_round(drm_fixp_div(fp_g, fp_g_ratio));
-               u16 b = drm_fixp2int_round(drm_fixp_div(fp_b, fp_rb_ratio));
+       u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+       u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+       u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
 
-               *dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
-       }
+       *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+}
+
+void vkms_writeback_row(struct vkms_writeback_job *wb,
+                       const struct line_buffer *src_buffer, int y)
+{
+       struct vkms_frame_info *frame_info = &wb->wb_frame_info;
+       int x_dst = frame_info->dst.x1;
+       u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+       struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+       int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
+
+       for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+               wb->pixel_write(dst_pixels, &in_pixels[x]);
 }
 
 void *get_pixel_conversion_function(u32 format)
@@ -258,7 +245,7 @@ void *get_pixel_conversion_function(u32 format)
        }
 }
 
-void *get_line_to_frame_function(u32 format)
+void *get_pixel_write_function(u32 format)
 {
        switch (format) {
        case DRM_FORMAT_ARGB8888:
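
The RGB565 path above scales 16-bit channels down with 32.32 fixed-point
ratios; note that the per-pixel version also switches drm_fixp2int_round()
for drm_fixp2int(), so mid-scale values now truncate rather than round. A
standalone sketch of the ratio arithmetic (same semantics as
drm_fixp_div()/drm_fixp2int(), ignoring their overflow guards) showing
that full-scale inputs land exactly on the format maxima:

#include <stdint.h>
#include <stdio.h>

static int64_t int2fixp(int a) { return (int64_t)a << 32; }
static int64_t fixp_div(int64_t a, int64_t b)
{
	return (int64_t)(((__int128)a << 32) / b);
}
static int fixp2int(int64_t a) { return (int)(a >> 32); }

int main(void)
{
	int64_t rb_ratio = fixp_div(int2fixp(65535), int2fixp(31));
	int64_t g_ratio = fixp_div(int2fixp(65535), int2fixp(63));

	/* 0xffff maps to 31 (5-bit red/blue) and 63 (6-bit green) */
	printf("%d %d\n",
	       fixp2int(fixp_div(int2fixp(65535), rb_ratio)),
	       fixp2int(fixp_div(int2fixp(65535), g_ratio)));
	return 0;
}
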
index c5b113495d0c0fffb449908170355ecf253ccb90..cf59c2ed8e9af06c9838635e3f7819a9169494b5 100644 (file)
@@ -7,6 +7,6 @@
 
 void *get_pixel_conversion_function(u32 format);
 
-void *get_line_to_frame_function(u32 format);
+void *get_pixel_write_function(u32 format);
 
 #endif /* _VKMS_FORMATS_H_ */
index 84a51cd281b9cb47e50dc60200818dd214b7556c..d7e63aa14663fe76afeb6b596d82930534a8441e 100644 (file)
@@ -15,6 +15,7 @@
 #include "vkms_formats.h"
 
 static const u32 vkms_wb_formats[] = {
+       DRM_FORMAT_ARGB8888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XRGB16161616,
        DRM_FORMAT_ARGB16161616,
@@ -142,13 +143,15 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
 
        spin_lock_irq(&output->composer_lock);
        crtc_state->active_writeback = active_wb;
+       crtc_state->wb_pending = true;
+       spin_unlock_irq(&output->composer_lock);
+
        wb_frame_info->offset = fb->offsets[0];
        wb_frame_info->pitch = fb->pitches[0];
        wb_frame_info->cpp = fb->format->cpp[0];
-       crtc_state->wb_pending = true;
-       spin_unlock_irq(&output->composer_lock);
+
        drm_writeback_queue_job(wb_conn, connector_state);
-       active_wb->wb_write = get_line_to_frame_function(wb_format);
+       active_wb->pixel_write = get_pixel_write_function(wb_format);
        drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
        drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
 }
index 90996c108146dbf2e12bb945af1099bc710c288b..aab79c5e34c26ec99e20f7201ad38b33d76663ed 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
@@ -474,10 +473,7 @@ DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
 static const struct drm_driver xen_drm_driver = {
        .driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .release                   = xen_drm_drv_release,
-       .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
-       .gem_prime_mmap            = drm_gem_prime_mmap,
        .dumb_create               = xen_drm_drv_dumb_create,
        .fops                      = &xen_drm_dev_fops,
        .name                      = "xendrm-du",
index 3b87eebddc979f39c3ad5f8b99b4e06d023d50ff..407bc07cec69ab6834b7a1884167334e935d9068 100644 (file)
@@ -1094,8 +1094,8 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
                         "%s%u", dma_names[layer->id], i);
                dma->chan = dma_request_chan(disp->dev, dma_channel_name);
                if (IS_ERR(dma->chan)) {
-                       dev_err(disp->dev, "failed to request dma channel\n");
-                       ret = PTR_ERR(dma->chan);
+                       ret = dev_err_probe(disp->dev, PTR_ERR(dma->chan),
+                                           "failed to request dma channel\n");
                        dma->chan = NULL;
                        return ret;
                }
@@ -1228,7 +1228,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
 {
        struct platform_device *pdev = to_platform_device(dpsub->dev);
        struct zynqmp_disp *disp;
-       struct resource *res;
        int ret;
 
        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -1238,22 +1237,19 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
        disp->dev = &pdev->dev;
        disp->dpsub = dpsub;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
-       disp->blend.base = devm_ioremap_resource(disp->dev, res);
+       disp->blend.base = devm_platform_ioremap_resource_byname(pdev, "blend");
        if (IS_ERR(disp->blend.base)) {
                ret = PTR_ERR(disp->blend.base);
                goto error;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
-       disp->avbuf.base = devm_ioremap_resource(disp->dev, res);
+       disp->avbuf.base = devm_platform_ioremap_resource_byname(pdev, "av_buf");
        if (IS_ERR(disp->avbuf.base)) {
                ret = PTR_ERR(disp->avbuf.base);
                goto error;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
-       disp->audio.base = devm_ioremap_resource(disp->dev, res);
+       disp->audio.base = devm_platform_ioremap_resource_byname(pdev, "aud");
        if (IS_ERR(disp->audio.base)) {
                ret = PTR_ERR(disp->audio.base);
                goto error;
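
dev_err_probe() folds the log-and-return dance into one call and, for
-EPROBE_DEFER, demotes the message to debug level while recording the
deferral reason for devices_deferred. The shape of the pattern on a
hypothetical probe path (foo_probe and the "core" clock name are
illustrative only):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		/* logs (or quietly defers) and returns the errno */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get core clock\n");

	return 0;
}
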
index 0a7b466446fb60b8a95348e610f457826eb263fd..a0606fab0e22c83bfd2c2597d0538f407368b1d0 100644 (file)
@@ -784,7 +784,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
 }
 
 /**
- * zynqmp_dp_link_train - Train the link
+ * zynqmp_dp_train - Train the link
  * @dp: DisplayPort IP core structure
  *
  * Return: 0 if all trains are done successfully, or corresponding error code.
index bab862484d429c0b266694d6e72943de9db51f53..88eb33acd5f0dd4f98ed60343480f4258f65e798 100644 (file)
@@ -227,7 +227,9 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
        dpsub->dev = &pdev->dev;
        platform_set_drvdata(pdev, dpsub);
 
-       dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
+       ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
+       if (ret)
+               return ret;
 
        /* Try the reserved memory. Proceed if there's none. */
        of_reserved_mem_device_init(&pdev->dev);
@@ -280,7 +282,7 @@ err_mem:
        return ret;
 }
 
-static int zynqmp_dpsub_remove(struct platform_device *pdev)
+static void zynqmp_dpsub_remove(struct platform_device *pdev)
 {
        struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
 
@@ -298,8 +300,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
 
        if (!dpsub->drm)
                zynqmp_dpsub_release(dpsub);
-
-       return 0;
 }
 
 static void zynqmp_dpsub_shutdown(struct platform_device *pdev)
@@ -320,7 +320,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
 
 static struct platform_driver zynqmp_dpsub_driver = {
        .probe                  = zynqmp_dpsub_probe,
-       .remove                 = zynqmp_dpsub_remove,
+       .remove_new             = zynqmp_dpsub_remove,
        .shutdown               = zynqmp_dpsub_shutdown,
        .driver                 = {
                .name           = "zynqmp-dpsub",
index 4d16a3396c4abf6f0b2b093b6fcb9b6fdfcf5eed..84d042796d2e66ffb8c5dce5dd5485b680c7b8e1 100644 (file)
@@ -338,32 +338,15 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
        return strcmp(dev_name(dev), drv->name) == 0;
 }
 
+/*
+ * Note that this is really only needed for backwards compatibility
+ * with libdrm, which parses this information from sysfs and will
+ * fail if it can't find the OF_FULLNAME, specifically.
+ */
 static int host1x_device_uevent(const struct device *dev,
                                struct kobj_uevent_env *env)
 {
-       struct device_node *np = dev->parent->of_node;
-       unsigned int count = 0;
-       struct property *p;
-       const char *compat;
-
-       /*
-        * This duplicates most of of_device_uevent(), but the latter cannot
-        * be called from modules and operates on dev->of_node, which is not
-        * available in this case.
-        *
-        * Note that this is really only needed for backwards compatibility
-        * with libdrm, which parses this information from sysfs and will
-        * fail if it can't find the OF_FULLNAME, specifically.
-        */
-       add_uevent_var(env, "OF_NAME=%pOFn", np);
-       add_uevent_var(env, "OF_FULLNAME=%pOF", np);
-
-       of_property_for_each_string(np, "compatible", p, compat) {
-               add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat);
-               count++;
-       }
-
-       add_uevent_var(env, "OF_COMPATIBLE_N=%u", count);
+       of_device_uevent(dev->parent, env);
 
        return 0;
 }
index 9ad89d22c0ca72032195f7a244f2cf2bd86c6ac8..a3f336edd991b937d006b1d0df3fbdfa29f00750 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/device.h>
 #include <linux/kref.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 #include <linux/pid.h>
 #include <linux/slab.h>
 
@@ -79,6 +79,14 @@ int host1x_memory_context_list_init(struct host1x *host1x)
                    !device_iommu_mapped(&ctx->dev)) {
                        dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
                        device_unregister(&ctx->dev);
+
+                       /*
+                        * This means that if IOMMU is disabled but context devices
+                        * are defined in the device tree, Host1x will fail to probe.
+                        * That's probably OK in this day and age.
+                        */
+                       err = -EINVAL;
+
                        goto unreg_devices;
                }
        }
index aae2efeef503213ccc6695735b6bebf7d4511c16..7c6699aed7d2ae48401f6614203a95062d8ebbb7 100644 (file)
@@ -11,8 +11,9 @@
 #include <linux/io.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
index c35eac1116f5f8a7945bd85f16f40e7fbb83af83..71ec1e7f657adbaa2de385860e5896f657a0adcd 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
 
 #include <drm/drm_fourcc.h>
index ad82c9e0252fdaf74e8476c9746dc7dac37a1b55..aef984a431901739ca0a7ec9a94ddc9ba63e9b99 100644 (file)
@@ -271,15 +271,13 @@ u32 ipu_pre_get_baddr(struct ipu_pre *pre)
 static int ipu_pre_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct ipu_pre *pre;
 
        pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
        if (!pre)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       pre->regs = devm_ioremap_resource(&pdev->dev, res);
+       pre->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pre->regs))
                return PTR_ERR(pre->regs);
 
index 196797c1b4b3ee0e900e1ac3bcf9c94161a9d3a9..72960570995590cdb5ed4aff4120f30d4d339109 100644 (file)
@@ -358,7 +358,6 @@ EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
 static int ipu_prg_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct ipu_prg *prg;
        u32 val;
        int i, ret;
@@ -367,12 +366,10 @@ static int ipu_prg_probe(struct platform_device *pdev)
        if (!prg)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       prg->regs = devm_ioremap_resource(&pdev->dev, res);
+       prg->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(prg->regs))
                return PTR_ERR(prg->regs);
 
-
        prg->clk_ipg = devm_clk_get(dev, "ipg");
        if (IS_ERR(prg->clk_ipg))
                return PTR_ERR(prg->clk_ipg);
index 6f0d332ccf51c7d5f873ff2518c4b7282eddda63..06bdcf072d10cf908c421b7d94de66643b777691 100644 (file)
@@ -132,29 +132,45 @@ static void get_common_inputs(struct common_input_property *common, int report_i
        common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
 }
 
-static int float_to_int(u32 float32)
+static int float_to_int(u32 flt32_val)
 {
        int fraction, shift, mantissa, sign, exp, zeropre;
 
-       mantissa = float32 & GENMASK(22, 0);
-       sign = (float32 & BIT(31)) ? -1 : 1;
-       exp = (float32 & ~BIT(31)) >> 23;
+       mantissa = flt32_val & GENMASK(22, 0);
+       sign = (flt32_val & BIT(31)) ? -1 : 1;
+       exp = (flt32_val & ~BIT(31)) >> 23;
 
        if (!exp && !mantissa)
                return 0;
 
+       /*
+        * Calculate the exponent and fraction part of floating
+        * point representation.
+        */
        exp -= 127;
        if (exp < 0) {
                exp = -exp;
+               if (exp >= BITS_PER_TYPE(u32))
+                       return 0;
                zeropre = (((BIT(23) + mantissa) * 100) >> 23) >> exp;
                return zeropre >= 50 ? sign : 0;
        }
 
        shift = 23 - exp;
-       float32 = BIT(exp) + (mantissa >> shift);
-       fraction = mantissa & GENMASK(shift - 1, 0);
+       if (abs(shift) >= BITS_PER_TYPE(u32))
+               return 0;
+
+       if (shift < 0) {
+               shift = -shift;
+               flt32_val = BIT(exp) + (mantissa << shift);
+               shift = 0;
+       } else {
+               flt32_val = BIT(exp) + (mantissa >> shift);
+       }
+
+       fraction = (shift == 0) ? 0 : mantissa & GENMASK(shift - 1, 0);
 
-       return (((fraction * 100) >> shift) >= 50) ? sign * (float32 + 1) : sign * float32;
+       return (((fraction * 100) >> shift) >= 50) ? sign * (flt32_val + 1) : sign * flt32_val;
 }
 
 static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
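
float_to_int() decodes raw IEEE-754 single-precision bits without touching
the FPU, which kernel code generally cannot use; the new BITS_PER_TYPE()
guards above keep every shift within the 32-bit width. A user-space
walk-through of the same bit arithmetic for one positive value
(0x42f6e979, i.e. 123.456f):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bits = 0x42f6e979;		/* 123.456f */
	uint32_t mantissa = bits & 0x7fffff;	/* 0x76e979 */
	int sign = (bits & 0x80000000u) ? -1 : 1;
	int exp = (int)((bits & ~0x80000000u) >> 23) - 127;	/* 6 */
	int shift = 23 - exp;					/* 17 */
	uint32_t integral = (1u << exp) + (mantissa >> shift);	/* 64 + 59 */
	uint32_t fraction = mantissa & ((1u << shift) - 1);

	/* (fraction * 100) >> shift is ~45, below the 50 needed to round up */
	printf("%d\n", sign * (int)(((fraction * 100) >> shift) >= 50 ?
				    integral + 1 : integral));	/* prints 123 */
	return 0;
}
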
index 49d4a26895e76791be50237d37ccf7490197c7bd..f33485d83d24ff9ca5c80c36bab775f26ae8e68d 100644 (file)
@@ -258,19 +258,17 @@ static void mousevsc_on_receive(struct hv_device *device,
 
        switch (hid_msg_hdr->type) {
        case SYNTH_HID_PROTOCOL_RESPONSE:
+               len = struct_size(pipe_msg, data, pipe_msg->size);
+
                /*
                 * While it will be impossible for us to protect against
                 * malicious/buggy hypervisor/host, add a check here to
                 * ensure we don't corrupt memory.
                 */
-               if (struct_size(pipe_msg, data, pipe_msg->size)
-                       > sizeof(struct mousevsc_prt_msg)) {
-                       WARN_ON(1);
+               if (WARN_ON(len > sizeof(struct mousevsc_prt_msg)))
                        break;
-               }
 
-               memcpy(&input_dev->protocol_resp, pipe_msg,
-                               struct_size(pipe_msg, data, pipe_msg->size));
+               memcpy(&input_dev->protocol_resp, pipe_msg, len);
                complete(&input_dev->wait_event);
                break;
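
struct_size() computes sizeof(*msg) plus the flexible-array payload with
overflow checking (saturating to SIZE_MAX rather than wrapping), which is
why the length can be computed once and reused for both the bound check
and the memcpy(). A reduced sketch with a simplified stand-in struct (the
real pipe message carries more fields):

#include <linux/overflow.h>
#include <linux/types.h>

struct pipe_msg {
	u32 size;
	u8 data[];		/* flexible array member */
};

static size_t pipe_msg_len(const struct pipe_msg *msg)
{
	/* offsetof(data) + msg->size * sizeof(u8), overflow-safe */
	return struct_size(msg, data, msg->size);
}
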
 
index a1d2690a1a0deeea0d021e8b4d31ddb84de3b20b..851ee86eff32a4085e9a640620c8ea9a5e2211c8 100644 (file)
@@ -1093,6 +1093,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);          break;
                case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);         break;
 
+               case 0x076: map_key_clear(KEY_CAMERA_ACCESS_ENABLE);    break;
+               case 0x077: map_key_clear(KEY_CAMERA_ACCESS_DISABLE);   break;
+               case 0x078: map_key_clear(KEY_CAMERA_ACCESS_TOGGLE);    break;
+
                case 0x079: map_key_clear(KEY_KBDILLUMUP);      break;
                case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);    break;
                case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);  break;
@@ -1139,9 +1143,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
 
-               case 0x0d5: map_key_clear(KEY_CAMERA_ACCESS_ENABLE);            break;
-               case 0x0d6: map_key_clear(KEY_CAMERA_ACCESS_DISABLE);           break;
-               case 0x0d7: map_key_clear(KEY_CAMERA_ACCESS_TOGGLE);            break;
                case 0x0d8: map_key_clear(KEY_DICTATE);         break;
                case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
 
index dfe8e09a18de0e1350be5fb66ad12855fe89d6c5..129b01be488d2ff88678c8913a0ec467e47d2634 100644 (file)
@@ -4598,6 +4598,8 @@ static const struct hid_device_id hidpp_devices[] = {
 
        { /* Logitech G403 Wireless Gaming Mouse over USB */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+       { /* Logitech G502 Lightspeed Wireless Gaming Mouse over USB */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08D) },
        { /* Logitech G703 Gaming Mouse over USB */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
        { /* Logitech G703 Hero Gaming Mouse over USB */
index 85700cec5eac6cf5396186161f5bedb160892534..a928ad2be62dbe21c8fc2f01be18ff9a7bc7d488 100644 (file)
@@ -63,12 +63,12 @@ static_assert(sizeof(enum thunderstrike_led_state) == 1);
 struct thunderstrike_hostcmd_board_info {
        __le16 revision;
        __le16 serial[7];
-};
+} __packed;
 
 struct thunderstrike_hostcmd_haptics {
        u8 motor_left;
        u8 motor_right;
-};
+} __packed;
 
 struct thunderstrike_hostcmd_resp_report {
        u8 report_id; /* THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID */
@@ -81,7 +81,7 @@ struct thunderstrike_hostcmd_resp_report {
                __le16 fw_version;
                enum thunderstrike_led_state led_state;
                u8 payload[30];
-       };
+       } __packed;
 } __packed;
 static_assert(sizeof(struct thunderstrike_hostcmd_resp_report) ==
              THUNDERSTRIKE_HOSTCMD_REPORT_SIZE);
@@ -92,15 +92,15 @@ struct thunderstrike_hostcmd_req_report {
        u8 reserved_at_10;
 
        union {
-               struct {
+               struct __packed {
                        u8 update;
                        enum thunderstrike_led_state state;
                } led;
-               struct {
+               struct __packed {
                        u8 update;
                        struct thunderstrike_hostcmd_haptics motors;
                } haptics;
-       };
+       } __packed;
        u8 reserved_at_30[27];
 } __packed;
 static_assert(sizeof(struct thunderstrike_hostcmd_req_report) ==
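
Marking the nested structs __packed matters because the compiler is
otherwise free to insert padding for alignment, silently changing the
report layout that the static_assert() lines pin down. A standalone C11
sketch of the effect (using the attribute spelling behind the kernel's
__packed macro):

#include <assert.h>
#include <stdint.h>

struct wire {			/* u8 followed by u16, no holes */
	uint8_t update;
	uint16_t value;
} __attribute__((packed));

struct natural {		/* same fields, natural alignment */
	uint8_t update;
	uint16_t value;
};

static_assert(sizeof(struct wire) == 3, "matches the wire format");
/* sizeof(struct natural) is ABI-dependent; 4 on common ABIs due to
 * one byte of padding after 'update'
 */
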
index dabcd054dad94678e96d5b4aebd74f0639b832fc..d726aaafb14622739c023fe253b666b6ab80a418 100644 (file)
@@ -527,7 +527,6 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
        info->var = picolcdfb_var;
        info->fix = picolcdfb_fix;
        info->fix.smem_len   = PICOLCDFB_SIZE*8;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        fbdata = info->par;
        spin_lock_init(&fbdata->lock);
index 3be17109301a69154b8053dae657db3cdd54208b..ef7c595c9403cb5bfcd60eb3d76a9ffd1ad920e9 100644 (file)
@@ -9,6 +9,7 @@ if I2C_HID
 config I2C_HID_ACPI
        tristate "HID over I2C transport layer ACPI driver"
        depends on ACPI
+       depends on DRM || !DRM
        select I2C_HID_CORE
        help
          Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
@@ -25,6 +26,7 @@ config I2C_HID_OF
        tristate "HID over I2C transport layer Open Firmware driver"
        # No "depends on OF" because this can also be used for manually
        # (board-file) instantiated "hid-over-i2c" type i2c-clients.
+       depends on DRM || !DRM
        select I2C_HID_CORE
        help
          Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
@@ -41,6 +43,7 @@ config I2C_HID_OF
 config I2C_HID_OF_ELAN
        tristate "Driver for Elan hid-i2c based devices on OF systems"
        depends on OF
+       depends on DRM || !DRM
        select I2C_HID_CORE
        help
          Say Y here if you want support for Elan i2c devices that use
@@ -56,6 +59,7 @@ config I2C_HID_OF_ELAN
 config I2C_HID_OF_GOODIX
        tristate "Driver for Goodix hid-i2c based devices on OF systems"
        depends on OF
+       depends on DRM || !DRM
        select I2C_HID_CORE
        help
          Say Y here if you want support for Goodix i2c devices that use
@@ -70,5 +74,7 @@ config I2C_HID_OF_GOODIX
 
 config I2C_HID_CORE
        tristate
+       # We need to call into panel code so if DRM=m, this can't be 'y'
+       depends on DRM || !DRM
 endif
 
index efbba0465eef18fdc6aab5039b01a36129ef07a4..9601c0605fd9e6c1cfbd4a85754d3c45dc545da5 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/mutex.h>
 #include <asm/unaligned.h>
 
+#include <drm/drm_panel.h>
+
 #include "../hid-ids.h"
 #include "i2c-hid.h"
 
@@ -107,6 +109,10 @@ struct i2c_hid {
        struct mutex            reset_lock;
 
        struct i2chid_ops       *ops;
+       struct drm_panel_follower panel_follower;
+       struct work_struct      panel_follower_prepare_work;
+       bool                    is_panel_follower;
+       bool                    prepare_work_finished;
 };
 
 static const struct i2c_hid_quirks {
@@ -855,7 +861,8 @@ static int i2c_hid_init_irq(struct i2c_client *client)
                irqflags = IRQF_TRIGGER_LOW;
 
        ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-                                  irqflags | IRQF_ONESHOT, client->name, ihid);
+                                  irqflags | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+                                  client->name, ihid);
        if (ret < 0) {
                dev_warn(&client->dev,
                        "Could not register for %s interrupt, irq = %d,"
@@ -940,6 +947,239 @@ static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
        ihid->ops->shutdown_tail(ihid->ops);
 }
 
+static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
+{
+       struct i2c_client *client = ihid->client;
+       struct hid_device *hid = ihid->hid;
+       int ret;
+
+       ret = hid_driver_suspend(hid, PMSG_SUSPEND);
+       if (ret < 0)
+               return ret;
+
+       /* Save some power */
+       i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+
+       disable_irq(client->irq);
+
+       if (force_poweroff || !device_may_wakeup(&client->dev))
+               i2c_hid_core_power_down(ihid);
+
+       return 0;
+}
+
+static int i2c_hid_core_resume(struct i2c_hid *ihid)
+{
+       struct i2c_client *client = ihid->client;
+       struct hid_device *hid = ihid->hid;
+       int ret;
+
+       if (!device_may_wakeup(&client->dev))
+               i2c_hid_core_power_up(ihid);
+
+       enable_irq(client->irq);
+
+       /* Instead of resetting the device, simply power it on. This
+        * solves "incomplete reports" on Raydium devices 2386:3118 and
+        * 2386:4B33 and fixes various SIS touchscreens no longer sending
+        * data after a suspend/resume.
+        *
+        * However some ALPS touchpads generate IRQ storm without reset, so
+        * let's still reset them here.
+        */
+       if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
+               ret = i2c_hid_hwreset(ihid);
+       else
+               ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+
+       if (ret)
+               return ret;
+
+       return hid_driver_reset_resume(hid);
+}
+
+/**
+ * __do_i2c_hid_core_initial_power_up() - First time power up of the i2c-hid device.
+ * @ihid: The ihid object created during probe.
+ *
+ * This function is called at probe time.
+ *
+ * The initial power on is where we do some basic validation that the device
+ * exists, where we fetch the HID descriptor, and where we create the actual
+ * HID devices.
+ *
+ * Return: 0 or error code.
+ */
+static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
+{
+       struct i2c_client *client = ihid->client;
+       struct hid_device *hid = ihid->hid;
+       int ret;
+
+       ret = i2c_hid_core_power_up(ihid);
+       if (ret)
+               return ret;
+
+       /* Make sure there is something at this address */
+       ret = i2c_smbus_read_byte(client);
+       if (ret < 0) {
+               i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
+               ret = -ENXIO;
+               goto err;
+       }
+
+       ret = i2c_hid_fetch_hid_descriptor(ihid);
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "Failed to fetch the HID Descriptor\n");
+               goto err;
+       }
+
+       enable_irq(client->irq);
+
+       hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
+       hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+       hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+
+       hid->initial_quirks |= i2c_hid_get_dmi_quirks(hid->vendor,
+                                                     hid->product);
+
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                client->name, (u16)hid->vendor, (u16)hid->product);
+       strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+
+       ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
+       ret = hid_add_device(hid);
+       if (ret) {
+               if (ret != -ENODEV)
+                       hid_err(client, "can't add hid device: %d\n", ret);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       i2c_hid_core_power_down(ihid);
+       return ret;
+}
+
+static void ihid_core_panel_prepare_work(struct work_struct *work)
+{
+       struct i2c_hid *ihid = container_of(work, struct i2c_hid,
+                                           panel_follower_prepare_work);
+       struct hid_device *hid = ihid->hid;
+       int ret;
+
+       /*
+        * hid->version is set on the first power up. If it's still zero then
+        * this is the first power on so we should perform initial power up
+        * steps.
+        */
+       if (!hid->version)
+               ret = __do_i2c_hid_core_initial_power_up(ihid);
+       else
+               ret = i2c_hid_core_resume(ihid);
+
+       if (ret)
+               dev_warn(&ihid->client->dev, "Power on failed: %d\n", ret);
+       else
+               WRITE_ONCE(ihid->prepare_work_finished, true);
+
+       /*
+        * The work APIs provide a number of memory ordering guarantees
+        * including one that says that memory writes before schedule_work()
+        * are always visible to the work function, but they don't appear to
+        * guarantee that a write that happened in the work is visible after
+        * cancel_work_sync(). We'll add a write memory barrier here to match
+        * with i2c_hid_core_panel_unpreparing() to ensure that our write to
+        * prepare_work_finished is visible there.
+        */
+       smp_wmb();
+}
+
+static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
+{
+       struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
+
+       /*
+        * Powering on a touchscreen can be a slow process. Queue the work to
+        * the system workqueue so we don't block the panel's power up.
+        */
+       WRITE_ONCE(ihid->prepare_work_finished, false);
+       schedule_work(&ihid->panel_follower_prepare_work);
+
+       return 0;
+}
+
+static int i2c_hid_core_panel_unpreparing(struct drm_panel_follower *follower)
+{
+       struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
+
+       cancel_work_sync(&ihid->panel_follower_prepare_work);
+
+       /* Match with ihid_core_panel_prepare_work() */
+       smp_rmb();
+       if (!READ_ONCE(ihid->prepare_work_finished))
+               return 0;
+
+       return i2c_hid_core_suspend(ihid, true);
+}
+
+static const struct drm_panel_follower_funcs i2c_hid_core_panel_follower_funcs = {
+       .panel_prepared = i2c_hid_core_panel_prepared,
+       .panel_unpreparing = i2c_hid_core_panel_unpreparing,
+};
+
+static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
+{
+       struct device *dev = &ihid->client->dev;
+       int ret;
+
+       ihid->is_panel_follower = true;
+       ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
+
+       /*
+        * If we're not in control of our own power up/power down then we can't
+        * do the logic to manage wakeups. Give a warning if a user thought
+        * that was possible, then force the capability off.
+        */
+       if (device_can_wakeup(dev)) {
+               dev_warn(dev, "Can't wakeup if following panel\n");
+               device_set_wakeup_capable(dev, false);
+       }
+
+       ret = drm_panel_add_follower(dev, &ihid->panel_follower);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
+{
+       /*
+        * If we're a panel follower, we'll register and do our initial power
+        * up when the panel turns on; otherwise we do it right away.
+        */
+       if (drm_is_panel_follower(&ihid->client->dev))
+               return i2c_hid_core_register_panel_follower(ihid);
+       else
+               return __do_i2c_hid_core_initial_power_up(ihid);
+}
+
+static void i2c_hid_core_final_power_down(struct i2c_hid *ihid)
+{
+       /*
+        * If we're a follower, the act of unfollowing will cause us to be
+        * powered down. Otherwise we need to manually do it.
+        */
+       if (ihid->is_panel_follower)
+               drm_panel_remove_follower(&ihid->panel_follower);
+       else
+               i2c_hid_core_suspend(ihid, true);
+}
+
 int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
                       u16 hid_descriptor_address, u32 quirks)
 {
@@ -966,48 +1206,27 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        if (!ihid)
                return -ENOMEM;
 
-       ihid->ops = ops;
-
-       ret = i2c_hid_core_power_up(ihid);
-       if (ret)
-               return ret;
-
        i2c_set_clientdata(client, ihid);
 
+       ihid->ops = ops;
        ihid->client = client;
-
        ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address);
 
        init_waitqueue_head(&ihid->wait);
        mutex_init(&ihid->reset_lock);
+       INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
 
        /* we need to allocate the command buffer without knowing the maximum
         * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
         * real computation later. */
        ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
        if (ret < 0)
-               goto err_powered;
-
+               return ret;
        device_enable_async_suspend(&client->dev);
 
-       /* Make sure there is something at this address */
-       ret = i2c_smbus_read_byte(client);
-       if (ret < 0) {
-               i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
-               ret = -ENXIO;
-               goto err_powered;
-       }
-
-       ret = i2c_hid_fetch_hid_descriptor(ihid);
-       if (ret < 0) {
-               dev_err(&client->dev,
-                       "Failed to fetch the HID Descriptor\n");
-               goto err_powered;
-       }
-
        ret = i2c_hid_init_irq(client);
        if (ret < 0)
-               goto err_powered;
+               goto err_buffers_allocated;
 
        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
@@ -1021,26 +1240,11 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->ll_driver = &i2c_hid_ll_driver;
        hid->dev.parent = &client->dev;
        hid->bus = BUS_I2C;
-       hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
-       hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
-       hid->product = le16_to_cpu(ihid->hdesc.wProductID);
-
        hid->initial_quirks = quirks;
-       hid->initial_quirks |= i2c_hid_get_dmi_quirks(hid->vendor,
-                                                     hid->product);
-
-       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
-                client->name, (u16)hid->vendor, (u16)hid->product);
-       strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
-
-       ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
 
-       ret = hid_add_device(hid);
-       if (ret) {
-               if (ret != -ENODEV)
-                       hid_err(client, "can't add hid device: %d\n", ret);
+       ret = i2c_hid_core_initial_power_up(ihid);
+       if (ret)
                goto err_mem_free;
-       }
 
        return 0;
 
@@ -1050,9 +1254,9 @@ err_mem_free:
 err_irq:
        free_irq(client->irq, ihid);
 
-err_powered:
-       i2c_hid_core_power_down(ihid);
+err_buffers_allocated:
        i2c_hid_free_buffers(ihid);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_hid_core_probe);
@@ -1062,6 +1266,8 @@ void i2c_hid_core_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
+       i2c_hid_core_final_power_down(ihid);
+
        hid = ihid->hid;
        hid_destroy_device(hid);
 
@@ -1069,8 +1275,6 @@ void i2c_hid_core_remove(struct i2c_client *client)
 
        if (ihid->bufsize)
                i2c_hid_free_buffers(ihid);
-
-       i2c_hid_core_power_down(ihid);
 }
 EXPORT_SYMBOL_GPL(i2c_hid_core_remove);
 
@@ -1085,63 +1289,30 @@ void i2c_hid_core_shutdown(struct i2c_client *client)
 }
 EXPORT_SYMBOL_GPL(i2c_hid_core_shutdown);
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_hid_core_suspend(struct device *dev)
+static int i2c_hid_core_pm_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct i2c_hid *ihid = i2c_get_clientdata(client);
-       struct hid_device *hid = ihid->hid;
-       int ret;
-
-       ret = hid_driver_suspend(hid, PMSG_SUSPEND);
-       if (ret < 0)
-               return ret;
-
-       /* Save some power */
-       i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
 
-       disable_irq(client->irq);
-
-       if (!device_may_wakeup(&client->dev))
-               i2c_hid_core_power_down(ihid);
+       if (ihid->is_panel_follower)
+               return 0;
 
-       return 0;
+       return i2c_hid_core_suspend(ihid, false);
 }
 
-static int i2c_hid_core_resume(struct device *dev)
+static int i2c_hid_core_pm_resume(struct device *dev)
 {
-       int ret;
        struct i2c_client *client = to_i2c_client(dev);
        struct i2c_hid *ihid = i2c_get_clientdata(client);
-       struct hid_device *hid = ihid->hid;
-
-       if (!device_may_wakeup(&client->dev))
-               i2c_hid_core_power_up(ihid);
-
-       enable_irq(client->irq);
-
-       /* Instead of resetting device, simply powers the device on. This
-        * solves "incomplete reports" on Raydium devices 2386:3118 and
-        * 2386:4B33 and fixes various SIS touchscreens no longer sending
-        * data after a suspend/resume.
-        *
-        * However some ALPS touchpads generate IRQ storm without reset, so
-        * let's still reset them here.
-        */
-       if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
-               ret = i2c_hid_hwreset(ihid);
-       else
-               ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
 
-       if (ret)
-               return ret;
+       if (ihid->is_panel_follower)
+               return 0;
 
-       return hid_driver_reset_resume(hid);
+       return i2c_hid_core_resume(ihid);
 }
-#endif
 
 const struct dev_pm_ops i2c_hid_core_pm = {
-       SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_core_suspend, i2c_hid_core_resume)
+       SYSTEM_SLEEP_PM_OPS(i2c_hid_core_pm_suspend, i2c_hid_core_pm_resume)
 };
 EXPORT_SYMBOL_GPL(i2c_hid_core_pm);
 
index 3ebd4b6586b3eee2e99b885b2606b1cb8f076a41..05c0fb2acbc447f94ed3209adbdc893d1b334583 100644 (file)
@@ -34,8 +34,9 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t ma
        }
 
        ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
-       if (ret < min)
+       if (ret < 0)
                goto out;
+
        mm->pasid = ret;
        ret = 0;
 out:
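
The previous check compared a signed int against ioasid_t, an unsigned
type: the usual arithmetic conversions turn a negative errno into a huge
unsigned value, so "ret < min" never fires. The trap in isolation
(compilers flag this with -Wsign-compare):

#include <stdio.h>

int main(void)
{
	unsigned int min = 1;	/* ioasid_t is an unsigned int in the kernel */
	int ret = -12;		/* -ENOMEM from ida_alloc_range() */

	if (ret < min)		/* ret converts to 4294967284: false */
		puts("error caught");
	else
		puts("error missed");	/* this is what prints */
	return 0;
}
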
index da340f11c5f5b6e9bc03824594c4a78f18067f1f..caaf563d38ae090246c602a0b982fbe4d5a9ccf1 100644 (file)
@@ -2891,14 +2891,11 @@ static int iommu_setup_default_domain(struct iommu_group *group,
                ret = __iommu_group_set_domain_internal(
                        group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
                if (WARN_ON(ret))
-                       goto out_free;
+                       goto out_free_old;
        } else {
                ret = __iommu_group_set_domain(group, dom);
-               if (ret) {
-                       iommu_domain_free(dom);
-                       group->default_domain = old_dom;
-                       return ret;
-               }
+               if (ret)
+                       goto err_restore_def_domain;
        }
 
        /*
@@ -2911,20 +2908,24 @@ static int iommu_setup_default_domain(struct iommu_group *group,
                for_each_group_device(group, gdev) {
                        ret = iommu_create_device_direct_mappings(dom, gdev->dev);
                        if (ret)
-                               goto err_restore;
+                               goto err_restore_domain;
                }
        }
 
-err_restore:
-       if (old_dom) {
+out_free_old:
+       if (old_dom)
+               iommu_domain_free(old_dom);
+       return ret;
+
+err_restore_domain:
+       if (old_dom)
                __iommu_group_set_domain_internal(
                        group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
+err_restore_def_domain:
+       if (old_dom) {
                iommu_domain_free(dom);
-               old_dom = NULL;
+               group->default_domain = old_dom;
        }
-out_free:
-       if (old_dom)
-               iommu_domain_free(old_dom);
        return ret;
 }
 
index 205d3cac425cf4090a6c4daf3fe7655ff6a0b719..2fa455d4a0480e95fd8c42b3e00db321370d396b 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/dma-resv.h>
 #include <linux/module.h>
 #include <linux/refcount.h>
 #include <linux/scatterlist.h>
@@ -456,8 +455,6 @@ static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
 static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
 {
-       dma_resv_assert_held(dbuf->resv);
-
        return vb2_dc_mmap(dbuf->priv, vma);
 }
 
index 183037fb12732d97d353080bde65d3ab7b478b81..28f3fdfe23a2987e7ad76c290542fb3b2e657451 100644 (file)
@@ -10,7 +10,6 @@
  * the Free Software Foundation.
  */
 
-#include <linux/dma-resv.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/refcount.h>
@@ -498,8 +497,6 @@ static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
 {
-       dma_resv_assert_held(dbuf->resv);
-
        return vb2_dma_sg_mmap(dbuf->priv, vma);
 }
 
index a6c6d2fcaaa4604a4ea484f4edb0b3341f4e1891..7c635e29210623a6b7f57bc87b0bcc97f8f8b6ac 100644 (file)
@@ -10,7 +10,6 @@
  * the Free Software Foundation.
  */
 
-#include <linux/dma-resv.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -319,8 +318,6 @@ static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
 static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
 {
-       dma_resv_assert_held(dbuf->resv);
-
        return vb2_vmalloc_mmap(dbuf->priv, vma);
 }
 
index 0aeb9daaee4cbeddd2800d8b90723fe8e3a48af6..23c8c094e791b9ee5a6e7ab1dafd36166eaa4cf0 100644 (file)
@@ -1048,7 +1048,6 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
        /* Generate valid fb_info */
 
        oi->ivtvfb_info.node = -1;
-       oi->ivtvfb_info.flags = FBINFO_FLAG_DEFAULT;
        oi->ivtvfb_info.par = itv;
        oi->ivtvfb_info.var = oi->ivtvfb_defined;
        oi->ivtvfb_info.fix = oi->ivtvfb_fix;
index 318799d317badd412cab2ec4ed28469d9407c05f..5b08a5ad291ee306315bffd7cfca0a281809c161 100644 (file)
@@ -3,11 +3,9 @@ config VIDEO_VIVID
        tristate "Virtual Video Test Driver"
        depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
        depends on HAS_DMA
+       select FB_IOMEM_HELPERS
        select FONT_SUPPORT
        select FONT_8x16
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select VIDEOBUF2_VMALLOC
        select VIDEOBUF2_DMA_CONTIG
        select VIDEO_V4L2_TPG
index ec25edc679b397eef8b0abf0a5090e8fdd0a0f71..5c931b94a7b5e2fd4e50f8bc1b2767e5c927de2b 100644 (file)
@@ -246,12 +246,10 @@ static int vivid_fb_blank(int blank_mode, struct fb_info *info)
 
 static const struct fb_ops vivid_fb_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = vivid_fb_check_var,
        .fb_set_par     = vivid_fb_set_par,
        .fb_setcolreg   = vivid_fb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_cursor      = NULL,
        .fb_ioctl       = vivid_fb_ioctl,
        .fb_pan_display = vivid_fb_pan_display,
@@ -310,7 +308,6 @@ static int vivid_fb_init_vidmode(struct vivid_dev *dev)
        /* Generate valid fb_info */
 
        dev->fb_info.node = -1;
-       dev->fb_info.flags = FBINFO_FLAG_DEFAULT;
        dev->fb_info.par = dev;
        dev->fb_info.var = dev->fb_defined;
        dev->fb_info.fix = dev->fb_fix;
index 70c0e2b1936b335caa7cbff616a25a54834f2067..8da46d284e35649a7b68a4148593e2a03aa5d8c7 100644 (file)
@@ -1286,7 +1286,6 @@ static int felix_parse_ports_node(struct felix *felix,
                if (err < 0) {
                        dev_info(dev, "Unsupported PHY mode %s on port %d\n",
                                 phy_modes(phy_mode), port);
-                       of_node_put(child);
 
                        /* Leave port_phy_modes[port] = 0, which is also
                         * PHY_INTERFACE_MODE_NA. This will perform a
@@ -1786,16 +1785,15 @@ static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 {
        struct ocelot *ocelot = ds->priv;
        struct ocelot_port *ocelot_port = ocelot->ports[port];
-       struct felix *felix = ocelot_to_felix(ocelot);
 
        ocelot_port_set_maxlen(ocelot, port, new_mtu);
 
-       mutex_lock(&ocelot->tas_lock);
+       mutex_lock(&ocelot->fwd_domain_lock);
 
-       if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
-               felix->info->tas_guard_bands_update(ocelot, port);
+       if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update)
+               ocelot->ops->tas_guard_bands_update(ocelot, port);
 
-       mutex_unlock(&ocelot->tas_lock);
+       mutex_unlock(&ocelot->fwd_domain_lock);
 
        return 0;
 }
index 96008c046da5361578b33f6a080769a9769a36ad..1d4befe7cfe8e06044d401f715f34b95edbdca0c 100644 (file)
@@ -57,7 +57,6 @@ struct felix_info {
        void    (*mdio_bus_free)(struct ocelot *ocelot);
        int     (*port_setup_tc)(struct dsa_switch *ds, int port,
                                 enum tc_setup_type type, void *type_data);
-       void    (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
        void    (*port_sched_speed_set)(struct ocelot *ocelot, int port,
                                        u32 speed);
        void    (*phylink_mac_config)(struct ocelot *ocelot, int port,
index bb39fedd46c72370c796d21b7e36a9b6a57c8919..1c113957fcf45f44a8d4ba38e762e88e10c63712 100644 (file)
@@ -1209,15 +1209,17 @@ static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
 static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct ocelot_mm_state *mm = &ocelot->mm[port];
        struct tc_taprio_qopt_offload *taprio;
        u64 min_gate_len[OCELOT_NUM_TC];
+       u32 val, maxlen, add_frag_size;
+       u64 needed_min_frag_time_ps;
        int speed, picos_per_byte;
        u64 needed_bit_time_ps;
-       u32 val, maxlen;
        u8 tas_speed;
        int tc;
 
-       lockdep_assert_held(&ocelot->tas_lock);
+       lockdep_assert_held(&ocelot->fwd_domain_lock);
 
        taprio = ocelot_port->taprio;
 
@@ -1253,14 +1255,21 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
         */
        needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
 
+       /* Preemptible TCs don't need to pass a full MTU; the port will
+        * automatically emit a HOLD request when a preemptible TC gate closes.
+        */
+       val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
+       add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
+       needed_min_frag_time_ps = picos_per_byte *
+               (u64)(24 + 2 * ethtool_mm_frag_size_add_to_min(add_frag_size));
+
        dev_dbg(ocelot->dev,
-               "port %d: max frame size %d needs %llu ps at speed %d\n",
-               port, maxlen, needed_bit_time_ps, speed);
+               "port %d: max frame size %d needs %llu ps, %llu ps for mPackets at speed %d\n",
+               port, maxlen, needed_bit_time_ps, needed_min_frag_time_ps,
+               speed);
 
        vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
 
-       mutex_lock(&ocelot->fwd_domain_lock);
-
        for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
                u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
                u64 remaining_gate_len_ps;
@@ -1269,7 +1278,9 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
                remaining_gate_len_ps =
                        vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
 
-               if (remaining_gate_len_ps > needed_bit_time_ps) {
+               if ((mm->active_preemptible_tcs & BIT(tc)) ?
+                   remaining_gate_len_ps > needed_min_frag_time_ps :
+                   remaining_gate_len_ps > needed_bit_time_ps) {
                        /* Setting QMAXSDU_CFG to 0 disables oversized frame
                         * dropping.
                         */
@@ -1323,8 +1334,6 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
        ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
 
        ocelot->ops->cut_through_fwd(ocelot);
-
-       mutex_unlock(&ocelot->fwd_domain_lock);
 }
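With frame preemption active, a gate no longer has to stay open for a worst-case full frame: since the port emits a HOLD request when a preemptible gate closes, only a minimum fragment must fit. A standalone worked example of the two time budgets, assuming 1 Gbps (8000 ps per byte), a 1518-byte max frame, 24 bytes of per-frame overhead as in the driver, and the ethtool mapping where add_frag_size N yields a 64 * (N + 1) byte minimum fragment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t picos_per_byte = 8000;         /* 1 Gbps */
        uint32_t add_frag_size = 0;
        uint64_t min_frag = 64 * (add_frag_size + 1);
        uint64_t full_frame_ps = picos_per_byte * (1518 + 24);
        uint64_t min_frag_ps = picos_per_byte * (24 + 2 * min_frag);

        /* ~12.3 us for a full frame vs ~1.2 us for a minimum fragment */
        printf("full frame: %llu ps, preemptible minimum: %llu ps\n",
               (unsigned long long)full_frame_ps,
               (unsigned long long)min_frag_ps);
        return 0;
}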
 
 static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1351,7 +1360,7 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
                break;
        }
 
-       mutex_lock(&ocelot->tas_lock);
+       mutex_lock(&ocelot->fwd_domain_lock);
 
        ocelot_rmw_rix(ocelot,
                       QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
@@ -1361,7 +1370,7 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
        if (ocelot_port->taprio)
                vsc9959_tas_guard_bands_update(ocelot, port);
 
-       mutex_unlock(&ocelot->tas_lock);
+       mutex_unlock(&ocelot->fwd_domain_lock);
 }
 
 static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1409,7 +1418,7 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
        int ret, i;
        u32 val;
 
-       mutex_lock(&ocelot->tas_lock);
+       mutex_lock(&ocelot->fwd_domain_lock);
 
        if (taprio->cmd == TAPRIO_CMD_DESTROY) {
                ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
@@ -1421,7 +1430,7 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
 
                vsc9959_tas_guard_bands_update(ocelot, port);
 
-               mutex_unlock(&ocelot->tas_lock);
+               mutex_unlock(&ocelot->fwd_domain_lock);
                return 0;
        } else if (taprio->cmd != TAPRIO_CMD_REPLACE) {
                ret = -EOPNOTSUPP;
@@ -1504,7 +1513,7 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
        ocelot_port->taprio = taprio_offload_get(taprio);
        vsc9959_tas_guard_bands_update(ocelot, port);
 
-       mutex_unlock(&ocelot->tas_lock);
+       mutex_unlock(&ocelot->fwd_domain_lock);
 
        return 0;
 
@@ -1512,7 +1521,7 @@ err_reset_tc:
        taprio->mqprio.qopt.num_tc = 0;
        ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
 err_unlock:
-       mutex_unlock(&ocelot->tas_lock);
+       mutex_unlock(&ocelot->fwd_domain_lock);
 
        return ret;
 }
@@ -1525,7 +1534,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
        int port;
        u32 val;
 
-       mutex_lock(&ocelot->tas_lock);
+       mutex_lock(&ocelot->fwd_domain_lock);
 
        for (port = 0; port < ocelot->num_phys_ports; port++) {
                ocelot_port = ocelot->ports[port];
@@ -1563,7 +1572,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
                               QSYS_TAG_CONFIG_ENABLE,
                               QSYS_TAG_CONFIG, port);
        }
-       mutex_unlock(&ocelot->tas_lock);
+       mutex_unlock(&ocelot->fwd_domain_lock);
 }
 
 static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
@@ -1634,6 +1643,18 @@ static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
        }
 }
 
+static int vsc9959_qos_port_mqprio(struct ocelot *ocelot, int port,
+                                  struct tc_mqprio_qopt_offload *mqprio)
+{
+       int ret;
+
+       mutex_lock(&ocelot->fwd_domain_lock);
+       ret = ocelot_port_mqprio(ocelot, port, mqprio);
+       mutex_unlock(&ocelot->fwd_domain_lock);
+
+       return ret;
+}
+
 static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
                                 enum tc_setup_type type,
                                 void *type_data)
@@ -1646,7 +1667,7 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
        case TC_SETUP_QDISC_TAPRIO:
                return vsc9959_qos_port_tas_set(ocelot, port, type_data);
        case TC_SETUP_QDISC_MQPRIO:
-               return ocelot_port_mqprio(ocelot, port, type_data);
+               return vsc9959_qos_port_mqprio(ocelot, port, type_data);
        case TC_SETUP_QDISC_CBS:
                return vsc9959_qos_port_cbs_set(ds, port, type_data);
        default:
@@ -2591,6 +2612,7 @@ static const struct ocelot_ops vsc9959_ops = {
        .cut_through_fwd        = vsc9959_cut_through_fwd,
        .tas_clock_adjust       = vsc9959_tas_clock_adjust,
        .update_stats           = vsc9959_update_stats,
+       .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
 };
 
 static const struct felix_info felix_info_vsc9959 = {
@@ -2616,7 +2638,6 @@ static const struct felix_info felix_info_vsc9959 = {
        .port_modes             = vsc9959_port_modes,
        .port_setup_tc          = vsc9959_port_setup_tc,
        .port_sched_speed_set   = vsc9959_sched_speed_set,
-       .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
 };
 
 /* The INTB interrupt is shared between for PTP TX timestamp availability
index f7d7cfb2fd86517e9dbd7d9c5124472795e400ea..09b80644c11bd679557ee626f0c78eb5f24297ed 100644 (file)
@@ -588,6 +588,9 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
        bool ack;
        int ret;
 
+       if (!skb)
+               return -ENOMEM;
+
        reinit_completion(&mgmt_eth_data->rw_done);
 
        /* Increment seq_num and set it in the copy pkt */
index 451c3a1b62553794e736e2d1eb7b981072def1c6..633b321d7fdd97c9090ee6d7f6452d952dbc1fd8 100644 (file)
@@ -35,6 +35,8 @@
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
+#define ENA_MAX_BACKOFF_DELAY_EXP 16U
+
 #define ENA_MIN_ADMIN_POLL_US 100
 
 #define ENA_MAX_ADMIN_POLL_US 5000
@@ -536,6 +538,7 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
 
 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
 {
+       exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
        delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
        delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
        usleep_range(delay_us, 2 * delay_us);
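The new clamp closes a subtle hole: "1U << exp" is undefined once exp reaches 32, so a long run of retries could wrap the multiplier and shrink the delay instead of saturating it. Capping the exponent at 16 keeps the shift well-defined while the existing min_t() still bounds the result at ENA_MAX_ADMIN_POLL_US. A standalone sketch of the clamped backoff:

#include <stdint.h>
#include <stdio.h>

#define MAX_EXP   16u
#define MIN_US   100u
#define MAX_US  5000u

static uint32_t backoff_us(uint32_t exp, uint32_t delay_us)
{
        if (exp > MAX_EXP)              /* keep the shift defined */
                exp = MAX_EXP;
        if (delay_us < MIN_US)
                delay_us = MIN_US;
        delay_us *= 1u << exp;          /* exponential growth */
        return delay_us > MAX_US ? MAX_US : delay_us;
}

int main(void)
{
        /* prints 100 800 5000: retry 40 saturates instead of wrapping */
        printf("%u %u %u\n", backoff_us(0, 100), backoff_us(3, 100),
               backoff_us(40, 100));
        return 0;
}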
index 1761df8fb7f96888017e0a4f7cad0c22aad67c77..10c7c232cc4ecce0d719435c8f3b14060de0dc58 100644 (file)
@@ -1492,8 +1492,6 @@ int bgmac_enet_probe(struct bgmac *bgmac)
 
        bgmac->in_init = true;
 
-       bgmac_chip_intrs_off(bgmac);
-
        net_dev->irq = bgmac->irq;
        SET_NETDEV_DEV(net_dev, bgmac->dev);
        dev_set_drvdata(bgmac->dev, bgmac);
@@ -1511,6 +1509,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
         */
        bgmac_clk_enable(bgmac, 0);
 
+       bgmac_chip_intrs_off(bgmac);
+
        /* This seems to be fixing IRQ by assigning OOB #6 to the core */
        if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
                if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
index 9939ccafb556609f8b6931ef42bb49f267aadea8..63a053dea819d09807942adf30a6795f49d31cd6 100644 (file)
@@ -355,7 +355,7 @@ struct bufdesc_ex {
 #define RX_RING_SIZE           (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
 #define FEC_ENET_TX_FRSIZE     2048
 #define FEC_ENET_TX_FRPPG      (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE           512     /* Must be power of two */
+#define TX_RING_SIZE           1024    /* Must be power of two */
 #define TX_RING_MOD_MASK       511     /*   for this to work */
 
 #define BD_ENET_RX_INT         0x00800000
@@ -544,10 +544,23 @@ enum {
        XDP_STATS_TOTAL,
 };
 
+enum fec_txbuf_type {
+       FEC_TXBUF_T_SKB,
+       FEC_TXBUF_T_XDP_NDO,
+};
+
+struct fec_tx_buffer {
+       union {
+               struct sk_buff *skb;
+               struct xdp_frame *xdp;
+       };
+       enum fec_txbuf_type type;
+};
+
 struct fec_enet_priv_tx_q {
        struct bufdesc_prop bd;
        unsigned char *tx_bounce[TX_RING_SIZE];
-       struct  sk_buff *tx_skbuff[TX_RING_SIZE];
+       struct fec_tx_buffer tx_buf[TX_RING_SIZE];
 
        unsigned short tx_stop_threshold;
        unsigned short tx_wake_threshold;
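The tagged union above lets one ring array carry both skbs and XDP frames: the type field is the discriminant, and the reclaim paths later in this series reset it to FEC_TXBUF_T_SKB once an XDP frame has been returned. A reduced sketch of consuming such an entry (the names mirror the diff, but this is illustrative, not the driver code):

#include <linux/netdevice.h>
#include <net/xdp.h>

enum txbuf_type { TXBUF_SKB, TXBUF_XDP_NDO };

struct tx_buffer {
        union {
                struct sk_buff *skb;
                struct xdp_frame *xdp;
        };
        enum txbuf_type type;
};

static void reclaim(struct tx_buffer *buf)
{
        if (buf->type == TXBUF_SKB) {
                if (buf->skb)
                        dev_kfree_skb_any(buf->skb);
                buf->skb = NULL;
        } else {
                if (buf->xdp)
                        xdp_return_frame(buf->xdp);
                buf->xdp = NULL;
                buf->type = TXBUF_SKB;  /* restore the default tag */
        }
}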
index 8fbe47703d4755a9c31bbd96ce059c7efa8863ea..ec9e4bdb0c06be7eaa2e6aa0d2957ee70c57100f 100644 (file)
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
                        fec16_to_cpu(bdp->cbd_sc),
                        fec32_to_cpu(bdp->cbd_bufaddr),
                        fec16_to_cpu(bdp->cbd_datlen),
-                       txq->tx_skbuff[index]);
+                       txq->tx_buf[index].skb);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                index++;
        } while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        /* Save skb pointer */
-       txq->tx_skbuff[index] = skb;
+       txq->tx_buf[index].skb = skb;
 
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
        skb_tx_timestamp(skb);
 
-       /* Make sure the update to bdp and tx_skbuff are performed before
-        * txq->bd.cur.
-        */
+       /* Make sure the update to bdp is performed before txq->bd.cur. */
        wmb();
        txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
        }
 
        /* Save skb pointer */
-       txq->tx_skbuff[index] = skb;
+       txq->tx_buf[index].skb = skb;
 
        skb_tx_timestamp(skb);
        txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
-                       if (bdp->cbd_bufaddr &&
-                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-                               dma_unmap_single(&fep->pdev->dev,
-                                                fec32_to_cpu(bdp->cbd_bufaddr),
-                                                fec16_to_cpu(bdp->cbd_datlen),
-                                                DMA_TO_DEVICE);
-                       if (txq->tx_skbuff[i]) {
-                               dev_kfree_skb_any(txq->tx_skbuff[i]);
-                               txq->tx_skbuff[i] = NULL;
+                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+                               if (bdp->cbd_bufaddr &&
+                                   !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                                       dma_unmap_single(&fep->pdev->dev,
+                                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                                        fec16_to_cpu(bdp->cbd_datlen),
+                                                        DMA_TO_DEVICE);
+                               if (txq->tx_buf[i].skb) {
+                                       dev_kfree_skb_any(txq->tx_buf[i].skb);
+                                       txq->tx_buf[i].skb = NULL;
+                               }
+                       } else {
+                               if (bdp->cbd_bufaddr)
+                                       dma_unmap_single(&fep->pdev->dev,
+                                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                                        fec16_to_cpu(bdp->cbd_datlen),
+                                                        DMA_TO_DEVICE);
+
+                               if (txq->tx_buf[i].xdp) {
+                                       xdp_return_frame(txq->tx_buf[i].xdp);
+                                       txq->tx_buf[i].xdp = NULL;
+                               }
+
+                               /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
                        }
+
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                }
@@ -1360,6 +1375,7 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
        struct  fec_enet_private *fep;
+       struct xdp_frame *xdpf;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-               skb = txq->tx_skbuff[index];
-               txq->tx_skbuff[index] = NULL;
-               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-                       dma_unmap_single(&fep->pdev->dev,
-                                        fec32_to_cpu(bdp->cbd_bufaddr),
-                                        fec16_to_cpu(bdp->cbd_datlen),
-                                        DMA_TO_DEVICE);
-               bdp->cbd_bufaddr = cpu_to_fec32(0);
-               if (!skb)
-                       goto skb_done;
+               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+                       skb = txq->tx_buf[index].skb;
+                       txq->tx_buf[index].skb = NULL;
+                       if (bdp->cbd_bufaddr &&
+                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       if (!skb)
+                               goto tx_buf_done;
+               } else {
+                       xdpf = txq->tx_buf[index].xdp;
+                       if (bdp->cbd_bufaddr)
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       if (!xdpf) {
+                               txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+                               goto tx_buf_done;
+                       }
+               }
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1446,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
-                       ndev->stats.tx_bytes += skb->len;
-               }
-
-               /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-                * are to time stamp the packet, so we still need to check time
-                * stamping enabled flag.
-                */
-               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-                            fep->hwts_tx_en) &&
-                   fep->bufdesc_ex) {
-                       struct skb_shared_hwtstamps shhwtstamps;
-                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-                       skb_tstamp_tx(skb, &shhwtstamps);
+                       if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+                               ndev->stats.tx_bytes += skb->len;
+                       else
+                               ndev->stats.tx_bytes += xdpf->len;
                }
 
                /* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1459,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;
 
-               /* Free the sk buffer associated with this last transmit */
-               dev_kfree_skb_any(skb);
-skb_done:
-               /* Make sure the update to bdp and tx_skbuff are performed
+               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+                       /* NOTE: SKBTX_IN_PROGRESS being set does not imply that
+                        * we are the ones to timestamp the packet, so we still
+                        * need to check the time stamping enabled flag.
+                        */
+                       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+                                    fep->hwts_tx_en) && fep->bufdesc_ex) {
+                               struct skb_shared_hwtstamps shhwtstamps;
+                               struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+                               fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+                               skb_tstamp_tx(skb, &shhwtstamps);
+                       }
+
+                       /* Free the sk buffer associated with this last transmit */
+                       dev_kfree_skb_any(skb);
+               } else {
+                       xdp_return_frame(xdpf);
+
+                       txq->tx_buf[index].xdp = NULL;
+                       /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+                       txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+               }
+
+tx_buf_done:
+               /* Make sure the update to bdp and tx_buf are performed
                 * before dirty_tx
                 */
                wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                for (i = 0; i < txq->bd.ring_size; i++) {
                        kfree(txq->tx_bounce[i]);
                        txq->tx_bounce[i] = NULL;
-                       skb = txq->tx_skbuff[i];
-                       txq->tx_skbuff[i] = NULL;
-                       dev_kfree_skb(skb);
+
+                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+                               skb = txq->tx_buf[i].skb;
+                               txq->tx_buf[i].skb = NULL;
+                               dev_kfree_skb(skb);
+                       } else {
+                               if (txq->tx_buf[i].xdp) {
+                                       xdp_return_frame(txq->tx_buf[i].xdp);
+                                       txq->tx_buf[i].xdp = NULL;
+                               }
+
+                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+                       }
                }
        }
 }
@@ -3296,8 +3349,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
                fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
 
                txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-               txq->tx_wake_threshold =
-                       (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+               txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
                txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
                                        txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3732,12 +3784,18 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        return -EOPNOTSUPP;
 
+               if (!bpf->prog)
+                       xdp_features_clear_redirect_target(dev);
+
                if (is_run) {
                        napi_disable(&fep->napi);
                        netif_tx_disable(dev);
                }
 
                old_prog = xchg(&fep->xdp_prog, bpf->prog);
+               if (old_prog)
+                       bpf_prog_put(old_prog);
+
                fec_restart(dev);
 
                if (is_run) {
@@ -3745,8 +3803,8 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
                        netif_tx_start_all_queues(dev);
                }
 
-               if (old_prog)
-                       bpf_prog_put(old_prog);
+               if (bpf->prog)
+                       xdp_features_set_redirect_target(dev, false);
 
                return 0;
 
@@ -3778,7 +3836,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
-               netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+               netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
                return -EBUSY;
        }
 
@@ -3811,7 +3869,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
-       txq->tx_skbuff[index] = NULL;
+       txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+       txq->tx_buf[index].xdp = frame;
 
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
@@ -4016,8 +4075,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
                ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
-                                    NETDEV_XDP_ACT_REDIRECT |
-                                    NETDEV_XDP_ACT_NDO_XMIT;
+                                    NETDEV_XDP_ACT_REDIRECT;
 
        fec_restart(ndev);
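The ordering in fec_enet_bpf() above follows a quiesce-swap-resume shape: the device stops advertising itself as a redirect target before the old program is torn down, the swap happens with NAPI and TX disabled, and NDO_XMIT is only advertised again once the new program is live. A hedged, driver-agnostic sketch of that sequence (restart_hw() stands in for the ring restart):

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static int xdp_swap(struct net_device *dev, struct napi_struct *napi,
                    struct bpf_prog **slot, struct bpf_prog *prog)
{
        struct bpf_prog *old;

        if (!prog)      /* stop being a redirect target before teardown */
                xdp_features_clear_redirect_target(dev);

        napi_disable(napi);
        netif_tx_disable(dev);

        old = xchg(slot, prog);
        if (old)
                bpf_prog_put(old);      /* drop the reference we held */

        /* restart_hw(dev); placeholder: reprogram rings for the new prog */

        napi_enable(napi);
        netif_tx_start_all_queues(dev);

        if (prog)       /* advertise NDO_XMIT only once we are live */
                xdp_features_set_redirect_target(dev, false);

        return 0;
}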
 
index 98eb78d98e9f43c6e8541946d5398e9d7824005e..4b425bf71ede13711fbc3d3a81abd8447d5d4193 100644 (file)
@@ -964,5 +964,6 @@ void gve_handle_report_stats(struct gve_priv *priv);
 /* exported by ethtool.c */
 extern const struct ethtool_ops gve_ethtool_ops;
 /* needed by ethtool */
+extern char gve_driver_name[];
 extern const char gve_version_str[];
 #endif /* _GVE_H_ */
index cfd4b8d284d12eaaf1b7fef612c3db47fbf0bc8d..233e5946905e7dfa4bb7f0bf2955cd316b352b21 100644 (file)
@@ -15,7 +15,7 @@ static void gve_get_drvinfo(struct net_device *netdev,
 {
        struct gve_priv *priv = netdev_priv(netdev);
 
-       strscpy(info->driver, "gve", sizeof(info->driver));
+       strscpy(info->driver, gve_driver_name, sizeof(info->driver));
        strscpy(info->version, gve_version_str, sizeof(info->version));
        strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
 }
@@ -590,6 +590,9 @@ static int gve_get_link_ksettings(struct net_device *netdev,
                err = gve_adminq_report_link_speed(priv);
 
        cmd->base.speed = priv->link_speed;
+
+       cmd->base.duplex = DUPLEX_FULL;
+
        return err;
 }
 
index 8fb70db63b8b8830d4fc74a7a67e3615f1eee5e5..e6f1711d9be04f5ccdc1ed9ed4fd8b83027e16ed 100644 (file)
@@ -33,6 +33,7 @@
 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
 #define DQO_TX_MAX     0x3FFFF
 
+char gve_driver_name[] = "gve";
 const char gve_version_str[] = GVE_VERSION;
 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
 
@@ -2200,7 +2201,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
-       err = pci_request_regions(pdev, "gvnic-cfg");
+       err = pci_request_regions(pdev, gve_driver_name);
        if (err)
                goto abort_with_enabled;
 
@@ -2393,8 +2394,8 @@ static const struct pci_device_id gve_id_table[] = {
        { }
 };
 
-static struct pci_driver gvnic_driver = {
-       .name           = "gvnic",
+static struct pci_driver gve_driver = {
+       .name           = gve_driver_name,
        .id_table       = gve_id_table,
        .probe          = gve_probe,
        .remove         = gve_remove,
@@ -2405,10 +2406,10 @@ static struct pci_driver gvnic_driver = {
 #endif
 };
 
-module_pci_driver(gvnic_driver);
+module_pci_driver(gve_driver);
 
 MODULE_DEVICE_TABLE(pci, gve_id_table);
 MODULE_AUTHOR("Google, Inc.");
-MODULE_DESCRIPTION("gVNIC Driver");
+MODULE_DESCRIPTION("Google Virtual NIC Driver");
 MODULE_LICENSE("Dual MIT/GPL");
 MODULE_VERSION(GVE_VERSION);
index 93979ab18bc1d6740206ffe2a8854fca23fd8033..19a5e7f3a075ee3c1a4462b1a826f63c33eb3db4 100644 (file)
@@ -5739,6 +5739,13 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
        q_handle = vsi->tx_rings[queue_index]->q_handle;
        tc = ice_dcb_get_tc(vsi, queue_index);
 
+       vsi = ice_locate_vsi_using_queue(vsi, queue_index);
+       if (!vsi) {
+               netdev_err(netdev, "Invalid VSI for given queue %d\n",
+                          queue_index);
+               return -EINVAL;
+       }
+
        /* Set BW back to default, when user set maxrate to 0 */
        if (!maxrate)
                status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
@@ -7872,10 +7879,10 @@ static int
 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
                         struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
-       u64 sum_max_rate = 0, sum_min_rate = 0;
        int non_power_of_2_qcount = 0;
        struct ice_pf *pf = vsi->back;
        int max_rss_q_cnt = 0;
+       u64 sum_min_rate = 0;
        struct device *dev;
        int i, speed;
        u8 num_tc;
@@ -7891,6 +7898,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
        dev = ice_pf_to_dev(pf);
        vsi->ch_rss_size = 0;
        num_tc = mqprio_qopt->qopt.num_tc;
+       speed = ice_get_link_speed_kbps(vsi);
 
        for (i = 0; num_tc; i++) {
                int qcount = mqprio_qopt->qopt.count[i];
@@ -7931,7 +7939,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
                 */
                max_rate = mqprio_qopt->max_rate[i];
                max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
-               sum_max_rate += max_rate;
 
                /* min_rate is minimum guaranteed rate and it can't be zero */
                min_rate = mqprio_qopt->min_rate[i];
@@ -7944,6 +7951,12 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
                        return -EINVAL;
                }
 
+               if (max_rate && max_rate > speed) {
+                       dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
+                               i, max_rate, speed);
+                       return -EINVAL;
+               }
+
                iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
                if (rem) {
                        dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
@@ -7981,12 +7994,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
            (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
                return -EINVAL;
 
-       speed = ice_get_link_speed_kbps(vsi);
-       if (sum_max_rate && sum_max_rate > (u64)speed) {
-               dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
-                       sum_max_rate, speed);
-               return -EINVAL;
-       }
        if (sum_min_rate && sum_min_rate > (u64)speed) {
                dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
                        sum_min_rate, speed);
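The validation change above reflects what the two rate types mean: per-TC max_rate values are ceilings, which may legitimately sum past the link speed, so each is now checked against the link speed individually, while min_rate values are guarantees and must still fit in aggregate. A standalone sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t speed = 10000000;      /* 10 Gbps link, in Kbps */
        uint64_t max_rate[3] = { 8000000, 8000000, 0 };         /* caps */
        uint64_t min_rate[3] = { 2000000, 3000000, 1000000 };   /* guarantees */
        uint64_t sum_min = 0;
        int i;

        for (i = 0; i < 3; i++) {
                if (max_rate[i] && max_rate[i] > speed) {
                        printf("TC%d: cap exceeds link speed\n", i);
                        return 1;
                }
                sum_min += min_rate[i];
        }
        if (sum_min > speed) {
                printf("guaranteed minimums oversubscribed\n");
                return 1;
        }
        /* valid: the caps sum to 16 Gbps but no single cap exceeds 10 */
        printf("configuration accepted\n");
        return 0;
}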
index b54052ef6050d8ed665f3f39c9887de86ab594b8..4a34ef5f58d36bd5600b9449cd41e8d26ecbcfa8 100644 (file)
@@ -750,17 +750,16 @@ exit:
 /**
  * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
  * @vsi: Pointer to VSI
- * @tc_fltr: Pointer to tc_flower_filter
+ * @queue: Queue index
  *
- * Locate the VSI using specified queue. When ADQ is not enabled, always
- * return input VSI, otherwise locate corresponding VSI based on per channel
- * offset and qcount
+ * Locate the VSI using the specified "queue". When ADQ is not enabled,
+ * always return the input VSI; otherwise locate the corresponding
+ * VSI based on the per-channel "offset" and "qcount".
  */
-static struct ice_vsi *
-ice_locate_vsi_using_queue(struct ice_vsi *vsi,
-                          struct ice_tc_flower_fltr *tc_fltr)
+struct ice_vsi *
+ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
 {
-       int num_tc, tc, queue;
+       int num_tc, tc;
 
        /* if ADQ is not active, passed VSI is the candidate VSI */
        if (!ice_is_adq_active(vsi->back))
@@ -770,7 +769,6 @@ ice_locate_vsi_using_queue(struct ice_vsi *vsi,
         * upon queue number)
         */
        num_tc = vsi->mqprio_qopt.qopt.num_tc;
-       queue = tc_fltr->action.fwd.q.queue;
 
        for (tc = 0; tc < num_tc; tc++) {
                int qcount = vsi->mqprio_qopt.qopt.count[tc];
@@ -812,6 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
        struct ice_pf *pf = vsi->back;
        struct device *dev;
        u32 tc_class;
+       int q;
 
        dev = ice_pf_to_dev(pf);
 
@@ -840,7 +839,8 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
                /* Determine destination VSI even though the action is
                 * FWD_TO_QUEUE, because QUEUE is associated with VSI
                 */
-               dest_vsi = tc_fltr->dest_vsi;
+               q = tc_fltr->action.fwd.q.queue;
+               dest_vsi = ice_locate_vsi_using_queue(vsi, q);
                break;
        default:
                dev_err(dev,
@@ -1716,7 +1716,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
        /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
         * ADQ switch filter
         */
-       ch_vsi = ice_locate_vsi_using_queue(vsi, fltr);
+       ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
        if (!ch_vsi)
                return -EINVAL;
        fltr->dest_vsi = ch_vsi;
index 8bbc1a62bdb1c01d3cb471c0884070a032add09d..65d387163a46baa34763b5291faf95d9198f6404 100644 (file)
@@ -204,6 +204,7 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
        return pf->num_dmac_chnl_fltrs;
 }
 
+struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
 int
 ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
                   struct flow_cls_offload *cls_flower);
index 00a5ee48781275a1494dd06d5a4550c26b16a68b..9db384f66a8ee7245a02e2fae24ad4421c9a5605 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/bitfield.h>
+#include <linux/hrtimer.h>
 
 #include "igc_hw.h"
 
@@ -101,6 +102,8 @@ struct igc_ring {
        u32 start_time;
        u32 end_time;
        u32 max_sdu;
+       bool oper_gate_closed;          /* Operating gate. True if the TX Queue is closed */
+       bool admin_gate_closed;         /* Future gate. True if the TX Queue will be closed */
 
        /* CBS parameters */
        bool cbs_enable;                /* indicates if CBS is enabled */
@@ -160,6 +163,7 @@ struct igc_adapter {
        struct timer_list watchdog_timer;
        struct timer_list dma_err_timer;
        struct timer_list phy_info_timer;
+       struct hrtimer hrtimer;
 
        u32 wol;
        u32 en_mng_pt;
@@ -184,10 +188,13 @@ struct igc_adapter {
        u32 max_frame_size;
        u32 min_frame_size;
 
+       int tc_setup_type;
        ktime_t base_time;
        ktime_t cycle_time;
-       bool qbv_enable;
+       bool taprio_offload_enable;
        u32 qbv_config_change_errors;
+       bool qbv_transition;
+       unsigned int qbv_count;
 
        /* OS defined structs */
        struct pci_dev *pdev;
index 0e2cb00622d1a3904d0f0afe38c63a1b72f59f85..93bce729be76a69c562ee5523bd521466356168b 100644 (file)
@@ -1708,6 +1708,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
        /* twisted pair */
        cmd->base.port = PORT_TP;
        cmd->base.phy_address = hw->phy.addr;
+       ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
 
        /* advertising link modes */
        if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
index 019ce91c45aaf60a81d10973b25bd01874f8c5fa..9f93f0f4f752010b91036cc233516fa9d5fa5574 100644 (file)
@@ -711,7 +711,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
        /* disable the queue */
        wr32(IGC_TXDCTL(reg_idx), 0);
        wrfl();
-       mdelay(10);
 
        wr32(IGC_TDLEN(reg_idx),
             ring->count * sizeof(union igc_adv_tx_desc));
@@ -1017,7 +1016,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
        ktime_t base_time = adapter->base_time;
        ktime_t now = ktime_get_clocktai();
        ktime_t baset_est, end_of_cycle;
-       u32 launchtime;
+       s32 launchtime;
        s64 n;
 
        n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
@@ -1030,7 +1029,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
                        *first_flag = true;
                        ring->last_ff_cycle = baset_est;
 
-                       if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+                       if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
                                *insert_empty = true;
                }
        }
@@ -1573,16 +1572,12 @@ done:
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (tx_ring->max_sdu > 0) {
-               u32 max_sdu = 0;
-
-               max_sdu = tx_ring->max_sdu +
-                         (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);
+       if (adapter->qbv_transition || tx_ring->oper_gate_closed)
+               goto out_drop;
 
-               if (first->bytecount > max_sdu) {
-                       adapter->stats.txdrop++;
-                       goto out_drop;
-               }
+       if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
+               adapter->stats.txdrop++;
+               goto out_drop;
        }
 
        if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
@@ -3012,8 +3007,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
                    time_after(jiffies, tx_buffer->time_stamp +
                    (adapter->tx_timeout_factor * HZ)) &&
                    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
-                   (rd32(IGC_TDH(tx_ring->reg_idx)) !=
-                    readl(tx_ring->tail))) {
+                   (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
+                   !tx_ring->oper_gate_closed) {
                        /* detected Tx unit hang */
                        netdev_err(tx_ring->netdev,
                                   "Detected Tx Unit Hang\n"
@@ -6102,7 +6097,10 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
 
        adapter->base_time = 0;
        adapter->cycle_time = NSEC_PER_SEC;
+       adapter->taprio_offload_enable = false;
        adapter->qbv_config_change_errors = 0;
+       adapter->qbv_transition = false;
+       adapter->qbv_count = 0;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igc_ring *ring = adapter->tx_ring[i];
@@ -6110,6 +6108,8 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
                ring->start_time = 0;
                ring->end_time = NSEC_PER_SEC;
                ring->max_sdu = 0;
+               ring->oper_gate_closed = false;
+               ring->admin_gate_closed = false;
        }
 
        return 0;
@@ -6121,27 +6121,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
        bool queue_configured[IGC_MAX_TX_QUEUES] = { };
        struct igc_hw *hw = &adapter->hw;
        u32 start_time = 0, end_time = 0;
+       struct timespec64 now;
        size_t n;
        int i;
 
-       switch (qopt->cmd) {
-       case TAPRIO_CMD_REPLACE:
-               adapter->qbv_enable = true;
-               break;
-       case TAPRIO_CMD_DESTROY:
-               adapter->qbv_enable = false;
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       if (!adapter->qbv_enable)
+       if (qopt->cmd == TAPRIO_CMD_DESTROY)
                return igc_tsn_clear_schedule(adapter);
 
+       if (qopt->cmd != TAPRIO_CMD_REPLACE)
+               return -EOPNOTSUPP;
+
        if (qopt->base_time < 0)
                return -ERANGE;
 
-       if (igc_is_device_id_i225(hw) && adapter->base_time)
+       if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
                return -EALREADY;
 
        if (!validate_schedule(adapter, qopt))
@@ -6149,6 +6142,9 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
        adapter->cycle_time = qopt->cycle_time;
        adapter->base_time = qopt->base_time;
+       adapter->taprio_offload_enable = true;
+
+       igc_ptp_read(adapter, &now);
 
        for (n = 0; n < qopt->num_entries; n++) {
                struct tc_taprio_sched_entry *e = &qopt->entries[n];
@@ -6184,7 +6180,10 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
                                ring->start_time = start_time;
                        ring->end_time = end_time;
 
-                       queue_configured[i] = true;
+                       if (ring->start_time >= adapter->cycle_time)
+                               queue_configured[i] = false;
+                       else
+                               queue_configured[i] = true;
                }
 
                start_time += e->interval;
@@ -6194,8 +6193,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
         * If not, set the start and end time to be end time.
         */
        for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct igc_ring *ring = adapter->tx_ring[i];
+
+               if (!is_base_time_past(qopt->base_time, &now)) {
+                       ring->admin_gate_closed = false;
+               } else {
+                       ring->oper_gate_closed = false;
+                       ring->admin_gate_closed = false;
+               }
+
                if (!queue_configured[i]) {
-                       struct igc_ring *ring = adapter->tx_ring[i];
+                       if (!is_base_time_past(qopt->base_time, &now))
+                               ring->admin_gate_closed = true;
+                       else
+                               ring->oper_gate_closed = true;
 
                        ring->start_time = end_time;
                        ring->end_time = end_time;
@@ -6207,7 +6218,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
                struct net_device *dev = adapter->netdev;
 
                if (qopt->max_sdu[i])
-                       ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
+                       ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
                else
                        ring->max_sdu = 0;
        }
@@ -6327,6 +6338,8 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
 {
        struct igc_adapter *adapter = netdev_priv(dev);
 
+       adapter->tc_setup_type = type;
+
        switch (type) {
        case TC_QUERY_CAPS:
                return igc_tc_query_caps(adapter, type_data);
@@ -6574,6 +6587,27 @@ static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
        .xmo_rx_timestamp               = igc_xdp_rx_timestamp,
 };
 
+static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
+{
+       struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
+                                                  hrtimer);
+       unsigned int i;
+
+       adapter->qbv_transition = true;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+               if (tx_ring->admin_gate_closed) {
+                       tx_ring->admin_gate_closed = false;
+                       tx_ring->oper_gate_closed = true;
+               } else {
+                       tx_ring->oper_gate_closed = false;
+               }
+       }
+       adapter->qbv_transition = false;
+       return HRTIMER_NORESTART;
+}
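igc_qbv_scheduling_timer() above is a one-shot hrtimer callback: armed elsewhere for the moment the admin schedule takes effect, it flips queues from "admin gate closed" to "operating gate closed" and returns HRTIMER_NORESTART. The surrounding lifecycle, as a minimal sketch (struct my_state is illustrative):

#include <linux/container_of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_state {
        struct hrtimer timer;
};

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
        struct my_state *s = container_of(t, struct my_state, timer);

        /* transition state here, as the gate flip above does */
        (void)s;
        return HRTIMER_NORESTART;       /* one-shot, no re-arm */
}

static void my_arm(struct my_state *s, ktime_t delay)
{
        hrtimer_init(&s->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        s->timer.function = my_timer_fn;
        hrtimer_start(&s->timer, delay, HRTIMER_MODE_REL);
}

static void my_teardown(struct my_state *s)
{
        hrtimer_cancel(&s->timer);      /* waits for a running callback */
}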
+
 /**
  * igc_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -6752,6 +6786,9 @@ static int igc_probe(struct pci_dev *pdev,
        INIT_WORK(&adapter->reset_task, igc_reset_task);
        INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
 
+       hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+
        /* Initialize link properties that are user-changeable */
        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
@@ -6855,6 +6892,7 @@ static void igc_remove(struct pci_dev *pdev)
 
        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->watchdog_task);
+       hrtimer_cancel(&adapter->hrtimer);
 
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
index 32ef112f8291a279c0b6e941492dc565ee5edd4a..f0b979a7065521e67e07365bdeaa358909437253 100644 (file)
@@ -356,16 +356,35 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                        tsim &= ~IGC_TSICR_TT0;
                }
                if (on) {
+                       struct timespec64 safe_start;
                        int i = rq->perout.index;
 
                        igc_pin_perout(igc, i, pin, use_freq);
-                       igc->perout[i].start.tv_sec = rq->perout.start.sec;
+                       igc_ptp_read(igc, &safe_start);
+
+                       /* PPS output start time is triggered by the Target
+                        * Time (TT) register. Programming any past time value
+                        * into TT will cause PPS to never start, so we need to
+                        * make sure the TT register is programmed with a time
+                        * in the future. There isn't a stringent need to fire
+                        * PPS out right away; adding +2 seconds takes care of
+                        * corner cases: even if SYSTIML is close to wrapping
+                        * and the timer keeps ticking as we program the
+                        * register, +2 seconds is a safe bet.
+                        */
+                       safe_start.tv_sec += 2;
+
+                       if (rq->perout.start.sec < safe_start.tv_sec)
+                               igc->perout[i].start.tv_sec = safe_start.tv_sec;
+                       else
+                               igc->perout[i].start.tv_sec = rq->perout.start.sec;
                        igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igc->perout[i].period.tv_sec = ts.tv_sec;
                        igc->perout[i].period.tv_nsec = ts.tv_nsec;
-                       wr32(trgttimh, rq->perout.start.sec);
+                       wr32(trgttimh, (u32)igc->perout[i].start.tv_sec);
                        /* For now, always select timer 0 as source. */
-                       wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
+                       wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec |
+                                            IGC_TT_IO_TIMER_SEL_SYSTIM0));
                        if (use_freq)
                                wr32(freqout, ns);
                        tsauxc |= tsauxc_mask;
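The clamp above only ever moves a start time forward: anything earlier than two seconds from the current PHC time is pushed to now + 2 s, because a Target Time value behind SYSTIM would never fire. A standalone sketch of the arithmetic, seconds only for brevity:

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_start(uint64_t now_sec, uint64_t req_sec)
{
        uint64_t safe = now_sec + 2;    /* same 2 s margin as the diff */

        return req_sec < safe ? safe : req_sec;
}

int main(void)
{
        /* a request 1 s in the past lands 2 s ahead of the clock */
        printf("%llu\n", (unsigned long long)clamp_start(1000, 999));   /* 1002 */
        printf("%llu\n", (unsigned long long)clamp_start(1000, 2000));  /* 2000 */
        return 0;
}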
index 94a2b0dfb54d40b7fd00dfac8b47a8a95a96b90d..a9c08321aca901c20f1178088f4764413a40d2ea 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
 {
        unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
 
-       if (adapter->qbv_enable)
+       if (adapter->taprio_offload_enable)
                new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
 
        if (is_any_launchtime(adapter))
@@ -114,7 +114,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
 static int igc_tsn_enable_offload(struct igc_adapter *adapter)
 {
        struct igc_hw *hw = &adapter->hw;
-       bool tsn_mode_reconfig = false;
        u32 tqavctrl, baset_l, baset_h;
        u32 sec, nsec, cycle;
        ktime_t base_time, systim;
@@ -133,8 +132,28 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
                wr32(IGC_STQT(i), ring->start_time);
                wr32(IGC_ENDQT(i), ring->end_time);
 
-               txqctl |= IGC_TXQCTL_STRICT_CYCLE |
-                       IGC_TXQCTL_STRICT_END;
+               if (adapter->taprio_offload_enable) {
+                       /* If taprio_offload_enable is set we are in "taprio"
+                        * mode and we need to be strict about the
+                        * cycles: only transmit a packet if it can be
+                        * completed during that cycle.
+                        *
+                        * If taprio_offload_enable is NOT true when
+                        * enabling TSN offload, the cycle should have
+                        * no external effects, but is only used internally
+                        * to adapt the base time register after a second
+                        * has passed.
+                        *
+                        * Enabling strict mode in this case would
+                        * unnecessarily prevent the transmission of
+                        * certain packets (i.e. at the boundary of a
+                        * second) and thus interfere with the launchtime
+                        * feature that promises transmission at a
+                        * certain point in time.
+                        */
+                       txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+                               IGC_TXQCTL_STRICT_END;
+               }
 
                if (ring->launchtime_enable)
                        txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
@@ -228,11 +247,10 @@ skip_cbs:
 
        tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
 
-       if (tqavctrl & IGC_TQAVCTRL_TRANSMIT_MODE_TSN)
-               tsn_mode_reconfig = true;
-
        tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
 
+       adapter->qbv_count++;
+
        cycle = adapter->cycle_time;
        base_time = adapter->base_time;
 
@@ -249,17 +267,29 @@ skip_cbs:
                 * Gate Control List (GCL) is running.
                 */
                if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
-                   tsn_mode_reconfig)
+                   (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
+                   (adapter->qbv_count > 1))
                        adapter->qbv_config_change_errors++;
        } else {
-               /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
-                * has to be configured before the cycle time and base time.
-                * Tx won't hang if there is a GCL is already running,
-                * so in this case we don't need to set FutScdDis.
-                */
-               if (igc_is_device_id_i226(hw) &&
-                   !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
-                       tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
+               if (igc_is_device_id_i226(hw)) {
+                       ktime_t adjust_time, expires_time;
+
+                       /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
+                        * has to be configured before the cycle time and base time.
+                        * Tx won't hang if a GCL is already running,
+                        * so in this case we don't need to set FutScdDis.
+                        */
+                       if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
+                               tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
+
+                       nsec = rd32(IGC_SYSTIML);
+                       sec = rd32(IGC_SYSTIMH);
+                       systim = ktime_set(sec, nsec);
+
+                       adjust_time = adapter->base_time;
+                       expires_time = ktime_sub_ns(adjust_time, systim);
+                       hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL);
+               }
        }
 
        wr32(IGC_TQAVCTRL, tqavctrl);
@@ -305,7 +335,11 @@ int igc_tsn_offload_apply(struct igc_adapter *adapter)
 {
        struct igc_hw *hw = &adapter->hw;
 
-       if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
+       /* Per the I225/6 HW Design, Section 7.5.2.1, the transmit mode
+        * cannot be changed dynamically; doing so requires an adapter reset.
+        */
+       if (netif_running(adapter->netdev) &&
+           (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
                schedule_work(&adapter->reset_task);
                return 0;
        }
index ff5647bcdfcaedfca3b3bc5ec0ffd6115985e093..acf4f6ba73a6f97b74b3c2a4e6b2581146d0f651 100644 (file)
@@ -1511,7 +1511,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
                         */
                        if (txq_number == 1)
                                txq_map = (cpu == pp->rxq_def) ?
-                                       MVNETA_CPU_TXQ_ACCESS(1) : 0;
+                                       MVNETA_CPU_TXQ_ACCESS(0) : 0;
 
                } else {
                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
@@ -4356,7 +4356,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
                 */
                if (txq_number == 1)
                        txq_map = (cpu == elected_cpu) ?
-                               MVNETA_CPU_TXQ_ACCESS(1) : 0;
+                               MVNETA_CPU_TXQ_ACCESS(0) : 0;
                else
                        txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
                                MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
index 3411e2e47d46b70976c14eff1411c11394539036..0ee420a489fc4980e6356210bcb7b3e0a71a0561 100644 (file)
@@ -208,7 +208,7 @@ struct ptp *ptp_get(void)
        /* Check driver is bound to PTP block */
        if (!ptp)
                ptp = ERR_PTR(-EPROBE_DEFER);
-       else
+       else if (!IS_ERR(ptp))
                pci_dev_get(ptp->pdev);
 
        return ptp;
@@ -388,11 +388,10 @@ static int ptp_extts_on(struct ptp *ptp, int on)
 static int ptp_probe(struct pci_dev *pdev,
                     const struct pci_device_id *ent)
 {
-       struct device *dev = &pdev->dev;
        struct ptp *ptp;
        int err;
 
-       ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+       ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
        if (!ptp) {
                err = -ENOMEM;
                goto error;
@@ -428,20 +427,19 @@ static int ptp_probe(struct pci_dev *pdev,
        return 0;
 
 error_free:
-       devm_kfree(dev, ptp);
+       kfree(ptp);
 
 error:
        /* For `ptp_get()` we need to differentiate between the case
         * when the core has not tried to probe this device and the case when
-        * the probe failed.  In the later case we pretend that the
-        * initialization was successful and keep the error in
+        * the probe failed.  In the latter case we keep the error in
         * `dev->driver_data`.
         */
        pci_set_drvdata(pdev, ERR_PTR(err));
        if (!first_ptp_block)
                first_ptp_block = ERR_PTR(err);
 
-       return 0;
+       return err;
 }
 
 static void ptp_remove(struct pci_dev *pdev)
@@ -449,16 +447,17 @@ static void ptp_remove(struct pci_dev *pdev)
        struct ptp *ptp = pci_get_drvdata(pdev);
        u64 clock_cfg;
 
-       if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
-               hrtimer_cancel(&ptp->hrtimer);
-
        if (IS_ERR_OR_NULL(ptp))
                return;
 
+       if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+               hrtimer_cancel(&ptp->hrtimer);
+
        /* Disable PTP clock */
        clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
        clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
        writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+       kfree(ptp);
 }
 
 static const struct pci_device_id ptp_id_table[] = {
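
With probe failures now stored as ERR_PTR(err) in the drvdata (and returned from ptp_probe()), a consumer can tell "not probed yet" apart from "probe failed". A sketch of the lookup side, matching the ptp_get() change above:

    struct ptp *ptp = pci_get_drvdata(pdev);

    if (!ptp)                  /* driver core has not probed this device */
        return ERR_PTR(-EPROBE_DEFER);
    if (IS_ERR(ptp))           /* probe ran and failed: propagate the error */
        return ptp;

    pci_dev_get(ptp->pdev);    /* success: pin the device while in use */
    return ptp;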
index 8dbc35c481f6b28ebc9e3c4fdddfd570d27c5dc4..73df2d564545587ee891221070bce8853f2832b3 100644 (file)
@@ -3252,7 +3252,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        rvu->ptp = ptp_get();
        if (IS_ERR(rvu->ptp)) {
                err = PTR_ERR(rvu->ptp);
-               if (err == -EPROBE_DEFER)
+               if (err)
                        goto err_release_regions;
                rvu->ptp = NULL;
        }
index 0d745ae1cc9a165b2ea569bf58bc51c2db1fccf7..04b0e885f9d2e572442772dadc63ec8f72d16183 100644 (file)
@@ -4069,21 +4069,14 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
        }
 
        /* install/uninstall promisc entry */
-       if (promisc) {
+       if (promisc)
                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
                                              pfvf->rx_chan_base,
                                              pfvf->rx_chan_cnt);
-
-               if (rvu_npc_exact_has_match_table(rvu))
-                       rvu_npc_exact_promisc_enable(rvu, pcifunc);
-       } else {
+       else
                if (!nix_rx_multicast)
                        rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
 
-               if (rvu_npc_exact_has_match_table(rvu))
-                       rvu_npc_exact_promisc_disable(rvu, pcifunc);
-       }
-
        return 0;
 }
 
index 9f11c1e4073737f3f8d187da7f29497d925e2b8b..6fe67f3a7f6f18ebce2b3e11c7c4ba4a2cdd0ce5 100644 (file)
@@ -1164,8 +1164,10 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
 {
        struct npc_exact_table *table;
        u16 *cnt, old_cnt;
+       bool promisc;
 
        table = rvu->hw->table;
+       promisc = table->promisc_mode[drop_mcam_idx];
 
        cnt = &table->cnt_cmd_rules[drop_mcam_idx];
        old_cnt = *cnt;
@@ -1177,13 +1179,18 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
 
        *enable_or_disable_cam = false;
 
-       /* If all rules are deleted, disable cam */
+       if (promisc)
+               goto done;
+
+       /* If all rules are deleted and we are not already in promisc
+        * mode, disable the CAM.
+        */
        if (!*cnt && val < 0) {
                *enable_or_disable_cam = true;
                goto done;
        }
 
-       /* If rule got added, enable cam */
+       /* If a rule got added and we are not already in promisc mode, enable the CAM */
        if (!old_cnt && val > 0) {
                *enable_or_disable_cam = true;
                goto done;
@@ -1462,6 +1469,12 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
        *promisc = false;
        mutex_unlock(&table->lock);
 
+       /* Enable drop rule */
+       rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+                                          true);
+
+       dev_dbg(rvu->dev, "%s: Disabled promisc mode (cgx=%d lmac=%d)\n",
+               __func__, cgx_id, lmac_id);
        return 0;
 }
 
@@ -1503,6 +1516,12 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
        *promisc = true;
        mutex_unlock(&table->lock);
 
+       /* Disable drop rule */
+       rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
+                                          false);
+
+       dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
+               __func__, cgx_id, lmac_id);
        return 0;
 }
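
Taken together, these hunks move the drop-rule CAM decision behind the promisc state: the rule counter only toggles the CAM while promisc is off, and promisc enable/disable flips the drop rule directly. Condensed into a sketch (cnt, old_cnt, val and promisc as in __rvu_npc_exact_cmd_rules_cnt_update() above):

    *enable_or_disable_cam = false;
    if (!promisc) {
        if (!*cnt && val < 0)          /* last rule deleted: disable CAM */
            *enable_or_disable_cam = true;
        else if (!old_cnt && val > 0)  /* first rule added: enable CAM */
            *enable_or_disable_cam = true;
    }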
 
index 10e11262d48a0b3ae7a9ba4c74e5b8bfd4e5eae6..2d7713a1a15394e550a2c83b8ae8f5a49b4f9af3 100644 (file)
@@ -872,6 +872,14 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
                                return -EINVAL;
 
                        vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
+
+                       /* Drop rule with vlan_etype == 802.1Q
+                        * and vlan_id == 0 is not supported
+                        */
+                       if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
+                           fsp->ring_cookie == RX_CLS_FLOW_DISC)
+                               return -EINVAL;
+
                        /* Only ETH_P_8021Q and ETH_P_802AD types supported */
                        if (vlan_etype != ETH_P_8021Q &&
                            vlan_etype != ETH_P_8021AD)
index 8a13df592af6fe36d66c3d33826740c69edaba8d..5e56b6c3e60ad825efdbc70fef3dedc606f474d3 100644 (file)
@@ -597,6 +597,21 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
                        return -EOPNOTSUPP;
                }
 
+               if (!match.mask->vlan_id) {
+                       struct flow_action_entry *act;
+                       int i;
+
+                       flow_action_for_each(i, act, &rule->action) {
+                               if (act->id == FLOW_ACTION_DROP) {
+                                       netdev_err(nic->netdev,
+                                                  "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
+                                                  ntohs(match.key->vlan_tpid),
+                                                  match.key->vlan_id);
+                                       return -EOPNOTSUPP;
+                               }
+                       }
+               }
+
                if (match.mask->vlan_id ||
                    match.mask->vlan_dei ||
                    match.mask->vlan_priority) {
index 03cb79adf912f70e703007c888a91cc97f611f37..be83ad9db82a474e5fec76d9af4316492017c271 100644 (file)
@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
 
        err = fs_any_create_table(fs);
        if (err)
-               return err;
+               goto err_free_any;
 
        err = fs_any_enable(fs);
        if (err)
@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
 
 err_destroy_table:
        fs_any_destroy_table(fs_any);
-
-       kfree(fs_any);
+err_free_any:
        mlx5e_fs_set_any(fs, NULL);
+       kfree(fs_any);
        return err;
 }
index 3cbebfba582bdbce79741c4c7d798dd13846af64..b0b429a0321edeb97db2d986b86c5f28c6be1a3e 100644 (file)
@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 
        c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
        cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
-       if (!c || !cparams)
-               return -ENOMEM;
+       if (!c || !cparams) {
+               err = -ENOMEM;
+               goto err_free;
+       }
 
        c->priv     = priv;
        c->mdev     = priv->mdev;
index a254e728ac95442cbbccde0d02e6616a11b97d72..fadfa8b50bebeb027f96554b37a59add2bef3264 100644 (file)
@@ -1545,7 +1545,8 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 
        attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
        attr->ct_attr.zone = act->ct.zone;
-       attr->ct_attr.nf_ft = act->ct.flow_table;
+       if (!(act->ct.action & TCA_CT_ACT_CLEAR))
+               attr->ct_attr.nf_ft = act->ct.flow_table;
        attr->ct_attr.act_miss_cookie = act->miss_cookie;
 
        return 0;
@@ -1990,6 +1991,9 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *att
        if (!priv)
                return -EOPNOTSUPP;
 
+       if (attr->ct_attr.offloaded)
+               return 0;
+
        if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
                err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
                                                     0, 0, 0, 0);
@@ -1999,11 +2003,15 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *att
                attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        }
 
-       if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
+       if (!attr->ct_attr.nf_ft) { /* means only ct clear action, and not ct_clear,ct() */
+               attr->ct_attr.offloaded = true;
                return 0;
+       }
 
        mutex_lock(&priv->control_lock);
        err = __mlx5_tc_ct_flow_offload(priv, attr);
+       if (!err)
+               attr->ct_attr.offloaded = true;
        mutex_unlock(&priv->control_lock);
 
        return err;
@@ -2021,7 +2029,7 @@ void
 mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
                       struct mlx5_flow_attr *attr)
 {
-       if (!attr->ct_attr.ft) /* no ct action, return */
+       if (!attr->ct_attr.offloaded) /* no ct action, return */
                return;
        if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
                return;
index 8e9316fa46d4b5413361dc6b66b197bfcb8a30c2..b66c5f98067f716306147d7ba31c8e6c6db894b2 100644 (file)
@@ -29,6 +29,7 @@ struct mlx5_ct_attr {
        u32 ct_labels_id;
        u32 act_miss_mapping;
        u64 act_miss_cookie;
+       bool offloaded;
        struct mlx5_ct_ft *ft;
 };
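
The new offloaded bit makes the offload path idempotent and lets the delete path skip teardown for rules that were never installed. The pattern, reduced to a sketch with hypothetical hw_install()/hw_remove() helpers standing in for the mlx5 internals:

    static int flow_offload(struct mlx5_ct_attr *attr)
    {
        int err;

        if (attr->offloaded)            /* already installed: no-op */
            return 0;

        err = hw_install(attr);         /* hypothetical hardware call */
        if (!err)
            attr->offloaded = true;
        return err;
    }

    static void flow_delete(struct mlx5_ct_attr *attr)
    {
        if (!attr->offloaded)           /* nothing installed: skip */
            return;
        hw_remove(attr);                /* hypothetical hardware call */
        attr->offloaded = false;
    }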
 
index f0e6095809faf1c053b1bd77902407dc24aa78ee..40589cebb773002739746987f3d1089d76de2ffb 100644 (file)
@@ -662,8 +662,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
                                 * as we know this is a page_pool page.
                                 */
-                               page_pool_put_defragged_page(page->pp,
-                                                            page, -1, true);
+                               page_pool_recycle_direct(page->pp, page);
                        } while (++n < num);
 
                        break;
index 88a5aed9d678100bb8bf10097c7df4504f62358b..c7d191f66ad1bc40c8dcab2eaeda82c230b61027 100644 (file)
@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
        in = kvzalloc(inlen, GFP_KERNEL);
        if  (!in || !ft->g) {
                kfree(ft->g);
+               ft->g = NULL;
                kvfree(in);
                return -ENOMEM;
        }
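
Clearing ft->g after the kfree() is the standard double-free guard: a later error or teardown path that frees ft->g again now passes NULL, which kfree() treats as a no-op. As a sketch:

    if (!in || !ft->g) {
        kfree(ft->g);
        ft->g = NULL;    /* later cleanup may kfree(ft->g) again safely */
        kvfree(in);
        return -ENOMEM;
    }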
index 704b022cd1f04bcac0513643c479d9d479ae46ef..41d37159e027b90c77ef724b1e0a821e6560c7d0 100644 (file)
@@ -390,10 +390,18 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 {
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
 
-       if (rq->xsk_pool)
+       if (rq->xsk_pool) {
                mlx5e_xsk_free_rx_wqe(wi);
-       else
+       } else {
                mlx5e_free_rx_wqe(rq, wi);
+
+               /* Avoid a second release of the wqe pages: dealloc is called
+                * for the same missing wqes on regular RQ flush and on regular
+                * RQ close. This happens when XSK RQs come into play.
+                */
+               for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
+                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+       }
 }
 
 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
@@ -1743,11 +1751,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
        prog = rcu_dereference(rq->xdp_prog);
        if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
-               if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        struct mlx5e_wqe_frag_info *pwi;
 
                        for (pwi = head_wi; pwi < wi; pwi++)
-                               pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+                               pwi->frag_page->frags++;
                }
                return NULL; /* page/packet was consumed by XDP */
        }
@@ -1817,12 +1825,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                              rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                       /* do not return page to cache,
-                        * it will be returned on XDP_TX completion.
-                        */
-                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-               }
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                       wi->frag_page->frags++;
                goto wq_cyc_pop;
        }
 
@@ -1868,12 +1872,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                              rq, wi, cqe, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                       /* do not return page to cache,
-                        * it will be returned on XDP_TX completion.
-                        */
-                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-               }
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                       wi->frag_page->frags++;
                goto wq_cyc_pop;
        }
 
@@ -2052,12 +2052,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        if (prog) {
                if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-                               int i;
+                               struct mlx5e_frag_page *pfp;
+
+                               for (pfp = head_page; pfp < frag_page; pfp++)
+                                       pfp->frags++;
 
-                               for (i = 0; i < sinfo->nr_frags; i++)
-                                       /* non-atomic */
-                                       __set_bit(page_idx + i, wi->skip_release_bitmap);
-                               return NULL;
+                               wi->linear_page.frags++;
                        }
                        mlx5e_page_release_fragmented(rq, &wi->linear_page);
                        return NULL; /* page/packet was consumed by XDP */
@@ -2155,7 +2155,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                 cqe_bcnt, &mxbuf);
                if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-                               __set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+                               frag_page->frags++;
                        return NULL; /* page/packet was consumed by XDP */
                }
 
index 41dc26800f4874e289ecd3f546e2ec762fdd3a69..8d0a3f69693e113a2e215166e76020b35b2372f6 100644 (file)
@@ -1639,7 +1639,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
        uplink_priv = &rpriv->uplink_priv;
 
        mutex_lock(&uplink_priv->unready_flows_lock);
-       unready_flow_del(flow);
+       if (flow_flag_test(flow, NOT_READY))
+               unready_flow_del(flow);
        mutex_unlock(&uplink_priv->unready_flows_lock);
 }
 
@@ -1932,8 +1933,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        esw_attr = attr->esw_attr;
        mlx5e_put_flow_tunnel_id(flow);
 
-       if (flow_flag_test(flow, NOT_READY))
-               remove_unready_flow(flow);
+       remove_unready_flow(flow);
 
        if (mlx5e_is_offloaded_flow(flow)) {
                if (flow_flag_test(flow, SLOW))
index faec7d7a4400e4f5a1dbb2aec7fb4122989b10d7..243c455f102979c3d9da7e7bef8bc6fa5ea3978a 100644 (file)
@@ -807,6 +807,9 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
        vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
 
+       if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
+               goto out_free;
+
        memset(query_ctx, 0, query_out_sz);
        err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
                                            MLX5_CAP_GENERAL_2);
index 20bb5eb266c1f11797d9c8ee63269400366ddb49..52199d39657ed2caf0e363baf70a574d0994c63d 100644 (file)
@@ -68,14 +68,19 @@ static struct thermal_zone_device_ops mlx5_thermal_ops = {
 
 int mlx5_thermal_init(struct mlx5_core_dev *mdev)
 {
+       char data[THERMAL_NAME_LENGTH];
        struct mlx5_thermal *thermal;
-       struct thermal_zone_device *tzd;
-       const char *data = "mlx5";
+       int err;
 
-       tzd = thermal_zone_get_zone_by_name(data);
-       if (!IS_ERR(tzd))
+       if (!mlx5_core_is_pf(mdev) && !mlx5_core_is_ecpf(mdev))
                return 0;
 
+       err = snprintf(data, sizeof(data), "mlx5_%s", dev_name(mdev->device));
+       if (err < 0 || err >= sizeof(data)) {
+               mlx5_core_err(mdev, "Failed to setup thermal zone name, %d\n", err);
+               return -EINVAL;
+       }
+
        thermal = kzalloc(sizeof(*thermal), GFP_KERNEL);
        if (!thermal)
                return -ENOMEM;
@@ -89,10 +94,10 @@ int mlx5_thermal_init(struct mlx5_core_dev *mdev)
                                                                 &mlx5_thermal_ops,
                                                                 NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
        if (IS_ERR(thermal->tzdev)) {
-               dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n",
-                       data, PTR_ERR(thermal->tzdev));
+               err = PTR_ERR(thermal->tzdev);
+               mlx5_core_err(mdev, "Failed to register thermal zone device (%s) %d\n", data, err);
                kfree(thermal);
-               return -EINVAL;
+               return err;
        }
 
        mdev->thermal = thermal;
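
Deriving the zone name from dev_name() keeps two mlx5 functions from colliding on a single "mlx5" thermal zone, and the snprintf() return check catches both output errors (negative) and truncation (result >= buffer size). The check in isolation, as a sketch:

    char name[THERMAL_NAME_LENGTH];
    int n;

    /* snprintf() returns the would-be length, so n >= sizeof(name)
     * means the name did not fit and was truncated.
     */
    n = snprintf(name, sizeof(name), "mlx5_%s", dev_name(mdev->device));
    if (n < 0 || n >= sizeof(name))
        return -EINVAL;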
index 24c994baad1355560115365d8cec8241d097e9f7..329e374b9539c9ac684149e441b6fd1b6c4dadf8 100644 (file)
@@ -46,7 +46,7 @@ config LAN743X
        tristate "LAN743x support"
        depends on PCI
        depends on PTP_1588_CLOCK_OPTIONAL
-       select PHYLIB
+       select FIXED_PHY
        select CRC16
        select CRC32
        help
index 2fa833d041baa0c5f9101b9ed379aba58acd518b..56ccbd4c37fe6dab5c4df89d9c036270e3719455 100644 (file)
@@ -2927,7 +2927,6 @@ int ocelot_init(struct ocelot *ocelot)
 
        mutex_init(&ocelot->mact_lock);
        mutex_init(&ocelot->fwd_domain_lock);
-       mutex_init(&ocelot->tas_lock);
        spin_lock_init(&ocelot->ptp_clock_lock);
        spin_lock_init(&ocelot->ts_id_lock);
 
index fb3145118d686fd7c3fa77748a34566a987601a5..c815ae64e39dd52d2b81c13fe9cce11435bf6bfd 100644 (file)
@@ -67,10 +67,13 @@ void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port)
                val = mm->preemptible_tcs;
 
        /* Cut through switching doesn't work for preemptible priorities,
-        * so first make sure it is disabled.
+        * so first make sure it is disabled. Also, changing the preemptible
+        * TCs affects the oversized frame dropping logic, so that needs to be
+        * re-triggered. And since tas_guard_bands_update() also implicitly
+        * calls cut_through_fwd(), we don't need to explicitly call it.
         */
        mm->active_preemptible_tcs = val;
-       ocelot->ops->cut_through_fwd(ocelot);
+       ocelot->ops->tas_guard_bands_update(ocelot, port);
 
        dev_dbg(ocelot->dev,
                "port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n",
@@ -89,17 +92,14 @@ void ocelot_port_change_fp(struct ocelot *ocelot, int port,
 {
        struct ocelot_mm_state *mm = &ocelot->mm[port];
 
-       mutex_lock(&ocelot->fwd_domain_lock);
+       lockdep_assert_held(&ocelot->fwd_domain_lock);
 
        if (mm->preemptible_tcs == preemptible_tcs)
-               goto out_unlock;
+               return;
 
        mm->preemptible_tcs = preemptible_tcs;
 
        ocelot_port_update_active_preemptible_tcs(ocelot, port);
-
-out_unlock:
-       mutex_unlock(&ocelot->fwd_domain_lock);
 }
 
 static void ocelot_mm_update_port_status(struct ocelot *ocelot, int port)
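
ocelot_port_change_fp() now requires its caller to hold fwd_domain_lock instead of taking the mutex itself; lockdep_assert_held() documents that contract and enforces it on lockdep-enabled builds. A minimal sketch of the idiom:

    static void helper_locked(struct ocelot *ocelot)
    {
        /* Splats on lockdep kernels if the caller forgot the lock */
        lockdep_assert_held(&ocelot->fwd_domain_lock);
        /* ... touch state protected by fwd_domain_lock ... */
    }

    static void caller(struct ocelot *ocelot)
    {
        mutex_lock(&ocelot->fwd_domain_lock);
        helper_locked(ocelot);
        mutex_unlock(&ocelot->fwd_domain_lock);
    }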
index b8678da1cce528ed8029db44e0fa1c39342775cb..ab7d217b98b37bc14704690b6debe35125a0afb4 100644 (file)
@@ -353,12 +353,6 @@ err_out_reset:
        ionic_reset(ionic);
 err_out_teardown:
        ionic_dev_teardown(ionic);
-       pci_clear_master(pdev);
-       /* Don't fail the probe for these errors, keep
-        * the hw interface around for inspection
-        */
-       return 0;
-
 err_out_unmap_bars:
        ionic_unmap_bars(ionic);
 err_out_pci_release_regions:
index 7c20a44e549b1be896fcea9869e9e8e65ae0adfa..612b0015dc43eb261f1cc603a46aa6eed6c6f489 100644 (file)
@@ -475,11 +475,6 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
                                      struct ionic_qcq *n_qcq)
 {
-       if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
-               ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
-               n_qcq->flags &= ~IONIC_QCQ_F_INTR;
-       }
-
        n_qcq->intr.vector = src_qcq->intr.vector;
        n_qcq->intr.index = src_qcq->intr.index;
        n_qcq->napi_qcq = src_qcq->napi_qcq;
index 12405d71c5ee55b2a6f09ed91e1cd3a1a20f721f..0772eb14eabf6e8fda0c8b6c3f8a4e002d37f05b 100644 (file)
@@ -186,9 +186,6 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
        if (eeprom_ptrs)
                kvfree(eeprom_ptrs);
 
-       if (*checksum > TXGBE_EEPROM_SUM)
-               return -EINVAL;
-
        *checksum = TXGBE_EEPROM_SUM - *checksum;
 
        return 0;
index 6045bece2654d69ab7af7fffd787c271caaa6d26..b4d3b9cde8bd685202f135cf9c845d1be76ef428 100644 (file)
@@ -184,13 +184,10 @@ static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
        cookie_len = (count - 1) / 2;
        if ((count - 1) % 2)
                return -EINVAL;
-       buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
-       if (!buf)
-               return -ENOMEM;
 
-       ret = simple_write_to_buffer(buf, count, ppos, data, count);
-       if (ret < 0)
-               goto free_buf;
+       buf = memdup_user(data, count);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
                            GFP_KERNEL | __GFP_NOWARN);
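
memdup_user() collapses the old kmalloc() + simple_write_to_buffer() pair into one call that allocates, copies from user space, and reports failure as an ERR_PTR(). Usage sketch:

    char *buf = memdup_user(data, count);   /* alloc + copy_from_user */

    if (IS_ERR(buf))
        return PTR_ERR(buf);   /* -ENOMEM or -EFAULT */
    /* ... parse buf ... */
    kfree(buf);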
index 7c4cc5f5e1eb463d02e89f380ee074d8db543ccf..dbd13f7aa3e6ead9f31a4e4f3fe46080fafba973 100644 (file)
@@ -6157,8 +6157,11 @@ static int airo_get_rate(struct net_device *dev,
        struct iw_param *vwrq = &wrqu->bitrate;
        struct airo_info *local = dev->ml_priv;
        StatusRid status_rid;           /* Card status info */
+       int ret;
 
-       readStatusRid(local, &status_rid, 1);
+       ret = readStatusRid(local, &status_rid, 1);
+       if (ret)
+               return -EBUSY;
 
        vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
        /* If more than one rate, set auto */
index aa4320ca4c30fa0b10bb937a5fea5483190a8aee..d594694206b33aaa2846071991dbb2e106cfb48b 100644 (file)
@@ -84,7 +84,6 @@ const struct iwl_ht_params iwl_22000_ht_params = {
        .mac_addr_from_csr = 0x380,                                     \
        .ht_params = &iwl_22000_ht_params,                              \
        .nvm_ver = IWL_22000_NVM_VERSION,                               \
-       .trans.use_tfh = true,                                          \
        .trans.rf_id = true,                                            \
        .trans.gen2 = true,                                             \
        .nvm_type = IWL_NVM_EXT,                                        \
@@ -122,7 +121,6 @@ const struct iwl_ht_params iwl_22000_ht_params = {
 
 const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
        .mq_rx_supported = true,
-       .use_tfh = true,
        .rf_id = true,
        .gen2 = true,
        .device_family = IWL_DEVICE_FAMILY_22000,
@@ -134,7 +132,6 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
 
 const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
        .mq_rx_supported = true,
-       .use_tfh = true,
        .rf_id = true,
        .gen2 = true,
        .device_family = IWL_DEVICE_FAMILY_22000,
@@ -146,7 +143,6 @@ const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
 
 const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
        .mq_rx_supported = true,
-       .use_tfh = true,
        .rf_id = true,
        .gen2 = true,
        .device_family = IWL_DEVICE_FAMILY_22000,
@@ -200,7 +196,6 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
        .device_family = IWL_DEVICE_FAMILY_22000,
        .base_params = &iwl_22000_base_params,
        .mq_rx_supported = true,
-       .use_tfh = true,
        .rf_id = true,
        .gen2 = true,
        .bisr_workaround = 1,
index 742096c5a36a19ecda6eca265d30f16d33c5d296..241a9e3f2a1a712cc463e968ee4fe566ac230303 100644 (file)
@@ -256,7 +256,6 @@ enum iwl_cfg_trans_ltr_delay {
  * @xtal_latency: power up latency to get the xtal stabilized
  * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY
  * @rf_id: need to read rf_id to determine the firmware image
- * @use_tfh: use TFH
  * @gen2: 22000 and on transport operation
  * @mq_rx_supported: multi-queue rx support
  * @integrated: discrete or integrated
@@ -271,7 +270,6 @@ struct iwl_cfg_trans_params {
        u32 xtal_latency;
        u32 extra_phy_cfg_flags;
        u32 rf_id:1,
-           use_tfh:1,
            gen2:1,
            mq_rx_supported:1,
            integrated:1,
index bedd78a47f671a3ec0bb05a425a61c324c5593e5..4e4a60ddf9b274510c4c26b4595b74335069cd0f 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
 #ifndef __iwl_fh_h__
@@ -71,7 +71,7 @@
 static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
                                             unsigned int chnl)
 {
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                WARN_ON_ONCE(chnl >= 64);
                return TFH_TFDQ_CBB_TABLE + 8 * chnl;
        }
index b1af9359cea5e0c9d77487832bf487d6331aefca..4bd759432d44b0dc79b19600ec09a973815cb07f 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2021, 2023 Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/bsearch.h>
@@ -42,7 +42,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 
        WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                trans->txqs.tfd.addr_size = 64;
                trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
                trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
@@ -101,7 +101,7 @@ int iwl_trans_init(struct iwl_trans *trans)
 
        /* Some things must not change even if the config does */
        WARN_ON(trans->txqs.tfd.addr_size !=
-               (trans->trans_cfg->use_tfh ? 64 : 36));
+               (trans->trans_cfg->gen2 ? 64 : 36));
 
        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
index b83df06312790d88c15a63269e55a2f0d5937f51..b18c91c5dd5d1c0691ab910c38184344776a5566 100644 (file)
@@ -1450,7 +1450,7 @@ static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw)
 static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
 {
        /* TODO - replace with TLV once defined */
-       return mvm->trans->trans_cfg->use_tfh;
+       return mvm->trans->trans_cfg->gen2;
 }
 
 static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
index eacbbdbffb5e92523a4efa32eda147008b32e036..3e988da4497387b61ff0743ab064eba153f2057d 100644 (file)
@@ -819,7 +819,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 
        iwl_enable_interrupts(trans);
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                if (cpu == 1)
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFF);
@@ -3394,7 +3394,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
                        u8 tfdidx;
                        u32 caplen, cmdlen;
 
-                       if (trans->trans_cfg->use_tfh)
+                       if (trans->trans_cfg->gen2)
                                tfdidx = idx;
                        else
                                tfdidx = ptr;
index 1337fa95f6571d43a8156912b19e5038fb182437..790e5b124740e4133b6fe088acac13511e53fb3a 100644 (file)
@@ -364,7 +364,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
        for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
             txq_id++) {
                struct iwl_txq *txq = trans->txqs.txq[txq_id];
-               if (trans->trans_cfg->use_tfh)
+               if (trans->trans_cfg->gen2)
                        iwl_write_direct64(trans,
                                           FH_MEM_CBBC_QUEUE(trans, txq_id),
                                           txq->dma_addr);
index fbacbe9ada156501b181dba782b3dc197abeae20..5bb3cc3367c9fe86fa269715614289858e9191b1 100644 (file)
@@ -985,7 +985,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
        bool active;
        u8 fifo;
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
                        txq->read_ptr, txq->write_ptr);
                /* TODO: access new SCD registers and dump them */
@@ -1040,7 +1040,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
        if (WARN_ON(txq->entries || txq->tfds))
                return -EINVAL;
 
-       if (trans->trans_cfg->use_tfh)
+       if (trans->trans_cfg->gen2)
                tfd_sz = trans->txqs.tfd.size * slots_num;
 
        timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
@@ -1347,7 +1347,7 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
        dma_addr_t addr;
        dma_addr_t hi_len;
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                struct iwl_tfh_tfd *tfh_tfd = _tfd;
                struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
@@ -1408,7 +1408,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
 
        meta->tbs = 0;
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 
                tfd_fh->num_tbs = 0;
@@ -1625,7 +1625,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                txq->entries[read_ptr].skb = NULL;
 
-               if (!trans->trans_cfg->use_tfh)
+               if (!trans->trans_cfg->gen2)
                        iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
 
                iwl_txq_free_tfd(trans, txq);
index eca53bfd326d1a63ddbbf2a2dd8988f49298d067..1e4a24ab9bab223be86fb01233d5fc26badea314 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020-2022 Intel Corporation
+ * Copyright (C) 2020-2023 Intel Corporation
  */
 #ifndef __iwl_trans_queue_tx_h__
 #define __iwl_trans_queue_tx_h__
@@ -38,7 +38,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
 static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
                                    struct iwl_txq *txq, int idx)
 {
-       if (trans->trans_cfg->use_tfh)
+       if (trans->trans_cfg->gen2)
                idx = iwl_txq_get_cmd_index(txq, idx);
 
        return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
@@ -135,7 +135,7 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
 {
        struct iwl_tfd *tfd;
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                struct iwl_tfh_tfd *tfh_tfd = _tfd;
 
                return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
@@ -151,7 +151,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
        struct iwl_tfd *tfd;
        struct iwl_tfd_tb *tb;
 
-       if (trans->trans_cfg->use_tfh) {
+       if (trans->trans_cfg->gen2) {
                struct iwl_tfh_tfd *tfh_tfd = _tfd;
                struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
index f0a80c2b476ab319582b81a832db23c3de79808f..4153cd6c2a01d94d27a841ed7b0d8a39650864f4 100644 (file)
@@ -231,10 +231,6 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        if (ret)
                return ret;
 
-       ret = mt7921_wfsys_reset(dev);
-       if (ret)
-               return ret;
-
        /* init tx queue */
        ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
                                         MT7921_TX_RING_SIZE,
index c69ce6df49561f11d24b7ea9361fdadc3f50ad9b..f55caa00ac69b8e940c2aced2cb46a67a248cf76 100644 (file)
@@ -476,12 +476,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
 {
        int ret;
 
-       ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
-       if (ret && mt76_is_mmio(&dev->mt76)) {
-               dev_dbg(dev->mt76.dev, "Firmware is already download\n");
-               goto fw_loaded;
-       }
-
        ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
        if (ret)
                return ret;
@@ -504,8 +498,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
                return -EIO;
        }
 
-fw_loaded:
-
 #ifdef CONFIG_PM
        dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
 #endif /* CONFIG_PM */
index ddb1fa4ee01d766701980cfb7bb0c5e957f45a3e..95610a117d2f0e6644cc1f7d832d31ceb0558df4 100644 (file)
@@ -325,6 +325,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
        bus_ops->rmw = mt7921_rmw;
        dev->mt76.bus = bus_ops;
 
+       ret = mt7921e_mcu_fw_pmctrl(dev);
+       if (ret)
+               goto err_free_dev;
+
        ret = __mt7921e_mcu_drv_pmctrl(dev);
        if (ret)
                goto err_free_dev;
@@ -333,6 +337,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
                    (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
        dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
 
+       ret = mt7921_wfsys_reset(dev);
+       if (ret)
+               goto err_free_dev;
+
        mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
 
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
index 1db2d59d33ff70296420073df1fc498892d84e1d..a4bbac916e22b9ec4c5a8182f88c26f08166e6ef 100644 (file)
@@ -3026,17 +3026,18 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
        struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
        struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
        u8 *h2c;
+       int ret;
        u16 h2c_len = count / 2;
 
        h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
        if (IS_ERR(h2c))
                return -EFAULT;
 
-       rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
+       ret = rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
 
        kfree(h2c);
 
-       return count;
+       return ret ? ret : count;
 }
 
 static int
index 47d7ba2827ff29a84dc4c871fae307b701690627..37b6fa7466620436b2ab23cd00eb0de91309bb30 100644 (file)
@@ -3431,10 +3431,40 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 
        ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
        if (ret) {
-               dev_err(ctrl->device,
-                       "globally duplicate IDs for nsid %d\n", info->nsid);
+               /*
+                * We've found two different namespaces on two different
+                * subsystems that report the same ID.  This is pretty nasty
+                * for anything that actually requires unique device
+                * identification.  In the kernel we need this for multipathing,
+                * and in user space the /dev/disk/by-id/ links rely on it.
+                *
+                * If the device also claims to be multi-path capable, back off
+                * here now and refuse to probe the second device, as this is a
+                * recipe for data corruption.  If not, this is probably a
+                * cheap consumer device on the PCIe bus, so let the user
+                * proceed and use the shiny toy, but warn that with a changed
+                * probing order (which, due to our async probing, could just
+                * be a device taking longer to start up) the other device
+                * could show up at any time.
+                */
                nvme_print_device_info(ctrl);
-               return ret;
+               if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
+                   ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
+                    info->is_shared)) {
+                       dev_err(ctrl->device,
+                               "ignoring nsid %d because of duplicate IDs\n",
+                               info->nsid);
+                       return ret;
+               }
+
+               dev_err(ctrl->device,
+                       "clearing duplicate IDs for nsid %d\n", info->nsid);
+               dev_err(ctrl->device,
+                       "use of /dev/disk/by-id/ may cause data corruption\n");
+               memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
+               memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
+               memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
+               ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
        }
 
        mutex_lock(&ctrl->subsys->lock);
index 83d2e6860d388bfb05be3b8813b8d0ea2733aaf8..1ba10a5c656d1da4545da8f53b963b56d4357bc0 100644 (file)
@@ -27,7 +27,7 @@ void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
 
        /* create debugfs directory and attribute */
        parent = debugfs_create_dir(dev_name, NULL);
-       if (!parent) {
+       if (IS_ERR(parent)) {
                pr_warn("%s: failed to create debugfs directory\n", dev_name);
                return;
        }
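
debugfs_create_dir() signals failure with an ERR_PTR(), never NULL, so the old !parent test could not trigger. The corrected check, as a sketch:

    struct dentry *parent = debugfs_create_dir(dev_name, NULL);

    if (IS_ERR(parent)) {      /* not a NULL check: ERR_PTR on failure */
        pr_warn("%s: failed to create debugfs directory\n", dev_name);
        return;
    }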
index 691f2df574ce9ff0966ac5b03139f95e68a0d5f4..1cd2bf82319a988021cbb04d311ebee3dc017bed 100644 (file)
@@ -2548,14 +2548,24 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
         * the controller.  Abort any ios on the association and let the
         * create_association error path resolve things.
         */
-       if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-               __nvme_fc_abort_outstanding_ios(ctrl, true);
+       enum nvme_ctrl_state state;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ctrl->lock, flags);
+       state = ctrl->ctrl.state;
+       if (state == NVME_CTRL_CONNECTING) {
                set_bit(ASSOC_FAILED, &ctrl->flags);
+               spin_unlock_irqrestore(&ctrl->lock, flags);
+               __nvme_fc_abort_outstanding_ios(ctrl, true);
+               dev_warn(ctrl->ctrl.device,
+                       "NVME-FC{%d}: transport error during (re)connect\n",
+                       ctrl->cnum);
                return;
        }
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        /* Otherwise, only proceed if in LIVE state - e.g. on first error */
-       if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+       if (state != NVME_CTRL_LIVE)
                return;
 
        dev_warn(ctrl->ctrl.device,
@@ -3110,7 +3120,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
         */
 
        ret = nvme_enable_ctrl(&ctrl->ctrl);
-       if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+       if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+               ret = -EIO;
+       if (ret)
                goto out_disconnect_admin_queue;
 
        ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
@@ -3120,7 +3132,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        nvme_unquiesce_admin_queue(&ctrl->ctrl);
 
        ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
-       if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+       if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+               ret = -EIO;
+       if (ret)
                goto out_disconnect_admin_queue;
 
        /* sanity checks */
@@ -3165,10 +3179,16 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                else
                        ret = nvme_fc_recreate_io_queues(ctrl);
        }
-       if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
-               goto out_term_aen_ops;
 
+       spin_lock_irqsave(&ctrl->lock, flags);
+       if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+               ret = -EIO;
+       if (ret) {
+               spin_unlock_irqrestore(&ctrl->lock, flags);
+               goto out_term_aen_ops;
+       }
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
        ctrl->ctrl.nr_reconnects = 0;
 
@@ -3180,6 +3200,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 out_term_aen_ops:
        nvme_fc_term_aen_ops(ctrl);
 out_disconnect_admin_queue:
+       dev_warn(ctrl->ctrl.device,
+               "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
+               ctrl->cnum, ctrl->association_id, ret);
        /* send a Disconnect(association) LS to fc-nvme target */
        nvme_fc_xmt_disconnect_assoc(ctrl);
        spin_lock_irqsave(&ctrl->lock, flags);
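
The recovery path now samples the controller state and publishes ASSOC_FAILED under ctrl->lock, and only then performs the heavyweight abort outside the lock, so nvme_fc_create_association() cannot race past a half-failed association. The locking shape, reduced to a sketch:

    enum nvme_ctrl_state state;
    unsigned long flags;

    spin_lock_irqsave(&ctrl->lock, flags);
    state = ctrl->ctrl.state;              /* one consistent snapshot */
    if (state == NVME_CTRL_CONNECTING)
        set_bit(ASSOC_FAILED, &ctrl->flags);
    spin_unlock_irqrestore(&ctrl->lock, flags);

    if (state == NVME_CTRL_CONNECTING)
        __nvme_fc_abort_outstanding_ios(ctrl, true);  /* outside the lock */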
index 72725729cb6c3f7300dc3237af426de75882a334..baf69af7ea78efd27e57c1c2cfe763ff17867c4e 100644 (file)
@@ -967,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
                struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
                dma_unmap_page(dev->dev, iod->meta_dma,
-                              rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+                              rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
        }
 
        if (blk_rq_nr_phys_segments(req))
@@ -1298,9 +1298,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         */
        if (nvme_should_reset(dev, csts)) {
                nvme_warn_reset(dev, csts);
-               nvme_dev_disable(dev, false);
-               nvme_reset_ctrl(&dev->ctrl);
-               return BLK_EH_DONE;
+               goto disable;
        }
 
        /*
@@ -1351,10 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-               nvme_dev_disable(dev, false);
-               nvme_reset_ctrl(&dev->ctrl);
-
-               return BLK_EH_DONE;
+               goto disable;
        }
 
        if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
@@ -1391,6 +1386,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         * as the device then is in a faulty state.
         */
        return BLK_EH_RESET_TIMER;
+
+disable:
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+               return BLK_EH_DONE;
+
+       nvme_dev_disable(dev, false);
+       if (nvme_try_sched_reset(&dev->ctrl))
+               nvme_unquiesce_io_queues(&dev->ctrl);
+       return BLK_EH_DONE;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -3278,6 +3282,10 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
        case pci_channel_io_frozen:
                dev_warn(dev->ctrl.device,
                        "frozen state error detected, reset controller\n");
+               if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+                       nvme_dev_disable(dev, true);
+                       return PCI_ERS_RESULT_DISCONNECT;
+               }
                nvme_dev_disable(dev, false);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
@@ -3294,7 +3302,8 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
-       nvme_reset_ctrl(&dev->ctrl);
+       if (!nvme_try_sched_reset(&dev->ctrl))
+               nvme_unquiesce_io_queues(&dev->ctrl);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -3396,6 +3405,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x144d, 0xa809),   /* Samsung MZALQ256HBJD 256G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x144d, 0xa802),   /* Samsung SM953 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc4, 0x6303),   /* UMIS RPJTJ512MGE1QDY 512G */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1cc4, 0x6302),   /* UMIS RPJTJ256MGE1QDY 256G */
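
The timeout handler's two teardown call sites are folded into one disable: label that first claims the RESETTING state; if that transition fails, some other path already owns the reset and the handler simply returns. Shape of the consolidated tail, per the hunk above:

    disable:
        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
            return BLK_EH_DONE;     /* reset already owned elsewhere */

        nvme_dev_disable(dev, false);
        if (nvme_try_sched_reset(&dev->ctrl))
            nvme_unquiesce_io_queues(&dev->ctrl);  /* reset not schedulable */
        return BLK_EH_DONE;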
index 45e91811f905cba8b725536c836c76f3f2de9c51..212e1b05d2984f7a9d571612cec4e0f3d3f9f834 100644 (file)
@@ -92,7 +92,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
         * we have no UUID set
         */
        if (uuid_is_null(&ids->uuid)) {
-               dev_warn_ratelimited(dev,
+               dev_warn_once(dev,
                        "No UUID available providing old NGUID\n");
                return sysfs_emit(buf, "%pU\n", ids->nguid);
        }
index 12316ab51bda66af875ca9bffb8253702e1c83ba..ec8557810c2102328a84e4b7c97100f503ef55da 100644 (file)
 int nvme_revalidate_zones(struct nvme_ns *ns)
 {
        struct request_queue *q = ns->queue;
-       int ret;
 
-       ret = blk_revalidate_disk_zones(ns->disk, NULL);
-       if (!ret)
-               blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-       return ret;
+       blk_queue_chunk_sectors(q, ns->zsze);
+       blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+
+       return blk_revalidate_disk_zones(ns->disk, NULL);
 }
 
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
index f2d24b2d992f8721609b59414a4b5beafa33d0ad..48d5df054cd0245e62acf3997be6c2413af9064e 100644 (file)
@@ -373,7 +373,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
                goto out_cleanup_tagset;
 
        ctrl->ctrl.max_hw_sectors =
-               (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+               (NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;
 
        nvme_unquiesce_admin_queue(&ctrl->ctrl);
 
index 71a9c1cc57f59c265478b8c816c8e4f5a9b65f61..9fe07d7efa96cf1fee3a5ea5ec433fbed93c012b 100644 (file)
@@ -102,14 +102,14 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
         * which depends on the host's memory fragmentation. To solve this,
         * ensure mdts is limited to the pages equal to the number of segments.
         */
-       max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+       max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
                                      pctrl->max_hw_sectors);
 
        /*
         * nvmet_passthru_map_sg is limited to using a single bio so limit
         * the mdts based on BIO_MAX_VECS as well
         */
-       max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
+       max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
                                      max_hw_sectors);
 
        page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
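
PAGE_SECTORS_SHIFT is the kernel's named form of the open-coded PAGE_SHIFT - 9 (SECTOR_SHIFT is 9, i.e. 512-byte sectors), so the conversions above now say what they mean. A small worked sketch:

    /* pages -> 512-byte sectors; with 4 KiB pages the shift is 3 */
    unsigned int sectors = BIO_MAX_VECS << PAGE_SECTORS_SHIFT;
    /* 256 << 3 == 2048 sectors == 1 MiB on 4 KiB pages */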
index 0f00f1b8070829f354b8cdfa2fe7af57aa4a24fa..90131de6d75bf1e36d8afa03c984fbf6f95ca160 100644 (file)
@@ -312,6 +312,7 @@ void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
        }
        mutex_unlock(&of_mutex);
 }
+EXPORT_SYMBOL_GPL(of_device_uevent);
 
 int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
 {
index ddc75cd50825e9cfe4ccbec7208a7f2f42180f98..cf8dacf3e3b84d8a56aae6aa1bd79da7cf8e7fea 100644 (file)
@@ -1266,6 +1266,7 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
+DEFINE_SIMPLE_PROP(panel, "panel", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1354,6 +1355,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_resets, },
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
+       { .parse_prop = parse_panel, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index ebca5eab9c9beeac4393c898bb3dbf272bfacd64..56897d4d4fd3efa339f2e001985112bd752108a5 100644 (file)
@@ -181,9 +181,6 @@ void riscv_pmu_start(struct perf_event *event, int flags)
        uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
        u64 init_val;
 
-       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
-               return;
-
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 
index 3c4220be30ecb8c80840e8ee70aa8811c342a0b2..4a8c1b57a90d6300591a76d5af3a4f5b1c6034e1 100644 (file)
@@ -116,21 +116,19 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
-static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
-               unsigned debounce)
+static int amd_gpio_set_debounce(struct amd_gpio *gpio_dev, unsigned int offset,
+                                unsigned int debounce)
 {
        u32 time;
        u32 pin_reg;
        int ret = 0;
-       unsigned long flags;
-       struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-
-       raw_spin_lock_irqsave(&gpio_dev->lock, flags);
 
        /* Use special handling for Pin0 debounce */
-       pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-       if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
-               debounce = 0;
+       if (offset == 0) {
+               pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+               if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
+                       debounce = 0;
+       }
 
        pin_reg = readl(gpio_dev->base + offset * 4);
 
@@ -182,23 +180,10 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
                pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
        }
        writel(pin_reg, gpio_dev->base + offset * 4);
-       raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
        return ret;
 }
 
-static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
-                              unsigned long config)
-{
-       u32 debounce;
-
-       if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
-               return -ENOTSUPP;
-
-       debounce = pinconf_to_config_argument(config);
-       return amd_gpio_set_debounce(gc, offset, debounce);
-}
-
 #ifdef CONFIG_DEBUG_FS
 static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
 {
@@ -220,7 +205,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
        char *pin_sts;
        char *interrupt_sts;
        char *wake_sts;
-       char *pull_up_sel;
        char *orientation;
        char debounce_value[40];
        char *debounce_enable;
@@ -328,14 +312,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        seq_printf(s, "   %s|", wake_sts);
 
                        if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
-                               if (pin_reg & BIT(PULL_UP_SEL_OFF))
-                                       pull_up_sel = "8k";
-                               else
-                                       pull_up_sel = "4k";
-                               seq_printf(s, "%s ↑|",
-                                          pull_up_sel);
+                               seq_puts(s, "  ↑ |");
                        } else if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF)) {
-                               seq_puts(s, "   ↓|");
+                               seq_puts(s, "  ↓ |");
                        } else  {
                                seq_puts(s, "    |");
                        }
@@ -761,7 +740,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
                break;
 
        case PIN_CONFIG_BIAS_PULL_UP:
-               arg = (pin_reg >> PULL_UP_SEL_OFF) & (BIT(0) | BIT(1));
+               arg = (pin_reg >> PULL_UP_ENABLE_OFF) & BIT(0);
                break;
 
        case PIN_CONFIG_DRIVE_STRENGTH:
@@ -780,7 +759,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
 }
 
 static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
-                               unsigned long *configs, unsigned num_configs)
+                          unsigned long *configs, unsigned int num_configs)
 {
        int i;
        u32 arg;
@@ -798,9 +777,8 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 
                switch (param) {
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       pin_reg &= ~DB_TMR_OUT_MASK;
-                       pin_reg |= arg & DB_TMR_OUT_MASK;
-                       break;
+                       ret = amd_gpio_set_debounce(gpio_dev, pin, arg);
+                       goto out_unlock;
 
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        pin_reg &= ~BIT(PULL_DOWN_ENABLE_OFF);
@@ -808,10 +786,8 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
 
                case PIN_CONFIG_BIAS_PULL_UP:
-                       pin_reg &= ~BIT(PULL_UP_SEL_OFF);
-                       pin_reg |= (arg & BIT(0)) << PULL_UP_SEL_OFF;
                        pin_reg &= ~BIT(PULL_UP_ENABLE_OFF);
-                       pin_reg |= ((arg>>1) & BIT(0)) << PULL_UP_ENABLE_OFF;
+                       pin_reg |= (arg & BIT(0)) << PULL_UP_ENABLE_OFF;
                        break;
 
                case PIN_CONFIG_DRIVE_STRENGTH:
@@ -829,6 +805,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
 
                writel(pin_reg, gpio_dev->base + pin*4);
        }
+out_unlock:
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
        return ret;
@@ -870,6 +847,14 @@ static int amd_pinconf_group_set(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static int amd_gpio_set_config(struct gpio_chip *gc, unsigned int pin,
+                              unsigned long config)
+{
+       struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+
+       return amd_pinconf_set(gpio_dev->pctrl, pin, &config, 1);
+}
+
 static const struct pinconf_ops amd_pinconf_ops = {
        .pin_config_get         = amd_pinconf_get,
        .pin_config_set         = amd_pinconf_set,
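
The pinctrl-amd hunks above fold GPIO debounce programming into amd_pinconf_set() and reimplement the gpiochip .set_config hook on top of it, so both entry points share one register writer under gpio_dev->lock. As a consumer-facing sketch (the "wake" con_id and the function below are hypothetical, not part of this patch), a debounce request still arrives through the generic gpiolib API:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    /* Hypothetical consumer; "wake" is an invented con_id. */
    static int example_request_debounced_gpio(struct device *dev)
    {
            struct gpio_desc *desc;
            int ret;

            desc = devm_gpiod_get(dev, "wake", GPIOD_IN);
            if (IS_ERR(desc))
                    return PTR_ERR(desc);

            /*
             * gpiolib turns this into a PIN_CONFIG_INPUT_DEBOUNCE config
             * request, which now lands in amd_pinconf_set() above.
             */
            ret = gpiod_set_debounce(desc, 976 /* us, arbitrary value */);
            if (ret)
                    dev_warn(dev, "debounce not applied: %d\n", ret);

            return 0;
    }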
index 1cf2d06bbd8c47823f59abc7d27162b97d4da55a..34c5c3e71fb261cdeb8c6c5ffdfe59c126db8ae0 100644 (file)
@@ -36,7 +36,6 @@
 #define WAKE_CNTRL_OFF_S4               15
 #define PIN_STS_OFF                    16
 #define DRV_STRENGTH_SEL_OFF           17
-#define PULL_UP_SEL_OFF                        19
 #define PULL_UP_ENABLE_OFF             20
 #define PULL_DOWN_ENABLE_OFF           21
 #define OUTPUT_VALUE_OFF               22
index 9511d920565e975220320bc9df86ff610136937c..b53d26167da52ec26ad7ba43260bdec70dba7999 100644 (file)
@@ -249,6 +249,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map,
 
 static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                                   struct device_node *np,
+                                  struct device_node *parent,
                                   struct pinctrl_map **map,
                                   unsigned int *num_maps,
                                   unsigned int *index)
@@ -266,6 +267,7 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
        struct property *prop;
        int ret, gsel, fsel;
        const char **pin_fn;
+       const char *name;
        const char *pin;
 
        pinmux = of_find_property(np, "pinmux", NULL);
@@ -349,8 +351,19 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                psel_val[i] = MUX_FUNC(value);
        }
 
+       if (parent) {
+               name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
+                                     parent, np);
+               if (!name) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+       } else {
+               name = np->name;
+       }
+
        /* Register a single pin group listing all the pins we read from DT */
-       gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+       gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
        if (gsel < 0) {
                ret = gsel;
                goto done;
@@ -360,17 +373,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
         * Register a single group function where the 'data' is an array PSEL
         * register values read from DT.
         */
-       pin_fn[0] = np->name;
-       fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
-                                          psel_val);
+       pin_fn[0] = name;
+       fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
        if (fsel < 0) {
                ret = fsel;
                goto remove_group;
        }
 
        maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
-       maps[idx].data.mux.group = np->name;
-       maps[idx].data.mux.function = np->name;
+       maps[idx].data.mux.group = name;
+       maps[idx].data.mux.function = name;
        idx++;
 
        dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
@@ -417,7 +429,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
        index = 0;
 
        for_each_child_of_node(np, child) {
-               ret = rzg2l_dt_subnode_to_map(pctldev, child, map,
+               ret = rzg2l_dt_subnode_to_map(pctldev, child, np, map,
                                              num_maps, &index);
                if (ret < 0) {
                        of_node_put(child);
@@ -426,7 +438,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
        }
 
        if (*num_maps == 0) {
-               ret = rzg2l_dt_subnode_to_map(pctldev, np, map,
+               ret = rzg2l_dt_subnode_to_map(pctldev, np, NULL, map,
                                              num_maps, &index);
                if (ret < 0)
                        goto done;
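
Both this rzg2l hunk and the matching rzv2m hunk below register one pin group and one pinmux function per DT subnode. With nested configuration nodes, subnodes under different parents can share a bare node name, so registering under np->name alone could collide; prefixing the parent name makes the group/function names unique. A minimal sketch of the naming rule, with hypothetical node names (not from this patch):

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/slab.h>

    /*
     * Two "pins" subnodes under "i2c0" and "i2c1" would both have
     * np->name == "pins"; prefixing the parent yields the distinct
     * names "i2c0.pins" and "i2c1.pins". Returns NULL on allocation
     * failure, which the caller must turn into -ENOMEM.
     */
    static const char *example_group_name(struct device *dev,
                                          struct device_node *parent,
                                          struct device_node *np)
    {
            if (!parent)
                    return np->name;

            return devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn",
                                  parent, np);
    }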
index e5472293bc7fb2472d57dadeef6acffea849ee2c..35b23c1a5684d3e461fc3665f6f3369d6be3aee9 100644 (file)
@@ -209,6 +209,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map,
 
 static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                                   struct device_node *np,
+                                  struct device_node *parent,
                                   struct pinctrl_map **map,
                                   unsigned int *num_maps,
                                   unsigned int *index)
@@ -226,6 +227,7 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
        struct property *prop;
        int ret, gsel, fsel;
        const char **pin_fn;
+       const char *name;
        const char *pin;
 
        pinmux = of_find_property(np, "pinmux", NULL);
@@ -309,8 +311,19 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                psel_val[i] = MUX_FUNC(value);
        }
 
+       if (parent) {
+               name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
+                                     parent, np);
+               if (!name) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+       } else {
+               name = np->name;
+       }
+
        /* Register a single pin group listing all the pins we read from DT */
-       gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+       gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
        if (gsel < 0) {
                ret = gsel;
                goto done;
@@ -320,17 +333,16 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
         * Register a single group function where the 'data' is an array PSEL
         * register values read from DT.
         */
-       pin_fn[0] = np->name;
-       fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
-                                          psel_val);
+       pin_fn[0] = name;
+       fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
        if (fsel < 0) {
                ret = fsel;
                goto remove_group;
        }
 
        maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
-       maps[idx].data.mux.group = np->name;
-       maps[idx].data.mux.function = np->name;
+       maps[idx].data.mux.group = name;
+       maps[idx].data.mux.function = name;
        idx++;
 
        dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
@@ -377,7 +389,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
        index = 0;
 
        for_each_child_of_node(np, child) {
-               ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
+               ret = rzv2m_dt_subnode_to_map(pctldev, child, np, map,
                                              num_maps, &index);
                if (ret < 0) {
                        of_node_put(child);
@@ -386,7 +398,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
        }
 
        if (*num_maps == 0) {
-               ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
+               ret = rzv2m_dt_subnode_to_map(pctldev, np, NULL, map,
                                              num_maps, &index);
                if (ret < 0)
                        goto done;
index 2c229198e24c967e0f73015ee0bad82b70e0beb6..65732f0a3913f1b4271519bc42b0a5588bbcdc78 100644 (file)
@@ -4,7 +4,7 @@
 # AMD x86 Platform-Specific Drivers
 #
 
-amd-pmc-y                      := pmc.o
+amd-pmc-y                      := pmc.o pmc-quirks.o
 obj-$(CONFIG_AMD_PMC)          += amd-pmc.o
 amd_hsmp-y                     := hsmp.o
 obj-$(CONFIG_AMD_HSMP)         += amd_hsmp.o
diff --git a/drivers/platform/x86/amd/pmc-quirks.c b/drivers/platform/x86/amd/pmc-quirks.c
new file mode 100644 (file)
index 0000000..362e7c0
--- /dev/null
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SoC Power Management Controller Driver Quirks
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "pmc.h"
+
+struct quirk_entry {
+       u32 s2idle_bug_mmio;
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+       .s2idle_bug_mmio = 0xfed80380,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+       {
+               .ident = "L14 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+               }
+       },
+       {
+               .ident = "T14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+               }
+       },
+       {
+               .ident = "X13 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+               }
+       },
+       {
+               .ident = "T14 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+               }
+       },
+       {
+               .ident = "T14 Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+               }
+       },
+       {
+               .ident = "T14 Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+               }
+       },
+       {
+               .ident = "T14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+               }
+       },
+       {
+               .ident = "T14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+               }
+       },
+       {
+               .ident = "P14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+               }
+       },
+       {
+               .ident = "P14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+               }
+       },
+       {
+               .ident = "P14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+               }
+       },
+       /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
+       {
+               .ident = "HP Laptop 15s-eq2xxx",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
+               }
+       },
+       {}
+};
+
+/*
+ * Some laptops run an SMI handler during the D3->D0 transition that occurs
+ * specifically when exiting suspend to idle. When the IOMMU translation
+ * layer is enabled (the default behavior), this can cause large delays
+ * during resume for NVMe devices.
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines before the
+ * D0 transition occurs.
+ */
+static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio)
+{
+       struct resource *res;
+       void __iomem *addr;
+       u8 val;
+
+       res = request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80");
+       if (!res)
+               return;
+
+       addr = ioremap(s2idle_bug_mmio, 1);
+       if (!addr)
+               goto cleanup_resource;
+
+       val = ioread8(addr);
+       iowrite8(val & ~BIT(0), addr);
+
+       iounmap(addr);
+cleanup_resource:
+       release_resource(res);
+       kfree(res);
+}
+
+void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
+{
+       if (dev->quirks && dev->quirks->s2idle_bug_mmio)
+               amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio);
+}
+
+void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+{
+       const struct dmi_system_id *dmi_id;
+
+       dmi_id = dmi_first_match(fwbug_list);
+       if (!dmi_id)
+               return;
+       dev->quirks = dmi_id->driver_data;
+       if (dev->quirks->s2idle_bug_mmio)
+               pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+                       dmi_id->ident);
+}
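
fwbug_list above is a standard dmi_system_id table terminated by an empty entry; amd_pmc_quirks_init() attaches the driver_data of the first match to the device. Extending the quirk to another machine is a one-entry addition before the terminator. A hedged sketch, shown as a standalone table with invented DMI strings:

    #include <linux/dmi.h>

    /* Hypothetical entry; the vendor and product strings are made up. */
    static const struct dmi_system_id example_fwbug_list[] = {
            {
                    .ident = "Example Notebook AMD",
                    .driver_data = &quirk_s2idle_bug,
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "EXAMPLE"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "E123"),
                    }
            },
            {}      /* terminator required by dmi_first_match() */
    };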
index 7d3d080ff174879890e3956d345c98679562b87d..c1e788b67a74831fa6ae3bb88b203932b08c3512 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 
+#include "pmc.h"
+
 /* SMU communication registers */
 #define AMD_PMC_REGISTER_MESSAGE       0x538
 #define AMD_PMC_REGISTER_RESPONSE      0x980
@@ -94,6 +96,7 @@
 #define AMD_CPU_ID_CB                  0x14D8
 #define AMD_CPU_ID_PS                  0x14E8
 #define AMD_CPU_ID_SP                  0x14A4
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
 
 #define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
@@ -146,29 +149,6 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
        {}
 };
 
-struct amd_pmc_dev {
-       void __iomem *regbase;
-       void __iomem *smu_virt_addr;
-       void __iomem *stb_virt_addr;
-       void __iomem *fch_virt_addr;
-       bool msg_port;
-       u32 base_addr;
-       u32 cpu_id;
-       u32 active_ips;
-       u32 dram_size;
-       u32 num_ips;
-       u32 s2d_msg_id;
-/* SMU version information */
-       u8 smu_program;
-       u8 major;
-       u8 minor;
-       u8 rev;
-       struct device *dev;
-       struct pci_dev *rdev;
-       struct mutex lock; /* generic mutex lock */
-       struct dentry *dbgfs_dir;
-};
-
 static bool enable_stb;
 module_param(enable_stb, bool, 0644);
 MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
@@ -891,6 +871,8 @@ static void amd_pmc_s2idle_restore(void)
 
        /* Notify on failed entry */
        amd_pmc_validate_deepest(pdev);
+
+       amd_pmc_process_restore_quirks(pdev);
 }
 
 static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
@@ -926,6 +908,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
        { }
 };
 
@@ -1087,6 +1070,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
                err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
                if (err)
                        dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+               if (!disable_workarounds)
+                       amd_pmc_quirks_init(dev);
        }
 
        amd_pmc_dbgfs_register(dev);
@@ -1115,6 +1100,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0007", 0},
        {"AMDI0008", 0},
        {"AMDI0009", 0},
+       {"AMDI000A", 0},
        {"AMD0004", 0},
        {"AMD0005", 0},
        { }
diff --git a/drivers/platform/x86/amd/pmc.h b/drivers/platform/x86/amd/pmc.h
new file mode 100644 (file)
index 0000000..c27bd6a
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef PMC_H
+#define PMC_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+struct amd_pmc_dev {
+       void __iomem *regbase;
+       void __iomem *smu_virt_addr;
+       void __iomem *stb_virt_addr;
+       void __iomem *fch_virt_addr;
+       bool msg_port;
+       u32 base_addr;
+       u32 cpu_id;
+       u32 active_ips;
+       u32 dram_size;
+       u32 num_ips;
+       u32 s2d_msg_id;
+/* SMU version information */
+       u8 smu_program;
+       u8 major;
+       u8 minor;
+       u8 rev;
+       struct device *dev;
+       struct pci_dev *rdev;
+       struct mutex lock; /* generic mutex lock */
+       struct dentry *dbgfs_dir;
+       struct quirk_entry *quirks;
+};
+
+void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
+void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
+
+#endif /* PMC_H */
index 7780705917b76f006cbdd3b678f43c165b9e63c7..d8732557f9db163290cf3814dcf7006dd3e42521 100644 (file)
@@ -40,6 +40,7 @@
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RMB                 0x14b5
 #define AMD_CPU_ID_PS                  0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT        0x1507
 
 #define PMF_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
@@ -242,6 +243,7 @@ out_unlock:
 static const struct pci_device_id pmf_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
        { }
 };
 
@@ -333,6 +335,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
 static const struct acpi_device_id amd_pmf_acpi_ids[] = {
        {"AMDI0100", 0x100},
        {"AMDI0102", 0},
+       {"AMDI0103", 0},
        { }
 };
 MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
index 2750dee99c3e255a8821b591139775c1d4c641cf..db1e9240dd02c27e07926dd865222288632e3d4e 100644 (file)
@@ -616,7 +616,8 @@ static int dell_wmi_ddv_hwmon_add(struct dell_wmi_ddv_data *data)
        }
 
        if (index < 2) {
-               ret = -ENODEV;
+               /* Finding no available sensors is not an error */
+               ret = 0;
 
                goto err_release;
        }
@@ -841,13 +842,13 @@ static int dell_wmi_ddv_probe(struct wmi_device *wdev, const void *context)
 
        if (IS_REACHABLE(CONFIG_ACPI_BATTERY)) {
                ret = dell_wmi_ddv_battery_add(data);
-               if (ret < 0 && ret != -ENODEV)
+               if (ret < 0)
                        dev_warn(&wdev->dev, "Unable to register ACPI battery hook: %d\n", ret);
        }
 
        if (IS_REACHABLE(CONFIG_HWMON)) {
                ret = dell_wmi_ddv_hwmon_add(data);
-               if (ret < 0 && ret != -ENODEV)
+               if (ret < 0)
                        dev_warn(&wdev->dev, "Unable to register hwmon interface: %d\n", ret);
        }
 
index 61aeca804ba20e8e63138f7c99b4d9bd7b7ff982..ef4b3141efcdc0ad47ba3cbad9d61d06bcf3be2e 100644 (file)
@@ -260,7 +260,7 @@ static_assert(ARRAY_SIZE(skl_int3472_regulator_map_supplies) ==
  * This DMI table contains the name of the second sensor. This is used to add
  * entries for the second sensor to the supply_map.
  */
-const struct dmi_system_id skl_int3472_regulator_second_sensor[] = {
+static const struct dmi_system_id skl_int3472_regulator_second_sensor[] = {
        {
                /* Lenovo Miix 510-12IKB */
                .matches = {
index 9c606ee2030c67fd65a7ce854b98b88f069d7586..d1fd6e69401c2d81c9f0dfb9fb7734da8aa08cdb 100644 (file)
@@ -356,9 +356,7 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
                if (!pfs_start)
                        pfs_start = res_start;
 
-               pfs->pfs_header.cap_offset *= TPMI_CAP_OFFSET_UNIT;
-
-               pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset;
+               pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;
 
                /*
                 * Process TPMI_INFO to get PCI device to CPU package ID.
index 187018ffb06867dd960a378f676ee9fce68fa71a..ad460417f901af56c23dd68c3f4689f6b1d74eaf 100644 (file)
@@ -315,17 +315,12 @@ struct ibm_init_struct {
 /* DMI Quirks */
 struct quirk_entry {
        bool btusb_bug;
-       u32 s2idle_bug_mmio;
 };
 
 static struct quirk_entry quirk_btusb_bug = {
        .btusb_bug = true,
 };
 
-static struct quirk_entry quirk_s2idle_bug = {
-       .s2idle_bug_mmio = 0xfed80380,
-};
-
 static struct {
        u32 bluetooth:1;
        u32 hotkey:1;
@@ -4422,136 +4417,9 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
                        DMI_MATCH(DMI_BOARD_NAME, "20MV"),
                },
        },
-       {
-               .ident = "L14 Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
-               }
-       },
-       {
-               .ident = "T14s Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
-               }
-       },
-       {
-               .ident = "X13 Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
-               }
-       },
-       {
-               .ident = "T14 Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
-               }
-       },
-       {
-               .ident = "T14 Gen1 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
-               }
-       },
-       {
-               .ident = "T14 Gen1 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
-               }
-       },
-       {
-               .ident = "T14s Gen1 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
-               }
-       },
-       {
-               .ident = "T14s Gen1 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
-               }
-       },
-       {
-               .ident = "P14s Gen1 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
-               }
-       },
-       {
-               .ident = "P14s Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
-               }
-       },
-       {
-               .ident = "P14s Gen2 AMD",
-               .driver_data = &quirk_s2idle_bug,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
-               }
-       },
        {}
 };
 
-#ifdef CONFIG_SUSPEND
-/*
- * Lenovo laptops from a variety of generations run a SMI handler during the D3->D0
- * transition that occurs specifically when exiting suspend to idle which can cause
- * large delays during resume when the IOMMU translation layer is enabled (the default
- * behavior) for NVME devices:
- *
- * To avoid this firmware problem, skip the SMI handler on these machines before the
- * D0 transition occurs.
- */
-static void thinkpad_acpi_amd_s2idle_restore(void)
-{
-       struct resource *res;
-       void __iomem *addr;
-       u8 val;
-
-       res = request_mem_region_muxed(tp_features.quirks->s2idle_bug_mmio, 1,
-                                       "thinkpad_acpi_pm80");
-       if (!res)
-               return;
-
-       addr = ioremap(tp_features.quirks->s2idle_bug_mmio, 1);
-       if (!addr)
-               goto cleanup_resource;
-
-       val = ioread8(addr);
-       iowrite8(val & ~BIT(0), addr);
-
-       iounmap(addr);
-cleanup_resource:
-       release_resource(res);
-       kfree(res);
-}
-
-static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
-       .restore = thinkpad_acpi_amd_s2idle_restore,
-};
-#endif
-
 static const struct pci_device_id fwbug_cards_ids[] __initconst = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
@@ -11668,10 +11536,6 @@ static void thinkpad_acpi_module_exit(void)
 
        tpacpi_lifecycle = TPACPI_LIFE_EXITING;
 
-#ifdef CONFIG_SUSPEND
-       if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio)
-               acpi_unregister_lps0_dev(&thinkpad_acpi_s2idle_dev_ops);
-#endif
        if (tpacpi_hwmon)
                hwmon_device_unregister(tpacpi_hwmon);
        if (tp_features.sensors_pdrv_registered)
@@ -11861,13 +11725,6 @@ static int __init thinkpad_acpi_module_init(void)
                tp_features.input_device_registered = 1;
        }
 
-#ifdef CONFIG_SUSPEND
-       if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio) {
-               if (!acpi_register_lps0_dev(&thinkpad_acpi_s2idle_dev_ops))
-                       pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
-                               (dmi_id && dmi_id->ident) ? dmi_id->ident : "");
-       }
-#endif
        return 0;
 }
 
index 68e66b60445c3e40946308d3be7cc5421543d5a3..a5b687eed8f3896201c04c9d1da36361d5423485 100644 (file)
@@ -26,6 +26,21 @@ struct ts_dmi_data {
 
 /* NOTE: Please keep all entries sorted alphabetically */
 
+static const struct property_entry archos_101_cesium_educ_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1280),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1850),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
+       { }
+};
+
+static const struct ts_dmi_data archos_101_cesium_educ_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = archos_101_cesium_educ_props,
+};
+
 static const struct property_entry chuwi_hi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1047,6 +1062,13 @@ static const struct ts_dmi_data vinga_twizzle_j116_data = {
 
 /* NOTE: Please keep this table sorted alphabetically */
 const struct dmi_system_id touchscreen_dmi_table[] = {
+       {
+               /* Archos 101 Cesium Educ */
+               .driver_data = (void *)&archos_101_cesium_educ_data,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 Cesium Educ"),
+               },
+       },
        {
                /* Chuwi Hi8 */
                .driver_data = (void *)&chuwi_hi8_data,
index 5b95d7aa5c2f1a1263fd1e6ca14ec115e74d39ed..a78ddd83cda02fe68fd941339bb7f29355226f8c 100644 (file)
@@ -136,6 +136,16 @@ static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
        return AE_NOT_FOUND;
 }
 
+static bool guid_parse_and_compare(const char *string, const guid_t *guid)
+{
+       guid_t guid_input;
+
+       if (guid_parse(string, &guid_input))
+               return false;
+
+       return guid_equal(&guid_input, guid);
+}
+
 static const void *find_guid_context(struct wmi_block *wblock,
                                     struct wmi_driver *wdriver)
 {
@@ -146,11 +156,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
                return NULL;
 
        while (*id->guid_string) {
-               guid_t guid_input;
-
-               if (guid_parse(id->guid_string, &guid_input))
-                       continue;
-               if (guid_equal(&wblock->gblock.guid, &guid_input))
+               if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
                        return id->context;
                id++;
        }
@@ -895,11 +901,7 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
                return 0;
 
        while (*id->guid_string) {
-               guid_t driver_guid;
-
-               if (WARN_ON(guid_parse(id->guid_string, &driver_guid)))
-                       continue;
-               if (guid_equal(&driver_guid, &wblock->gblock.guid))
+               if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
                        return 1;
 
                id++;
@@ -1239,11 +1241,7 @@ static bool guid_already_parsed_for_legacy(struct acpi_device *device, const gui
        list_for_each_entry(wblock, &wmi_block_list, list) {
                /* skip warning and register if we know the driver will use struct wmi_driver */
                for (int i = 0; allow_duplicates[i] != NULL; i++) {
-                       guid_t tmp;
-
-                       if (guid_parse(allow_duplicates[i], &tmp))
-                               continue;
-                       if (guid_equal(&tmp, guid))
+                       if (guid_parse_and_compare(allow_duplicates[i], guid))
                                return false;
                }
                if (guid_equal(&wblock->gblock.guid, guid)) {
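
The three wmi.c hunks above replace an open-coded parse-then-compare with the new guid_parse_and_compare() helper; note that it deliberately returns false for a malformed GUID string, preserving the old `continue` behavior rather than propagating an error. A sketch of the table-scan pattern the driver-match and context-lookup sites now share (the helper is static to wmi.c, so file-local visibility is assumed; the function below is illustrative, not part of the patch):

    #include <linux/mod_devicetable.h>
    #include <linux/uuid.h>

    /* Scan a terminated wmi_device_id table for a GUID. */
    static bool example_table_has_guid(const struct wmi_device_id *id,
                                       const guid_t *guid)
    {
            /* the table ends with an entry whose guid_string is "" */
            while (*id->guid_string) {
                    if (guid_parse_and_compare(id->guid_string, guid))
                            return true;
                    id++;
            }
            return false;
    }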
index 9b5fccdbc7d6508888b8b7c3c001b63dca2c6091..6df7f377d2f90cabb7aeb76d7f6fa680f1aa5104 100644 (file)
@@ -36,7 +36,7 @@ static const struct smcd_ops ism_ops;
 static struct ism_client *clients[MAX_CLIENTS];        /* use an array rather than */
                                                /* a list for fast mapping  */
 static u8 max_client;
-static DEFINE_SPINLOCK(clients_lock);
+static DEFINE_MUTEX(clients_lock);
 struct ism_dev_list {
        struct list_head list;
        struct mutex mutex; /* protects ism device list */
@@ -47,14 +47,22 @@ static struct ism_dev_list ism_dev_list = {
        .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
 };
 
+static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ism->lock, flags);
+       ism->subs[client->id] = client;
+       spin_unlock_irqrestore(&ism->lock, flags);
+}
+
 int ism_register_client(struct ism_client *client)
 {
        struct ism_dev *ism;
-       unsigned long flags;
        int i, rc = -ENOSPC;
 
        mutex_lock(&ism_dev_list.mutex);
-       spin_lock_irqsave(&clients_lock, flags);
+       mutex_lock(&clients_lock);
        for (i = 0; i < MAX_CLIENTS; ++i) {
                if (!clients[i]) {
                        clients[i] = client;
@@ -65,12 +73,14 @@ int ism_register_client(struct ism_client *client)
                        break;
                }
        }
-       spin_unlock_irqrestore(&clients_lock, flags);
+       mutex_unlock(&clients_lock);
+
        if (i < MAX_CLIENTS) {
                /* initialize with all devices that we got so far */
                list_for_each_entry(ism, &ism_dev_list.list, list) {
                        ism->priv[i] = NULL;
                        client->add(ism);
+                       ism_setup_forwarding(client, ism);
                }
        }
        mutex_unlock(&ism_dev_list.mutex);
@@ -86,25 +96,32 @@ int ism_unregister_client(struct ism_client *client)
        int rc = 0;
 
        mutex_lock(&ism_dev_list.mutex);
-       spin_lock_irqsave(&clients_lock, flags);
-       clients[client->id] = NULL;
-       if (client->id + 1 == max_client)
-               max_client--;
-       spin_unlock_irqrestore(&clients_lock, flags);
        list_for_each_entry(ism, &ism_dev_list.list, list) {
+               spin_lock_irqsave(&ism->lock, flags);
+               /* Stop forwarding IRQs and events */
+               ism->subs[client->id] = NULL;
                for (int i = 0; i < ISM_NR_DMBS; ++i) {
                        if (ism->sba_client_arr[i] == client->id) {
-                               pr_err("%s: attempt to unregister client '%s'"
-                                      "with registered dmb(s)\n", __func__,
-                                      client->name);
+                               WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+                                    __func__, client->name);
                                rc = -EBUSY;
-                               goto out;
+                               goto err_reg_dmb;
                        }
                }
+               spin_unlock_irqrestore(&ism->lock, flags);
        }
-out:
        mutex_unlock(&ism_dev_list.mutex);
 
+       mutex_lock(&clients_lock);
+       clients[client->id] = NULL;
+       if (client->id + 1 == max_client)
+               max_client--;
+       mutex_unlock(&clients_lock);
+       return rc;
+
+err_reg_dmb:
+       spin_unlock_irqrestore(&ism->lock, flags);
+       mutex_unlock(&ism_dev_list.mutex);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ism_unregister_client);
@@ -328,6 +345,7 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                     struct ism_client *client)
 {
        union ism_reg_dmb cmd;
+       unsigned long flags;
        int ret;
 
        ret = ism_alloc_dmb(ism, dmb);
@@ -351,7 +369,9 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                goto out;
        }
        dmb->dmb_tok = cmd.response.dmb_tok;
+       spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
+       spin_unlock_irqrestore(&ism->lock, flags);
 out:
        return ret;
 }
@@ -360,6 +380,7 @@ EXPORT_SYMBOL_GPL(ism_register_dmb);
 int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
        union ism_unreg_dmb cmd;
+       unsigned long flags;
        int ret;
 
        memset(&cmd, 0, sizeof(cmd));
@@ -368,7 +389,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 
        cmd.request.dmb_tok = dmb->dmb_tok;
 
+       spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
+       spin_unlock_irqrestore(&ism->lock, flags);
 
        ret = ism_cmd(ism, &cmd);
        if (ret && ret != ISM_ERROR)
@@ -491,6 +514,7 @@ static u16 ism_get_chid(struct ism_dev *ism)
 static void ism_handle_event(struct ism_dev *ism)
 {
        struct ism_event *entry;
+       struct ism_client *clt;
        int i;
 
        while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
@@ -499,21 +523,21 @@ static void ism_handle_event(struct ism_dev *ism)
 
                entry = &ism->ieq->entry[ism->ieq_idx];
                debug_event(ism_debug_info, 2, entry, sizeof(*entry));
-               spin_lock(&clients_lock);
-               for (i = 0; i < max_client; ++i)
-                       if (clients[i])
-                               clients[i]->handle_event(ism, entry);
-               spin_unlock(&clients_lock);
+               for (i = 0; i < max_client; ++i) {
+                       clt = ism->subs[i];
+                       if (clt)
+                               clt->handle_event(ism, entry);
+               }
        }
 }
 
 static irqreturn_t ism_handle_irq(int irq, void *data)
 {
        struct ism_dev *ism = data;
-       struct ism_client *clt;
        unsigned long bit, end;
        unsigned long *bv;
        u16 dmbemask;
+       u8 client_id;
 
        bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
        end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
@@ -530,8 +554,10 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
                dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
                ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                barrier();
-               clt = clients[ism->sba_client_arr[bit]];
-               clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+               client_id = ism->sba_client_arr[bit];
+               if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+                       continue;
+               ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
        }
 
        if (ism->sba->e) {
@@ -548,20 +574,9 @@ static u64 ism_get_local_gid(struct ism_dev *ism)
        return ism->local_gid;
 }
 
-static void ism_dev_add_work_func(struct work_struct *work)
-{
-       struct ism_client *client = container_of(work, struct ism_client,
-                                                add_work);
-
-       client->add(client->tgt_ism);
-       atomic_dec(&client->tgt_ism->add_dev_cnt);
-       wake_up(&client->tgt_ism->waitq);
-}
-
 static int ism_dev_init(struct ism_dev *ism)
 {
        struct pci_dev *pdev = ism->pdev;
-       unsigned long flags;
        int i, ret;
 
        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
@@ -594,25 +609,16 @@ static int ism_dev_init(struct ism_dev *ism)
                /* hardware is V2 capable */
                ism_create_system_eid();
 
-       init_waitqueue_head(&ism->waitq);
-       atomic_set(&ism->free_clients_cnt, 0);
-       atomic_set(&ism->add_dev_cnt, 0);
-
-       wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
-       spin_lock_irqsave(&clients_lock, flags);
-       for (i = 0; i < max_client; ++i)
+       mutex_lock(&ism_dev_list.mutex);
+       mutex_lock(&clients_lock);
+       for (i = 0; i < max_client; ++i) {
                if (clients[i]) {
-                       INIT_WORK(&clients[i]->add_work,
-                                 ism_dev_add_work_func);
-                       clients[i]->tgt_ism = ism;
-                       atomic_inc(&ism->add_dev_cnt);
-                       schedule_work(&clients[i]->add_work);
+                       clients[i]->add(ism);
+                       ism_setup_forwarding(clients[i], ism);
                }
-       spin_unlock_irqrestore(&clients_lock, flags);
-
-       wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+       }
+       mutex_unlock(&clients_lock);
 
-       mutex_lock(&ism_dev_list.mutex);
        list_add(&ism->list, &ism_dev_list.list);
        mutex_unlock(&ism_dev_list.mutex);
 
@@ -687,36 +693,24 @@ err_dev:
        return ret;
 }
 
-static void ism_dev_remove_work_func(struct work_struct *work)
-{
-       struct ism_client *client = container_of(work, struct ism_client,
-                                                remove_work);
-
-       client->remove(client->tgt_ism);
-       atomic_dec(&client->tgt_ism->free_clients_cnt);
-       wake_up(&client->tgt_ism->waitq);
-}
-
-/* Callers must hold ism_dev_list.mutex */
 static void ism_dev_exit(struct ism_dev *ism)
 {
        struct pci_dev *pdev = ism->pdev;
        unsigned long flags;
        int i;
 
-       wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
-       spin_lock_irqsave(&clients_lock, flags);
+       spin_lock_irqsave(&ism->lock, flags);
        for (i = 0; i < max_client; ++i)
-               if (clients[i]) {
-                       INIT_WORK(&clients[i]->remove_work,
-                                 ism_dev_remove_work_func);
-                       clients[i]->tgt_ism = ism;
-                       atomic_inc(&ism->free_clients_cnt);
-                       schedule_work(&clients[i]->remove_work);
-               }
-       spin_unlock_irqrestore(&clients_lock, flags);
+               ism->subs[i] = NULL;
+       spin_unlock_irqrestore(&ism->lock, flags);
 
-       wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+       mutex_lock(&ism_dev_list.mutex);
+       mutex_lock(&clients_lock);
+       for (i = 0; i < max_client; ++i) {
+               if (clients[i])
+                       clients[i]->remove(ism);
+       }
+       mutex_unlock(&clients_lock);
 
        if (SYSTEM_EID.serial_number[0] != '0' ||
            SYSTEM_EID.type[0] != '0')
@@ -727,15 +721,14 @@ static void ism_dev_exit(struct ism_dev *ism)
        kfree(ism->sba_client_arr);
        pci_free_irq_vectors(pdev);
        list_del_init(&ism->list);
+       mutex_unlock(&ism_dev_list.mutex);
 }
 
 static void ism_remove(struct pci_dev *pdev)
 {
        struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
 
-       mutex_lock(&ism_dev_list.mutex);
        ism_dev_exit(ism);
-       mutex_unlock(&ism_dev_list.mutex);
 
        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
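
The ism.c rework above replaces the global, spinlock-protected client callbacks with a per-device ism->subs[] forwarding array: the clients[] table is now guarded by a mutex and touched only in process context, while the IRQ and event paths read subs[] after writers publish entries under ism->lock. A minimal client skeleton, hedged against the struct ism_client layout in <linux/ism.h> (all names and the empty bodies are invented):

    #include <linux/ism.h>

    static void example_add(struct ism_dev *ism) { }
    static void example_remove(struct ism_dev *ism) { }
    static void example_irq(struct ism_dev *ism, unsigned int bit,
                            u16 dmbemask) { }
    static void example_event(struct ism_dev *ism, struct ism_event *e) { }

    static struct ism_client example_client = {
            .name           = "example",
            .add            = example_add,
            .remove         = example_remove,
            .handle_irq     = example_irq,
            .handle_event   = example_event,
    };

    /*
     * After ism_register_client(&example_client), each existing device
     * runs ->add() and is wired up via ism_setup_forwarding(), so IRQs
     * and events reach the client through ism->subs[client->id].
     */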
index 7c6efde75da664b3819161d5aa96466b0d26aaa1..73b6ac0c01f549a1ab20fcee5172b42a0232a35a 100644 (file)
@@ -2618,7 +2618,7 @@ struct aac_hba_info {
 struct aac_aifcmd {
        __le32 command;         /* Tell host what type of notify this is */
        __le32 seqnum;          /* To allow ordering of reports (if necessary) */
-       u8 data[1];             /* Undefined length (from kernel viewpoint) */
+       u8 data[];              /* Undefined length (from kernel viewpoint) */
 };
 
 /**
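
Replacing the one-element array with a C99 flexible array member removes the hidden extra byte from sizeof(struct aac_aifcmd) and lets allocations use the overflow-safe struct_size() helper. A hedged allocation sketch (the function is illustrative; struct_size() and kzalloc() are the real kernel APIs):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* Allocate an AIF command carrying `len` payload bytes. */
    static struct aac_aifcmd *example_alloc_aif(size_t len)
    {
            struct aac_aifcmd *cmd;

            /* struct_size() == sizeof(*cmd) + len, checked for overflow */
            cmd = kzalloc(struct_size(cmd, data, len), GFP_KERNEL);
            return cmd;
    }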
index f3c3a26a1384dbe70dde94e015051c955ccc6d67..be0d7c57b242b28725a65018c7c2a94a982b635b 100644 (file)
@@ -465,7 +465,7 @@ int fnic_trace_buf_init(void)
        fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
                                          FNIC_ENTRY_SIZE_BYTES;
 
-       fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
+       fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
        if (!fnic_trace_buf_p) {
                printk(KERN_ERR PFX "Failed to allocate memory "
                                  "for fnic_trace_buf_p\n");
index 499849b58ee47ba530adfeaf860e558a85c1a21a..fdd7f69d87efebb51ce54b579ac5670fbf6c9bb7 100644 (file)
@@ -6944,7 +6944,9 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
        if (rc)
                return;
        /* Reset HBA FCF states after successful unregister FCF */
+       spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag = 0;
+       spin_unlock_irq(&phba->hbalock);
        phba->fcf.current_rec.flag = 0;
 
        /*
index d44c4d37b50b45748db1ff09fa08dfab6bdeaadb..4ae38305c15a115730dfdf4615a21aa2c11f49f9 100644 (file)
@@ -4462,7 +4462,6 @@ struct qla_hw_data {
 
        /* n2n */
        struct fc_els_flogi plogi_els_payld;
-#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
 
        void            *swl;
 
index c3dd8dd4f73409ed7d46c7d340dfefdd67e8d311..367fba27fe699310f0b17f1b51a8e87a36522aac 100644 (file)
@@ -8434,7 +8434,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
                ql_dbg(ql_dbg_init, vha, 0x0163,
                    "-> fwdt%u template allocate template %#x words...\n",
                    j, risc_size);
-               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
                if (!fwdt->template) {
                        ql_log(ql_log_warn, vha, 0x0164,
                            "-> fwdt%u failed allocate template.\n", j);
@@ -8689,7 +8689,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
                ql_dbg(ql_dbg_init, vha, 0x0173,
                    "-> fwdt%u template allocate template %#x words...\n",
                    j, risc_size);
-               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
                if (!fwdt->template) {
                        ql_log(ql_log_warn, vha, 0x0174,
                            "-> fwdt%u failed allocate template.\n", j);
index a1675f056a5c250e80c2f2cf1f976296f95d68ef..730d8609276c6dd6657685a78d9f20c427bec8a3 100644 (file)
@@ -3073,7 +3073,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        memset(ptr, 0, sizeof(struct els_plogi_payload));
        memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
        memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
-           &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
+              (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
+              sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));
 
        elsio->u.els_plogi.els_cmd = els_opcode;
        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
@@ -3911,7 +3912,7 @@ qla2x00_start_sp(srb_t *sp)
 
        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
        if (!pkt) {
-               rval = EAGAIN;
+               rval = -EAGAIN;
                ql_log(ql_log_warn, vha, 0x700c,
                    "qla2x00_alloc_iocbs failed.\n");
                goto done;
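
The els_dcmd2 hunk above drops the LOGIN_TEMPLATE_SIZE macro, whose hand-counted "sizeof(struct fc_els_flogi) - 4" silently assumed that fl_csp sits exactly 4 bytes into the structure. Deriving both the source pointer and the copy length from offsetof() keeps the two in sync by construction. A generic sketch of the idiom (struct and function names invented):

    #include <linux/stddef.h>
    #include <linux/string.h>

    struct example {
            unsigned int a;         /* skipped header field */
            unsigned int b;         /* copy starts here */
            unsigned char payload[16];
    };

    /* Copy everything from member `b` to the end of the struct. */
    static void example_copy_tail(void *dst, const struct example *src)
    {
            size_t off = offsetof(struct example, b);

            memcpy(dst, (const char *)src + off, sizeof(*src) - off);
    }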
index 8c58128ad32a29b870039668efcec06fda8aa231..9c0af50501f9ac934b49873899d18e25efe5db5f 100644 (file)
@@ -841,11 +841,6 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
 static int poll_queues; /* iouring iopoll interface.*/
 
-static DEFINE_RWLOCK(atomic_rw);
-static DEFINE_RWLOCK(atomic_rw2);
-
-static rwlock_t *ramdisk_lck_a[2];
-
 static char sdebug_proc_name[] = MY_NAME;
 static const char *my_name = MY_NAME;
 
@@ -6818,9 +6813,6 @@ static int __init scsi_debug_init(void)
        int k, ret, hosts_to_add;
        int idx = -1;
 
-       ramdisk_lck_a[0] = &atomic_rw;
-       ramdisk_lck_a[1] = &atomic_rw2;
-
        if (sdebug_ndelay >= 1000 * 1000 * 1000) {
                pr_warn("ndelay must be less than 1 second, ignored\n");
                sdebug_ndelay = 0;
index abbd08933ac76da8358cdd43c31d29bef1f6aa8f..a25215507668d8e6ae83b91f2acfd799fa5dcbce 100644 (file)
@@ -831,7 +831,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
        struct request_queue *q = disk->queue;
        u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
        unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
-       u32 max_append;
        int ret = 0;
        unsigned int flags;
 
@@ -876,6 +875,11 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
                goto unlock;
        }
 
+       blk_queue_chunk_sectors(q,
+                       logical_to_sectors(sdkp->device, zone_blocks));
+       blk_queue_max_zone_append_sectors(q,
+                       q->limits.max_segments << PAGE_SECTORS_SHIFT);
+
        ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
 
        memalloc_noio_restore(flags);
@@ -888,12 +892,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
                goto unlock;
        }
 
-       max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
-                          q->limits.max_segments << PAGE_SECTORS_SHIFT);
-       max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
-
-       blk_queue_max_zone_append_sectors(q, max_append);
-
        sd_zbc_print_zones(sdkp);
 
 unlock:
index 659196a2f63ad33d3f16e9cb0e26449e78ef46a9..7f12d931fe7c1464c4e39d123666f0444efc5417 100644 (file)
@@ -318,6 +318,7 @@ enum storvsc_request_type {
 #define SRB_STATUS_INVALID_REQUEST     0x06
 #define SRB_STATUS_DATA_OVERRUN                0x12
 #define SRB_STATUS_INVALID_LUN         0x20
+#define SRB_STATUS_INTERNAL_ERROR      0x30
 
 #define SRB_STATUS(status) \
        (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -978,6 +979,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
        case SRB_STATUS_ERROR:
        case SRB_STATUS_ABORTED:
        case SRB_STATUS_INVALID_REQUEST:
+       case SRB_STATUS_INTERNAL_ERROR:
                if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
                        /* Check for capacity change */
                        if ((asc == 0x2a) && (ascq == 0x9)) {
index 9aecb77c3d892da8422a01d511e871f713d5438a..07b5b71b2352031b1b9f3bfd69c0be2f6f95862a 100644 (file)
@@ -126,7 +126,7 @@ enum bcm63xx_regs_spi {
        SPI_MSG_DATA_SIZE,
 };
 
-#define BCM63XX_SPI_MAX_PREPEND                15
+#define BCM63XX_SPI_MAX_PREPEND                7
 
 #define BCM63XX_SPI_MAX_CS             8
 #define BCM63XX_SPI_BUS_NUM            0
index fd55697144cc26790b82acdb5fdcea86451ac909..b6c2659a66ca684d52ca20810b8cbaa615cf14d8 100644 (file)
@@ -684,6 +684,8 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
        if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
                val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
+       else
+               val &= ~S3C64XX_SPI_MODE_SELF_LOOPBACK;
 
        writel(val, regs + S3C64XX_SPI_MODE_CFG);
 
index 4d29e8c1014e0d21cb6167d5390e066c36ee04c0..5dda3c65a38e7ed37312cf4e0a66c49f8023082f 100644 (file)
@@ -2,6 +2,7 @@
 menuconfig FB_TFT
        tristate "Support for small TFT LCD display modules"
        depends on FB && SPI
+       depends on FB_DEVICE
        depends on GPIOLIB || COMPILE_TEST
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
index 3a4abf3bae406ff17467c7b83143c9483ef447d7..eac1d570f4372d059f333fdd73c7d9940e5c00e9 100644 (file)
@@ -684,7 +684,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        info->var.transp.offset =  0;
        info->var.transp.length =  0;
 
-       info->flags =              FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->flags =              FBINFO_VIRTFB;
 
        par = info->par;
        par->info = info;
index 55e302a27847d95d286170559680a77bc86de62a..79bcd5bd49380ae3a00096e39eb326b5ecc669d2 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/mm_types.h>
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
-#include <linux/screen_info.h>
 #include <linux/console.h>
 
 #include "sm750.h"
@@ -808,7 +807,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
        info->screen_base = crtc->v_screen;
        pr_debug("screen_base vaddr = %p\n", info->screen_base);
        info->screen_size = line_length * var->yres_virtual;
-       info->flags = FBINFO_FLAG_DEFAULT | 0;
 
        /* set info->fix */
        fix->type = FB_TYPE_PACKED_PIXELS;
index 24b9077a634a60f7e58f611bc419e3e80020c989..44b9e3fe3a41d7576581c6c291c71ffcca316b7c 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 #include <linux/console.h>
 #include <linux/platform_device.h>
-#include <linux/screen_info.h>
 
 #include "sm750.h"
 #include "sm750_accel.h"
index 43e6f52c2551f4401369881a407e1120925f1753..eea4d1bd36ce7f7d62cc06292e4e0f5ad0b2b096 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 #include <linux/console.h>
 #include <linux/platform_device.h>
-#include <linux/screen_info.h>
 
 #include "sm750.h"
 #include "sm750_cursor.h"
index 55cb00e8b0d1cb3a73403c8467f68a6a6684a738..71247eaf26eef50f1ce9041e607f1152351510a0 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/mtrr.h>
 #endif
 #include <linux/platform_device.h>
-#include <linux/screen_info.h>
 #include <linux/sizes.h>
 
 #include "sm750.h"
index 983fae84d9e80995468c7ff2fd1b8e92bd1537e6..1294467757964e52127bde42f53774487f1dd4c2 100644 (file)
@@ -8520,6 +8520,41 @@ out:
        return ret;
 }
 
+static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
+{
+       int err;
+       struct ufs_query_req *request = NULL;
+       struct ufs_query_res *response = NULL;
+       struct ufs_dev_info *dev_info = &hba->dev_info;
+       struct utp_upiu_query_v4_0 *upiu_data;
+
+       if (dev_info->wspecversion < 0x400)
+               return;
+
+       ufshcd_hold(hba);
+
+       mutex_lock(&hba->dev_cmd.lock);
+
+       ufshcd_init_query(hba, &request, &response,
+                         UPIU_QUERY_OPCODE_WRITE_ATTR,
+                         QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
+
+       request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+
+       upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
+
+       put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
+
+       err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+       if (err)
+               dev_err(hba->dev, "%s: failed to set timestamp %d\n",
+                       __func__, err);
+
+       mutex_unlock(&hba->dev_cmd.lock);
+       ufshcd_release(hba);
+}
+
 /**
  * ufshcd_add_lus - probe and add UFS logical units
  * @hba: per-adapter instance
@@ -8708,6 +8743,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
 
+       ufshcd_set_timestamp_attr(hba);
+
        /* Gear up to HS gear if supported */
        if (hba->max_pwr_info.is_valid) {
                /*
@@ -9749,6 +9786,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
                if (ret)
                        goto set_old_link_state;
+               ufshcd_set_timestamp_attr(hba);
        }
 
        if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
index 16624ba08050f0ab42b4b462cab500d215187bf3..580c8d0bd8bbd812fed5c2363c3af382b7e5511b 100644 (file)
@@ -72,6 +72,7 @@ config SCSI_UFS_QCOM
 config SCSI_UFS_MEDIATEK
        tristate "Mediatek specific hooks to UFS controller platform driver"
        depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+       depends on RESET_CONTROLLER
        select PHY_MTK_UFS
        select RESET_TI_SYSCON
        help
index 8b2b9ac37c3df5e4184bb46320993d1c4ad2472e..b694d7669d3200b148c5575a468f9464192114c0 100644 (file)
@@ -25,6 +25,8 @@ config VIDEO_NOMODESET
        bool
        default n
 
+source "drivers/auxdisplay/Kconfig"
+
 if HAS_IOMEM
 
 config HAVE_FB_ATMEL
@@ -61,7 +63,7 @@ if VT
        source "drivers/video/console/Kconfig"
 endif
 
-if FB || SGI_NEWPORT_CONSOLE
+if FB_CORE || SGI_NEWPORT_CONSOLE
        source "drivers/video/logo/Kconfig"
 
 endif
index 9a885d398c2289dd5039601a2df9b502a3c19a74..86e1cdc8e369774a9010178e48ca6c90e0458119 100644 (file)
@@ -79,8 +79,8 @@ static const char *const backlight_scale_types[] = {
        [BACKLIGHT_SCALE_NON_LINEAR]    = "non-linear",
 };
 
-#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
-                          defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
+#if defined(CONFIG_FB_CORE) || (defined(CONFIG_FB_CORE_MODULE) && \
+                               defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
 /*
  * fb_notifier_callback
  *
@@ -155,7 +155,7 @@ static inline int backlight_register_fb(struct backlight_device *bd)
 static inline void backlight_unregister_fb(struct backlight_device *bd)
 {
 }
-#endif /* CONFIG_FB */
+#endif /* CONFIG_FB_CORE */
 
 static void backlight_generate_event(struct backlight_device *bd,
                                     enum backlight_update_reason reason)
index 7df25faa07a59f80ebf19557d6b44fbf761896b1..c95a12bf0ce263e015c31a2ade139e6e228cbd1a 100644 (file)
@@ -104,7 +104,7 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
 {
        struct bd6107 *bd = bl_get_data(backlight);
 
-       return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
+       return !bd->pdata->dev || bd->pdata->dev == info->device;
 }
 
 static const struct backlight_ops bd6107_backlight_ops = {
index 6f78d928f054a11e4ab1e5cb1aeb977ee458867c..d3bea42407f1593b0b8b855a5fb7ed32b83a5685 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/slab.h>
 
 struct gpio_backlight {
-       struct device *fbdev;
+       struct device *dev;
        struct gpio_desc *gpiod;
 };
 
@@ -35,7 +35,7 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
 {
        struct gpio_backlight *gbl = bl_get_data(bl);
 
-       return gbl->fbdev == NULL || gbl->fbdev == info->dev;
+       return !gbl->dev || gbl->dev == info->device;
 }
 
 static const struct backlight_ops gpio_backlight_ops = {
@@ -59,7 +59,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        if (pdata)
-               gbl->fbdev = pdata->fbdev;
+               gbl->dev = pdata->dev;
 
        def_value = device_property_read_bool(dev, "default-on");
 
index 56695ce67e4801246050ec330bb40b366c01c9d4..1f1d06b4e119a1f05c73e25a6f781be3fa0f15c6 100644 (file)
@@ -67,7 +67,7 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
 {
        struct lv5207lp *lv = bl_get_data(backlight);
 
-       return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
+       return !lv->pdata->dev || lv->pdata->dev == info->device;
 }
 
 static const struct backlight_ops lv5207lp_backlight_ops = {
index a2a88d42edf0cdaef1354856f58c7488dff070e2..1b5a319971ed07f148e54d65de16f267a720252f 100644 (file)
@@ -72,7 +72,7 @@ config DUMMY_CONSOLE_ROWS
 
 config FRAMEBUFFER_CONSOLE
        bool "Framebuffer Console support"
-       depends on FB && !UML
+       depends on FB_CORE && !UML
        select VT_HW_CONSOLE_BINDING
        select CRC32
        select FONT_SUPPORT
index 07d6e8dc686bb483c9dc16a4e012db15bcc72c6c..956dd2399cc0bc1477c2a289b102e62b17220ccf 100644 (file)
@@ -448,7 +448,7 @@ static int __init mc68x328fb_init(void)
                fb_info.var.red.offset = fb_info.var.green.offset = fb_info.var.blue.offset = 0;
        }
        fb_info.pseudo_palette = &mc68x328fb_pseudo_palette;
-       fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       fb_info.flags = FBINFO_HWACCEL_YPAN;
 
        if (fb_alloc_cmap(&fb_info.cmap, 256, 0))
                return -ENOMEM;
index 6df9bd09454a26b45559bf49168c166cb016d180..5d93ecc01f6a44d74eb2e101470c5a2098980fca 100644 (file)
@@ -3,13 +3,10 @@
 # fbdev configuration
 #
 
-config FB_NOTIFY
-       bool
-
 menuconfig FB
-       tristate "Support for frame buffer devices"
+       tristate "Support for frame buffer device drivers"
+       select FB_CORE
        select FB_NOTIFY
-       select VIDEO_CMDLINE
        help
          The frame buffer device provides an abstraction for the graphics
          hardware. It represents the frame buffer of some video hardware and
@@ -33,6 +30,12 @@ menuconfig FB
          <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
          information.
 
+         This enables support for native frame buffer device (fbdev) drivers.
+
+         The DRM subsystem provides support for emulated frame buffer devices
+         on top of KMS drivers, but this option allows legacy fbdev drivers to
+         be enabled as well.
+
          Say Y here and to the driver for your graphics board below if you
          are compiling a kernel for a non-x86 architecture.
 
@@ -42,147 +45,10 @@ menuconfig FB
          (e.g. an accelerated X server) and that are not frame buffer
          device-aware may cause unexpected results. If unsure, say N.
 
-config FIRMWARE_EDID
-       bool "Enable firmware EDID"
-       depends on FB
-       help
-         This enables access to the EDID transferred from the firmware.
-         On the i386, this is from the Video BIOS. Enable this if DDC/I2C
-         transfers do not work for your driver and if you are using
-         nvidiafb, i810fb or savagefb.
-
-         In general, choosing Y for this option is safe.  If you
-         experience extremely long delays while booting before you get
-         something on your display, try setting this to N.  Matrox cards in
-         combination with certain motherboards and monitors are known to
-         suffer from this problem.
-
-config FB_DDC
-       tristate
-       depends on FB
-       select I2C_ALGOBIT
-       select I2C
-
-config FB_CFB_FILLRECT
-       tristate
-       depends on FB
-       help
-         Include the cfb_fillrect function for generic software rectangle
-         filling. This is used by drivers that don't provide their own
-         (accelerated) version.
-
-config FB_CFB_COPYAREA
-       tristate
-       depends on FB
-       help
-         Include the cfb_copyarea function for generic software area copying.
-         This is used by drivers that don't provide their own (accelerated)
-         version.
-
-config FB_CFB_IMAGEBLIT
-       tristate
-       depends on FB
-       help
-         Include the cfb_imageblit function for generic software image
-         blitting. This is used by drivers that don't provide their own
-         (accelerated) version.
-
-config FB_CFB_REV_PIXELS_IN_BYTE
-       bool
-       depends on FB
-       help
-         Allow generic frame-buffer functions to work on displays with 1, 2
-         and 4 bits per pixel depths which has opposite order of pixels in
-         byte order to bytes in long order.
-
-config FB_SYS_FILLRECT
-       tristate
-       depends on FB
-       help
-         Include the sys_fillrect function for generic software rectangle
-         filling. This is used by drivers that don't provide their own
-         (accelerated) version and the framebuffer is in system RAM.
-
-config FB_SYS_COPYAREA
-       tristate
-       depends on FB
-       help
-         Include the sys_copyarea function for generic software area copying.
-         This is used by drivers that don't provide their own (accelerated)
-         version and the framebuffer is in system RAM.
-
-config FB_SYS_IMAGEBLIT
-       tristate
-       depends on FB
-       help
-         Include the sys_imageblit function for generic software image
-         blitting. This is used by drivers that don't provide their own
-         (accelerated) version and the framebuffer is in system RAM.
-
-config FB_PROVIDE_GET_FB_UNMAPPED_AREA
-       bool
-       depends on FB
-       help
-         Allow generic frame-buffer to provide get_fb_unmapped_area
-         function to provide shareable character device support on nommu.
-
-menuconfig FB_FOREIGN_ENDIAN
-       bool "Framebuffer foreign endianness support"
-       depends on FB
-       help
-         This menu will let you enable support for the framebuffers with
-         non-native endianness (e.g. Little-Endian framebuffer on a
-         Big-Endian machine). Most probably you don't have such hardware,
-         so it's safe to say "n" here.
-
-choice
-       prompt "Choice endianness support"
-       depends on FB_FOREIGN_ENDIAN
-
-config FB_BOTH_ENDIAN
-       bool "Support for Big- and Little-Endian framebuffers"
-
-config FB_BIG_ENDIAN
-       bool "Support for Big-Endian framebuffers only"
-
-config FB_LITTLE_ENDIAN
-       bool "Support for Little-Endian framebuffers only"
-
-endchoice
-
-config FB_SYS_FOPS
-       tristate
-       depends on FB
-
-config FB_DEFERRED_IO
-       bool
-       depends on FB
-
-config FB_IO_HELPERS
-       bool
-       depends on FB
-       select FB_CFB_COPYAREA
-       select FB_CFB_FILLRECT
-       select FB_CFB_IMAGEBLIT
-
-config FB_SYS_HELPERS
-       bool
-       depends on FB
-       select FB_SYS_COPYAREA
-       select FB_SYS_FILLRECT
-       select FB_SYS_FOPS
-       select FB_SYS_IMAGEBLIT
-
-config FB_SYS_HELPERS_DEFERRED
-       bool
-       depends on FB
-       select FB_DEFERRED_IO
-       select FB_SYS_HELPERS
-
 config FB_HECUBA
        tristate
        depends on FB
-       depends on FB_DEFERRED_IO
+       select FB_SYSMEM_HELPERS_DEFERRED
 
 config FB_SVGALIB
        tristate
@@ -195,47 +61,10 @@ config FB_MACMODES
        tristate
        depends on FB
 
-config FB_BACKLIGHT
-       tristate
-       depends on FB
-       select BACKLIGHT_CLASS_DEVICE
-
-config FB_MODE_HELPERS
-       bool "Enable Video Mode Handling Helpers"
-       depends on FB
-       help
-         This enables functions for handling video modes using the
-         Generalized Timing Formula and the EDID parser. A few drivers rely
-         on this feature such as the radeonfb, rivafb, and the i810fb. If
-         your driver does not take advantage of this feature, choosing Y will
-         just increase the kernel size by about 5K.
-
-config FB_TILEBLITTING
-       bool "Enable Tile Blitting Support"
-       depends on FB
-       help
-         This enables tile blitting.  Tile blitting is a drawing technique
-         where the screen is divided into rectangular sections (tiles), whereas
-         the standard blitting divides the screen into pixels. Because the
-         default drawing element is a tile, drawing functions will be passed
-         parameters in terms of number of tiles instead of number of pixels.
-         For example, to draw a single character, instead of using bitmaps,
-         an index to an array of bitmaps will be used.  To clear or move a
-         rectangular section of a screen, the rectangle will be described in
-         terms of number of tiles in the x- and y-axis.
-
-         This is particularly important to one driver, matroxfb.  If
-         unsure, say N.
-
-comment "Frame buffer hardware drivers"
-       depends on FB
-
 config FB_GRVGA
        tristate "Aeroflex Gaisler framebuffer support"
        depends on FB && SPARC
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
 
@@ -308,9 +137,7 @@ config FB_ARMCLCD
 config FB_ACORN
        bool "Acorn VIDC support"
        depends on (FB = y) && ARM && ARCH_ACORN
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          This is the frame buffer device driver for the Acorn VIDC graphics
          hardware found in Acorn RISC PCs and other ARM-based machines.  If
@@ -348,9 +175,7 @@ config FB_IMX
        depends on FB && HAVE_CLK && HAS_IOMEM
        depends on ARCH_MXC || COMPILE_TEST
        select LCD_CLASS_DEVICE
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
        select VIDEOMODE_HELPERS
 
@@ -397,9 +222,7 @@ config FB_Q40
        bool
        depends on (FB = y) && Q40
        default y
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
 
 config FB_AMIGA
        tristate "Amiga native chipset support"
@@ -440,9 +263,7 @@ config FB_AMIGA_AGA
 config FB_FM2
        bool "Amiga FrameMaster II/Rainbow II support"
        depends on (FB = y) && ZORRO
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          This is the frame buffer device driver for the Amiga FrameMaster
          card from BSC (exhibited 1992 but not shipped as a CBM product).
@@ -478,9 +299,7 @@ config FB_OF
        depends on FB && PPC && (!PPC_PSERIES || PCI)
        depends on !DRM_OFDRM
        select APERTURE_HELPERS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MACMODES
        help
          Say Y if you want support with Open Firmware for your graphics
@@ -500,9 +319,7 @@ config FB_CONTROL
 config FB_PLATINUM
        bool "Apple \"platinum\" display support"
        depends on (FB = y) && PPC_PMAC && PPC32
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MACMODES
        help
          This driver supports a frame buffer for the "platinum" graphics
@@ -511,9 +328,7 @@ config FB_PLATINUM
 config FB_VALKYRIE
        bool "Apple \"valkyrie\" display support"
        depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MACMODES
        help
          This driver supports a frame buffer for the "valkyrie" graphics
@@ -522,9 +337,7 @@ config FB_VALKYRIE
 config FB_CT65550
        bool "Chips 65550 display support"
        depends on (FB = y) && PPC32 && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          This is the frame buffer device driver for the Chips & Technologies
@@ -533,9 +346,7 @@ config FB_CT65550
 config FB_ASILIANT
        bool "Asiliant (Chips) 69000 display support"
        depends on (FB = y) && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          This is the frame buffer device driver for the Asiliant 69030 chipset
@@ -588,9 +399,7 @@ config FB_STI
 config FB_MAC
        bool "Generic Macintosh display support"
        depends on (FB = y) && MAC
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MACMODES
 
 config FB_HP300
@@ -627,9 +436,7 @@ config FB_UVESA
        tristate "Userspace VESA VGA graphics support"
        depends on FB && CONNECTOR
        depends on !UML
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
        help
          This is the frame buffer driver for generic VBE 2.0 compliant
@@ -646,9 +453,7 @@ config FB_VESA
        bool "VESA VGA graphics support"
        depends on (FB = y) && X86
        select APERTURE_HELPERS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select SYSFB
        help
          This is the frame buffer device driver for generic VESA 2.0
@@ -661,9 +466,7 @@ config FB_EFI
        depends on (FB = y) && !IA64 && EFI
        select APERTURE_HELPERS
        select DRM_PANEL_ORIENTATION_QUIRKS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select SYSFB
        help
          This is the EFI frame buffer device driver. If the firmware on
@@ -673,11 +476,6 @@ config FB_EFI
 config FB_N411
        tristate "N411 Apollo/Hecuba devkit support"
        depends on FB && X86 && MMU
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
-       select FB_DEFERRED_IO
        select FB_HECUBA
        help
          This enables support for the Apollo display controller in its
@@ -813,9 +611,7 @@ config FB_XVR500
 config FB_XVR2500
        bool "Sun XVR-2500 3DLABS Wildcat support"
        depends on (FB = y) && PCI && SPARC64
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          This is the framebuffer device for the Sun XVR-2500 and similar
@@ -827,9 +623,7 @@ config FB_XVR2500
 config FB_XVR1000
        bool "Sun XVR-1000 support"
        depends on (FB = y) && SPARC64
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          This is the framebuffer device for the Sun XVR-1000 and similar
          graphics cards.  The driver only works on sparc64 systems where
@@ -862,9 +656,7 @@ config FB_PVR2
 config FB_OPENCORES
        tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
        depends on FB && HAS_DMA
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          This enables support for the OpenCores VGA/LCD core.
 
@@ -891,9 +683,7 @@ config FB_ATMEL
        depends on FB && OF && HAVE_CLK && HAS_IOMEM
        depends on HAVE_FB_ATMEL || COMPILE_TEST
        select FB_BACKLIGHT
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
        select VIDEOMODE_HELPERS
        help
@@ -997,10 +787,8 @@ config FB_RIVA_BACKLIGHT
 config FB_I740
        tristate "Intel740 support"
        depends on FB && PCI
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select VGASTATE
        select VIDEO_NOMODESET
        select FB_DDC
@@ -1278,10 +1066,8 @@ config FB_RADEON_DEBUG
 config FB_ATY128
        tristate "ATI Rage128 display support"
        depends on FB && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
+       select FB_IOMEM_HELPERS
        select FB_MACMODES if PPC_PMAC
        select VIDEO_NOMODESET
        help
@@ -1500,9 +1286,7 @@ config FB_NEOMAGIC
 config FB_KYRO
        tristate "IMG Kyro support"
        depends on FB && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
@@ -1545,9 +1329,8 @@ config FB_3DFX_I2C
 config FB_VOODOO1
        tristate "3Dfx Voodoo Graphics (sst1) support"
        depends on FB && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       depends on FB_DEVICE
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
@@ -1630,9 +1413,7 @@ config FB_PM3
 config FB_CARMINE
        tristate "Fujitsu carmine frame buffer support"
        depends on FB && PCI
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          This is the frame buffer device driver for the Fujitsu Carmine chip.
@@ -1724,9 +1505,7 @@ config FB_HIT
 config FB_PMAG_AA
        tristate "PMAG-AA TURBOchannel framebuffer support"
        depends on FB && TC
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
          used mainly in the MIPS-based DECstation series.
@@ -1734,9 +1513,7 @@ config FB_PMAG_AA
 config FB_PMAG_BA
        tristate "PMAG-BA TURBOchannel framebuffer support"
        depends on FB && TC
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
          used mainly in the MIPS-based DECstation series.
@@ -1744,9 +1521,7 @@ config FB_PMAG_BA
 config FB_PMAGB_B
        tristate "PMAGB-B TURBOchannel framebuffer support"
        depends on FB && TC
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Support for the PMAGB-B TURBOchannel framebuffer card used mainly
          in the MIPS-based DECstation series. The card is currently only
@@ -1755,9 +1530,7 @@ config FB_PMAGB_B
 config FB_MAXINE
        bool "Maxine (Personal DECstation) onboard framebuffer support"
        depends on (FB = y) && MACH_DECSTATION
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Support for the onboard framebuffer (1024x768x8) in the Personal
          DECstation series (Personal DECstation 5000/20, /25, /33, /50,
@@ -1766,9 +1539,7 @@ config FB_MAXINE
 config FB_G364
        bool "G364 frame buffer support"
        depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          The G364 driver is the framebuffer used in MIPS Magnum 4000 and
          Olivetti M700-10 systems.
@@ -1787,9 +1558,7 @@ config FB_PXA168
        tristate "PXA168/910 LCD framebuffer support"
        depends on FB && HAVE_CLK && HAS_IOMEM
        depends on CPU_PXA168 || CPU_PXA910 || COMPILE_TEST
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Frame buffer driver for the built-in LCD controller in the Marvell
          MMP processor.
@@ -1797,9 +1566,7 @@ config FB_PXA168
 config FB_PXA
        tristate "PXA LCD framebuffer support"
        depends on FB && ARCH_PXA
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEOMODE_HELPERS if OF
        select FB_MODE_HELPERS if OF
        help
@@ -1850,10 +1617,8 @@ config PXA3XX_GCU
 config FB_FSL_DIU
        tristate "Freescale DIU framebuffer support"
        depends on FB && FSL_SOC
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select PPC_LIB_RHEAP
        help
          Framebuffer driver for the Freescale SoC DIU
@@ -1862,6 +1627,7 @@ config FB_SH_MOBILE_LCDC
        tristate "SuperH Mobile LCDC framebuffer support"
        depends on FB && HAVE_CLK && HAS_IOMEM
        depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+       depends on FB_DEVICE
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
@@ -1875,9 +1641,7 @@ config FB_S3C
        tristate "Samsung S3C framebuffer support"
        depends on FB && HAVE_CLK && HAS_IOMEM
        depends on ARCH_S3C64XX || COMPILE_TEST
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Frame buffer driver for the built-in FB controller in the Samsung
          SoC line such as the S3C6400 and S3C6410.
@@ -1930,6 +1694,7 @@ config FB_SMSCUFX
 config FB_UDL
        tristate "Displaylink USB Framebuffer support"
        depends on FB && USB
+       depends on FB_DEVICE
        select FB_MODE_HELPERS
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
@@ -1945,9 +1710,7 @@ config FB_UDL
 config FB_IBM_GXT4500
        tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
        depends on FB
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Say Y here to enable support for the IBM GXT4000P/6000P and
@@ -1979,9 +1742,7 @@ config FB_PS3_DEFAULT_SIZE_M
 config FB_XILINX
        tristate "Xilinx frame buffer support"
        depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Include support for the Xilinx ML300/ML403 reference design
          framebuffer. ML300 carries a 640*480 LCD display on the board,
@@ -1991,9 +1752,7 @@ config FB_GOLDFISH
        tristate "Goldfish Framebuffer"
        depends on FB
        depends on GOLDFISH || COMPILE_TEST
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Framebuffer driver for Goldfish Virtual Platform
 
@@ -2005,9 +1764,7 @@ config FB_SH7760
        bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
        depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
                || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Support for the SH7760/SH7763/SH7720/SH7721 integrated
          (D)STN/TFT LCD Controller.
@@ -2020,10 +1777,8 @@ config FB_DA8XX
        tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
        depends on FB && HAVE_CLK && HAS_IOMEM
        depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select FB_CFB_REV_PIXELS_IN_BYTE
+       select FB_IOMEM_HELPERS
        select FB_MODE_HELPERS
        select VIDEOMODE_HELPERS
        help
@@ -2057,11 +1812,7 @@ config FB_VIRTUAL
 config XEN_FBDEV_FRONTEND
        tristate "Xen virtual frame buffer support"
        depends on FB && XEN
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
-       select FB_DEFERRED_IO
+       select FB_SYSMEM_HELPERS_DEFERRED
        select XEN_XENBUS_FRONTEND
        default y
        help
@@ -2072,11 +1823,7 @@ config XEN_FBDEV_FRONTEND
 config FB_METRONOME
        tristate "E-Ink Metronome/8track controller support"
        depends on FB
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
-       select FB_DEFERRED_IO
+       select FB_SYSMEM_HELPERS_DEFERRED
        help
          This driver implements support for the E-Ink Metronome
          controller. The pre-release name for this device was 8track
@@ -2086,9 +1833,7 @@ config FB_MB862XX
        tristate "Fujitsu MB862xx GDC support"
        depends on FB
        depends on PCI || (OF && PPC)
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
@@ -2146,9 +1891,7 @@ config FB_MX3
        tristate "MX3 Framebuffer support"
        depends on FB && MX3_IPU
        select BACKLIGHT_CLASS_DEVICE
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        default y
        help
          This is a framebuffer device for the i.MX31 LCD Controller. So
@@ -2158,11 +1901,7 @@ config FB_MX3
 config FB_BROADSHEET
        tristate "E-Ink Broadsheet/Epson S1D13521 controller support"
        depends on FB && (ARCH_PXA || COMPILE_TEST)
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
-       select FB_DEFERRED_IO
+       select FB_SYSMEM_HELPERS_DEFERRED
        help
          This driver implements support for the E-Ink Broadsheet
          controller. The release name for this device was Epson S1D13521
@@ -2186,9 +1925,7 @@ config FB_SIMPLE
        depends on FB
        depends on !DRM_SIMPLEDRM
        select APERTURE_HELPERS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Say Y if you want support for a simple frame-buffer.
 
@@ -2203,12 +1940,8 @@ config FB_SSD1307
        tristate "Solomon SSD1307 framebuffer support"
        depends on FB && I2C
        depends on GPIOLIB || COMPILE_TEST
-       select FB_SYS_FOPS
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_DEFERRED_IO
        select FB_BACKLIGHT
+       select FB_SYSMEM_HELPERS_DEFERRED
        help
          This driver implements support for the Solomon SSD1307
          OLED controller over I2C.
@@ -2231,3 +1964,5 @@ config FB_SM712
 source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
+
+source "drivers/video/fbdev/core/Kconfig"
index 1b72edc01cfb58378c00c2a148685f35dfda1aab..163d2c9f951c3a1610f7841a6e5d5a81a2211678 100644 (file)
@@ -605,13 +605,11 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 
 static const struct fb_ops acornfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = acornfb_check_var,
        .fb_set_par     = acornfb_set_par,
        .fb_setcolreg   = acornfb_setcolreg,
        .fb_pan_display = acornfb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /*
@@ -694,7 +692,7 @@ static void acornfb_init_fbinfo(void)
        first = 0;
 
        fb_info.fbops           = &acornfb_ops;
-       fb_info.flags           = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       fb_info.flags           = FBINFO_HWACCEL_YPAN;
        fb_info.pseudo_palette  = current_par.pseudo_palette;
 
        strcpy(fb_info.fix.id, "Acorn");
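
Many of the hunks in this series collapse the three open-coded cfb_* callbacks into the single FB_DEFAULT_IOMEM_OPS initializer, backed by the FB_IOMEM_HELPERS Kconfig symbol. A sketch of the assumed shape of that initializer and its use; the real definition lives in <linux/fb.h>:

#include <linux/fb.h>
#include <linux/module.h>

/* Assumed shape, for illustration only; the real macro also wires up
 * an fb_mmap helper for I/O memory. */
#define FB_DEFAULT_IOMEM_OPS_SKETCH \
	.fb_read	= fb_io_read, \
	.fb_write	= fb_io_write, \
	.fb_fillrect	= cfb_fillrect, \
	.fb_copyarea	= cfb_copyarea, \
	.fb_imageblit	= cfb_imageblit

static int example_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
static int example_set_par(struct fb_info *info);

/* Driver-specific hooks remain; the boilerplate I/O paths come from
 * the helper macro. */
static const struct fb_ops example_ops = {
	.owner		= THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS_SKETCH,
	.fb_check_var	= example_check_var,
	.fb_set_par	= example_set_par,
};
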
index e45338227be6e03e102b2cf06a95d5afffa21d13..24d89e6fb7805791f2d8b52e031ad6cd35595224 100644 (file)
@@ -461,7 +461,6 @@ static int clcdfb_register(struct clcd_fb *fb)
        }
 
        fb->fb.fbops            = &clcdfb_ops;
-       fb->fb.flags            = FBINFO_FLAG_DEFAULT;
        fb->fb.pseudo_palette   = fb->cmap;
 
        strncpy(fb->fb.fix.id, clcd_name, sizeof(fb->fb.fix.id));
index d88265dbebf4cb199934bd88878130e6a087338e..cea782283b9c1fedd8a79feb3eedd6c03323ff6f 100644 (file)
@@ -2427,7 +2427,7 @@ static int amifb_set_par(struct fb_info *info)
                info->fix.ywrapstep = 1;
                info->fix.xpanstep = 0;
                info->fix.ypanstep = 0;
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YWRAP |
+               info->flags = FBINFO_HWACCEL_YWRAP |
                        FBINFO_READS_FAST; /* override SCROLL_REDRAW */
        } else {
                info->fix.ywrapstep = 0;
@@ -2436,7 +2436,7 @@ static int amifb_set_par(struct fb_info *info)
                else
                        info->fix.xpanstep = 16 << maxfmode;
                info->fix.ypanstep = 1;
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+               info->flags = FBINFO_HWACCEL_YPAN;
        }
        return 0;
 }
@@ -3660,7 +3660,6 @@ default_chipset:
        }
 
        info->fbops = &amifb_ops;
-       info->flags = FBINFO_DEFAULT;
        info->device = &pdev->dev;
 
        if (!fb_find_mode(&info->var, info, mode_option, ami_modedb,
index 9aaea3be82811a4ff81f18df78c94262d48cda04..cff11cb04a551aee1c6becc3d9506d1f5d8bb3f4 100644 (file)
@@ -546,7 +546,6 @@ static int arcfb_probe(struct platform_device *dev)
        par->c2io_addr = c2io_addr;
        par->cslut[0] = 0x00;
        par->cslut[1] = 0x06;
-       info->flags = FBINFO_FLAG_DEFAULT;
        spin_lock_init(&par->lock);
        if (irq) {
                par->irq = irq;
index 8383468f5577549cf15e7eae816ce4c7a227c50e..bf3c116684dc5b66cce0dfefea257d1e1495d9de 100644 (file)
@@ -98,12 +98,10 @@ static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
 
 static const struct fb_ops asiliantfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = asiliantfb_check_var,
        .fb_set_par     = asiliantfb_set_par,
        .fb_setcolreg   = asiliantfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /* Calculate the ratios for the dot clocks without using a single long long
@@ -516,7 +514,6 @@ static int init_asiliant(struct fb_info *p, unsigned long addr)
        p->fix.smem_start       = addr;
        p->var                  = asiliantfb_var;
        p->fbops                = &asiliantfb_ops;
-       p->flags                = FBINFO_DEFAULT;
 
        err = fb_alloc_cmap(&p->cmap, 256, 0);
        if (err) {
index 2bc4089865e60ac2a1e10ea3bf2209d755173ac9..c4a420b791b9a99856d366578b5e9c7faa745676 100644 (file)
@@ -3112,7 +3112,6 @@ static int __init atafb_probe(struct platform_device *pdev)
 #ifdef ATAFB_FALCON
        fb_info.pseudo_palette = current_par.hw.falcon.pseudo_palette;
 #endif
-       fb_info.flags = FBINFO_FLAG_DEFAULT;
 
        if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, atafb_modedb,
                          NUM_TOTAL_MODES, &atafb_modedb[defmode],
index 987c5f5f02414be9f9c7e24916e6d7ea27bd4f8a..c75a62287ec43cab5ab2b426a2ec9f77477b28b3 100644 (file)
@@ -806,14 +806,12 @@ static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
 
 static const struct fb_ops atmel_lcdfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = atmel_lcdfb_check_var,
        .fb_set_par     = atmel_lcdfb_set_par,
        .fb_setcolreg   = atmel_lcdfb_setcolreg,
        .fb_blank       = atmel_lcdfb_blank,
        .fb_pan_display = atmel_lcdfb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static irqreturn_t atmel_lcdfb_interrupt(int irq, void *dev_id)
@@ -1059,7 +1057,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
        if (IS_ERR(sinfo->reg_lcd))
                sinfo->reg_lcd = NULL;
 
-       info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+       info->flags = FBINFO_PARTIAL_PAN_OK |
                      FBINFO_HWACCEL_YPAN;
        info->pseudo_palette = sinfo->pseudo_palette;
        info->fbops = &atmel_lcdfb_ops;
index 36a9ac05a340f93ab6f1259001ad2feee6791e24..f4de11f192357701540e7e690f93feb39e44a18e 100644 (file)
@@ -504,6 +504,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power);
 
 static const struct fb_ops aty128fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = aty128fb_check_var,
        .fb_set_par     = aty128fb_set_par,
        .fb_setcolreg   = aty128fb_setcolreg,
@@ -511,9 +512,6 @@ static const struct fb_ops aty128fb_ops = {
        .fb_blank       = aty128fb_blank,
        .fb_ioctl       = aty128fb_ioctl,
        .fb_sync        = aty128fb_sync,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
     /*
@@ -1846,7 +1844,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-       bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
+       bd = backlight_device_register(name, info->device, par, &aty128_bl_data,
                                       &props);
        if (IS_ERR(bd)) {
                info->bl_dev = NULL;
@@ -1927,7 +1925,6 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* fill in info */
        info->fbops = &aty128fb_ops;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        par->lcd_on = default_lcd_on;
        par->crt_on = default_crt_on;
@@ -2028,14 +2025,14 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
        par->asleep = 0;
        par->lock_blank = 0;
 
+       if (register_framebuffer(info) < 0)
+               return 0;
+
 #ifdef CONFIG_FB_ATY128_BACKLIGHT
        if (backlight)
                aty128_bl_init(par);
 #endif
 
-       if (register_framebuffer(info) < 0)
-               return 0;
-
        fb_info(info, "%s frame buffer device on %s\n",
                info->fix.id, video_card);
 
@@ -2167,12 +2164,12 @@ static void aty128_remove(struct pci_dev *pdev)
 
        par = info->par;
 
-       unregister_framebuffer(info);
-
 #ifdef CONFIG_FB_ATY128_BACKLIGHT
        aty128_bl_exit(info->bl_dev);
 #endif
 
+       unregister_framebuffer(info);
+
        arch_phys_wc_del(par->wc_cookie);
        iounmap(par->regbase);
        iounmap(info->screen_base);
index cba2b113b28b0533418e017e8658138c97d21fd7..5c87817a4f4ce62afe173a2d0859b9c84a226858 100644 (file)
@@ -2255,7 +2255,7 @@ static void aty_bl_init(struct atyfb_par *par)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-       bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
+       bd = backlight_device_register(name, info->device, par, &aty_bl_data,
                                       &props);
        if (IS_ERR(bd)) {
                info->bl_dev = NULL;
@@ -2637,8 +2637,7 @@ static int aty_init(struct fb_info *info)
 
        info->fbops = &atyfb_ops;
        info->pseudo_palette = par->pseudo_palette;
-       info->flags = FBINFO_DEFAULT           |
-                     FBINFO_HWACCEL_IMAGEBLIT |
+       info->flags = FBINFO_HWACCEL_IMAGEBLIT |
                      FBINFO_HWACCEL_FILLRECT  |
                      FBINFO_HWACCEL_COPYAREA  |
                      FBINFO_HWACCEL_YPAN      |
@@ -2654,11 +2653,6 @@ static int aty_init(struct fb_info *info)
                           USE_F32KHZ | TRISTATE_MEM_EN, par);
        } else
 #endif
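
The check_fb conversions above compare against info->device, the framebuffer's parent device, instead of the fbdev class device info->dev. A hypothetical board-file wiring, assuming the platform-data field was renamed from 'fbdev' to 'dev' to match:

#include <linux/platform_data/gpio_backlight.h>
#include <linux/platform_device.h>

static struct platform_device example_fb_pdev;	/* the LCD controller device */

/* 'dev' must point at the device the fbdev core later stores in
 * info->device, i.e. the framebuffer's parent, so that
 * gpio_backlight_check_fb() matches this backlight to that display. */
static struct gpio_backlight_platform_data example_bl_pdata = {
	.dev = &example_fb_pdev.dev,
};

static struct platform_device example_bl_pdev = {
	.name			= "gpio-backlight",
	.dev.platform_data	= &example_bl_pdata,
};
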
-       if (M64_HAS(MOBIL_BUS) && backlight) {
-#ifdef CONFIG_FB_ATY_BACKLIGHT
-               aty_bl_init(par);
-#endif
-       }
 
        memset(&var, 0, sizeof(var));
 #ifdef CONFIG_PPC
@@ -2751,6 +2745,12 @@ static int aty_init(struct fb_info *info)
                goto aty_init_exit;
        }
 
+       if (M64_HAS(MOBIL_BUS) && backlight) {
+#ifdef CONFIG_FB_ATY_BACKLIGHT
+               aty_bl_init(par);
+#endif
+       }
+
        fb_list = info;
 
        PRINTKI("fb%d: %s frame buffer device on %s\n",
@@ -3716,12 +3716,13 @@ static void atyfb_remove(struct fb_info *info)
        aty_set_crtc(par, &par->saved_crtc);
        par->pll_ops->set_pll(info, &par->saved_pll);
 
-       unregister_framebuffer(info);
-
 #ifdef CONFIG_FB_ATY_BACKLIGHT
        if (M64_HAS(MOBIL_BUS))
                aty_bl_exit(info->bl_dev);
 #endif
+
+       unregister_framebuffer(info);
+
        arch_phys_wc_del(par->wc_cookie);
 
 #ifndef __sparc__
index 427adc838f77e20eb3b37f75c1907a30b9785df2..23a38c3f3977e6b765701a2a2d2dce9ad8b31bb4 100644 (file)
@@ -147,7 +147,7 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-       bd = backlight_device_register(name, rinfo->info->dev, pdata,
+       bd = backlight_device_register(name, rinfo->info->device, pdata,
                                       &radeon_bl_data, &props);
        if (IS_ERR(bd)) {
                rinfo->info->bl_dev = NULL;
index 972c4bbedfa36291b3514db77497fb0f6e97b7cd..93fd1773402c5bcde43da16c56c58813563f4cd9 100644 (file)
@@ -1972,8 +1972,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
 
        info->par = rinfo;
        info->pseudo_palette = rinfo->pseudo_palette;
-       info->flags = FBINFO_DEFAULT
-                   | FBINFO_HWACCEL_COPYAREA
+       info->flags = FBINFO_HWACCEL_COPYAREA
                    | FBINFO_HWACCEL_FILLRECT
                    | FBINFO_HWACCEL_XPAN
                    | FBINFO_HWACCEL_YPAN;
@@ -2517,9 +2516,8 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
 
        del_timer_sync(&rinfo->lvds_timer);
        arch_phys_wc_del(rinfo->wc_cookie);
-        unregister_framebuffer(info);
-
         radeonfb_bl_exit(rinfo);
+       unregister_framebuffer(info);
 
         iounmap(rinfo->mmio_base);
         iounmap(rinfo->fb_base);
index 06403a4fe0e340aba68ffc0c1e566e790fa5b817..e857b15e9f5deb3ae28df0fd2c4775d83706016e 100644 (file)
@@ -970,90 +970,28 @@ static void broadsheetfb_dpy_deferred_io(struct fb_info *info, struct list_head
        }
 }
 
-static void broadsheetfb_fillrect(struct fb_info *info,
-                                  const struct fb_fillrect *rect)
+static void broadsheetfb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
 {
        struct broadsheetfb_par *par = info->par;
 
-       sys_fillrect(info, rect);
-
        broadsheetfb_dpy_update(par);
 }
 
-static void broadsheetfb_copyarea(struct fb_info *info,
-                                  const struct fb_copyarea *area)
+static void broadsheetfb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+                                          u32 width, u32 height)
 {
        struct broadsheetfb_par *par = info->par;
 
-       sys_copyarea(info, area);
-
        broadsheetfb_dpy_update(par);
 }
 
-static void broadsheetfb_imageblit(struct fb_info *info,
-                               const struct fb_image *image)
-{
-       struct broadsheetfb_par *par = info->par;
-
-       sys_imageblit(info, image);
-
-       broadsheetfb_dpy_update(par);
-}
-
-/*
- * this is the slow path from userspace. they can seek and write to
- * the fb. it's inefficient to do anything less than a full screen draw
- */
-static ssize_t broadsheetfb_write(struct fb_info *info, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       struct broadsheetfb_par *par = info->par;
-       unsigned long p = *ppos;
-       void *dst;
-       int err = 0;
-       unsigned long total_size;
-
-       if (!info->screen_buffer)
-               return -ENODEV;
-
-       total_size = info->fix.smem_len;
-
-       if (p > total_size)
-               return -EFBIG;
-
-       if (count > total_size) {
-               err = -EFBIG;
-               count = total_size;
-       }
-
-       if (count + p > total_size) {
-               if (!err)
-                       err = -ENOSPC;
-
-               count = total_size - p;
-       }
-
-       dst = info->screen_buffer + p;
-
-       if (copy_from_user(dst, buf, count))
-               err = -EFAULT;
-
-       if  (!err)
-               *ppos += count;
-
-       broadsheetfb_dpy_update(par);
-
-       return (err) ? err : count;
-}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(broadsheetfb,
+                                  broadsheetfb_defio_damage_range,
+                                  broadsheetfb_defio_damage_area)
 
 static const struct fb_ops broadsheetfb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_read        = fb_sys_read,
-       .fb_write       = broadsheetfb_write,
-       .fb_fillrect    = broadsheetfb_fillrect,
-       .fb_copyarea    = broadsheetfb_copyarea,
-       .fb_imageblit   = broadsheetfb_imageblit,
-       .fb_mmap        = fb_deferred_io_mmap,
+       .owner  = THIS_MODULE,
+       FB_DEFAULT_DEFERRED_OPS(broadsheetfb),
 };
 
 static struct fb_deferred_io broadsheetfb_defio = {
@@ -1131,7 +1069,7 @@ static int broadsheetfb_probe(struct platform_device *dev)
 
        mutex_init(&par->io_lock);
 
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->flags = FBINFO_VIRTFB;
 
        info->fbdefio = &broadsheetfb_defio;
        fb_deferred_io_init(info);
@@ -1200,7 +1138,7 @@ static void broadsheetfb_remove(struct platform_device *dev)
        if (info) {
                struct broadsheetfb_par *par = info->par;
 
-               device_remove_file(info->dev, &dev_attr_loadstore_waveform);
+               device_remove_file(info->device, &dev_attr_loadstore_waveform);
                unregister_framebuffer(info);
                fb_deferred_io_cleanup(info);
                par->board->cleanup(par);
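
FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS() generates the read/write and drawing wrappers that the removed hand-rolled callbacks used to provide, and FB_DEFAULT_DEFERRED_OPS() plugs them into the fb_ops. A rough sketch of what the generated wrappers are assumed to look like for the broadsheetfb prefix (not the verbatim fb.h expansion):

/* The sys_*() helper touches the pixels in system memory, then the
 * damage callback schedules the deferred-I/O display update. */
static void broadsheetfb_defio_fillrect(struct fb_info *info,
					const struct fb_fillrect *rect)
{
	sys_fillrect(info, rect);
	broadsheetfb_defio_damage_area(info, rect->dx, rect->dy,
				       rect->width, rect->height);
}

static ssize_t broadsheetfb_defio_write(struct fb_info *info,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	ssize_t ret = fb_sys_write(info, buf, count, ppos);

	if (ret > 0)
		broadsheetfb_defio_damage_range(info, pos, ret);
	return ret;
}

/* The copyarea, imageblit and read counterparts follow the same
 * pattern; FB_DEFAULT_DEFERRED_OPS(broadsheetfb) also wires in
 * fb_deferred_io_mmap for mmap support. */
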
index 025d663dc6fdcae85bc10e8e1813cd5305ca97e5..6da22044cbc547bd02fa37bfc211bbb3c5bfaee1 100644 (file)
@@ -314,7 +314,6 @@ static int bw2_probe(struct platform_device *op)
 
        info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &bw2_ops;
 
        info->screen_base = of_ioremap(&op->resource[0], 0,
index 4ae21dbdb8caac2638fc2e2c68ce4b8651dc171e..e56065cdba974b4f990b9ab24f483cdcc8beddab 100644 (file)
@@ -530,10 +530,7 @@ static int init_hardware(struct carmine_hw *hw)
 
 static const struct fb_ops carminefb_ops = {
        .owner          = THIS_MODULE,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
-
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = carmine_check_var,
        .fb_set_par     = carmine_set_par,
        .fb_setcolreg   = carmine_setcolreg,
@@ -561,7 +558,6 @@ static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
 
        info->fix = carminefb_fix;
        info->pseudo_palette = par->pseudo_palette;
-       info->flags = FBINFO_DEFAULT;
 
        ret = fb_alloc_cmap(&info->cmap, 256, 1);
        if (ret < 0)
index 832a82f45c8096468babc2e0e29d24a081c7f025..b2ecd9ff2a61507695a6628994a85ed1edb40b1a 100644 (file)
@@ -533,7 +533,7 @@ static int cg14_probe(struct platform_device *op)
        par->mode = MDI_8_PIX;
        par->ramsize = (is_8mb ? 0x800000 : 0x400000);
 
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
        info->fbops = &cg14_ops;
 
        __cg14_reset(par);
index 6335cd364c74c8c523d2f2066528309a0e5e12fa..c0e6179c66a14276610f2ba28ab83da5023113cc 100644 (file)
@@ -384,7 +384,6 @@ static int cg3_probe(struct platform_device *op)
        if (!par->regs)
                goto out_release_fb;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &cg3_ops;
        info->screen_base = of_ioremap(&op->resource[0], CG3_RAM_OFFSET,
                                       info->fix.smem_len, "cg3 ram");
index 6884572efea16fafa60adf4a377758bdd7f1eaf3..e6cb55be7d8b2de5aa4ce67c3ff6714474f0e00b 100644 (file)
@@ -782,7 +782,7 @@ static int cg6_probe(struct platform_device *op)
        par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET,
                                sizeof(u32), "cgsix fhc");
 
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT |
+       info->flags = FBINFO_HWACCEL_IMAGEBLIT |
                        FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
                        FBINFO_READS_FAST;
        info->fbops = &cg6_ops;
index 2a27ba94f652b36e97b6765049cb6db3e4ab5fac..b80711f13df8a16888f0e991aca36449fb56cb54 100644 (file)
@@ -82,13 +82,11 @@ static int chipsfb_blank(int blank, struct fb_info *info);
 
 static const struct fb_ops chipsfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = chipsfb_check_var,
        .fb_set_par     = chipsfb_set_par,
        .fb_setcolreg   = chipsfb_setcolreg,
        .fb_blank       = chipsfb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int chipsfb_check_var(struct fb_var_screeninfo *var,
@@ -340,7 +338,6 @@ static void init_chips(struct fb_info *p, unsigned long addr)
        p->var = chipsfb_var;
 
        p->fbops = &chipsfb_ops;
-       p->flags = FBINFO_DEFAULT;
 
        fb_alloc_cmap(&p->cmap, 256, 0);
 
index ba45e2147c52af6ed505ac0c70af662ce374ae06..9d369b6a4dcc7e7f0ba2799fb23e29a2e9414093 100644 (file)
@@ -1978,8 +1978,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
        struct fb_var_screeninfo *var = &info->var;
 
        info->pseudo_palette = cinfo->pseudo_palette;
-       info->flags = FBINFO_DEFAULT
-                   | FBINFO_HWACCEL_XPAN
+       info->flags = FBINFO_HWACCEL_XPAN
                    | FBINFO_HWACCEL_YPAN
                    | FBINFO_HWACCEL_FILLRECT
                    | FBINFO_HWACCEL_IMAGEBLIT
index ac0d058152a3c03eafe3fd4b59593ad443611804..e956c90efcdcc98cfbf8ea82317618ed78094117 100644 (file)
@@ -310,7 +310,6 @@ static int clps711x_fb_probe(struct platform_device *pdev)
        }
 
        info->fbops = &clps711x_fb_ops;
-       info->flags = FBINFO_DEFAULT;
        info->var.activate = FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;
index 3d59a01ec677b6a2acc5c5ca70aa52409cf6bb7b..b94e7c97264cbf73d1f4831bfd2ac8a4ed23c092 100644 (file)
@@ -313,7 +313,6 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
        info->fix.smem_len = info->screen_size;
        info->pseudo_palette = NULL;
        info->par = NULL;
-       info->flags = FBINFO_DEFAULT;
 
        retval = register_framebuffer(info);
        if (retval < 0) {
index 82eeb139c4ebb121a8805857965c9ced6c6b7fd6..717134c141ffe24ba04d0343be3f7315ed0c37bc 100644 (file)
@@ -775,7 +775,7 @@ static void __init control_init_info(struct fb_info *info, struct fb_info_contro
        info->par = &p->par;
        info->fbops = &controlfb_ops;
        info->pseudo_palette = p->pseudo_palette;
-        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
        info->screen_base = p->frame_buffer + CTRLFB_OFF;
 
        fb_alloc_cmap(&info->cmap, 256, 0);
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
new file mode 100644 (file)
index 0000000..baf7e85
--- /dev/null
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# fbdev core configuration
+#
+
+config FB_CORE
+       select VIDEO_CMDLINE
+       tristate
+
+config FB_NOTIFY
+       bool
+
+config FIRMWARE_EDID
+       bool "Enable firmware EDID"
+       depends on FB
+       help
+         This enables access to the EDID transferred from the firmware.
+         On the i386, this is from the Video BIOS. Enable this if DDC/I2C
+         transfers do not work for your driver and if you are using
+         nvidiafb, i810fb or savagefb.
+
+         In general, choosing Y for this option is safe.  If you
+         experience extremely long delays while booting before you get
+         something on your display, try setting this to N.  Matrox cards in
+         combination with certain motherboards and monitors are known to
+         suffer from this problem.
+
+config FB_DEVICE
+       bool "Provide legacy /dev/fb* device"
+       depends on FB_CORE
+       default y
+       help
+         Say Y here if you want the legacy /dev/fb* device file and
+         interfaces within sysfs anc procfs. It is only required if you
+         have userspace programs that depend on fbdev for graphics output.
+         This does not affect the framebuffer console. If unsure, say N.
+
+config FB_DDC
+       tristate
+       depends on FB
+       select I2C_ALGOBIT
+       select I2C
+
+config FB_CFB_FILLRECT
+       tristate
+       depends on FB_CORE
+       help
+         Include the cfb_fillrect function for generic software rectangle
+         filling. This is used by drivers that don't provide their own
+         (accelerated) version.
+
+config FB_CFB_COPYAREA
+       tristate
+       depends on FB_CORE
+       help
+         Include the cfb_copyarea function for generic software area copying.
+         This is used by drivers that don't provide their own (accelerated)
+         version.
+
+config FB_CFB_IMAGEBLIT
+       tristate
+       depends on FB_CORE
+       help
+         Include the cfb_imageblit function for generic software image
+         blitting. This is used by drivers that don't provide their own
+         (accelerated) version.
+
+config FB_CFB_REV_PIXELS_IN_BYTE
+       bool
+       depends on FB_CORE
+       help
+         Allow generic frame-buffer functions to work on displays with 1, 2
+         and 4 bits per pixel depths which has opposite order of pixels in
+         byte order to bytes in long order.
+
+config FB_SYS_FILLRECT
+       tristate
+       depends on FB_CORE
+       help
+         Include the sys_fillrect function for generic software rectangle
+         filling. This is used by drivers that don't provide their own
+         (accelerated) version and the framebuffer is in system RAM.
+
+config FB_SYS_COPYAREA
+       tristate
+       depends on FB_CORE
+       help
+         Include the sys_copyarea function for generic software area copying.
+         This is used by drivers that don't provide their own (accelerated)
+         version and the framebuffer is in system RAM.
+
+config FB_SYS_IMAGEBLIT
+       tristate
+       depends on FB_CORE
+       help
+         Include the sys_imageblit function for generic software image
+         blitting. This is used by drivers that don't provide their own
+         (accelerated) version and the framebuffer is in system RAM.
+
+config FB_PROVIDE_GET_FB_UNMAPPED_AREA
+       bool
+       depends on FB
+       help
+         Allow generic frame-buffer to provide get_fb_unmapped_area
+         function to provide shareable character device support on nommu.
+
+menuconfig FB_FOREIGN_ENDIAN
+       bool "Framebuffer foreign endianness support"
+       depends on FB
+       help
+         This menu will let you enable support for the framebuffers with
+         non-native endianness (e.g. Little-Endian framebuffer on a
+         Big-Endian machine). Most probably you don't have such hardware,
+         so it's safe to say "n" here.
+
+choice
+       prompt "Choice endianness support"
+       depends on FB_FOREIGN_ENDIAN
+
+config FB_BOTH_ENDIAN
+       bool "Support for Big- and Little-Endian framebuffers"
+
+config FB_BIG_ENDIAN
+       bool "Support for Big-Endian framebuffers only"
+
+config FB_LITTLE_ENDIAN
+       bool "Support for Little-Endian framebuffers only"
+
+endchoice
+
+config FB_SYS_FOPS
+       tristate
+       depends on FB_CORE
+
+config FB_DEFERRED_IO
+       bool
+       depends on FB_CORE
+
+config FB_DMAMEM_HELPERS
+       bool
+       depends on FB_CORE
+       select FB_SYS_COPYAREA
+       select FB_SYS_FILLRECT
+       select FB_SYS_FOPS
+       select FB_SYS_IMAGEBLIT
+
+config FB_IOMEM_HELPERS
+       bool
+       depends on FB_CORE
+       select FB_CFB_COPYAREA
+       select FB_CFB_FILLRECT
+       select FB_CFB_IMAGEBLIT
+
+config FB_SYSMEM_HELPERS
+       bool
+       depends on FB_CORE
+       select FB_SYS_COPYAREA
+       select FB_SYS_FILLRECT
+       select FB_SYS_FOPS
+       select FB_SYS_IMAGEBLIT
+
+config FB_SYSMEM_HELPERS_DEFERRED
+       bool
+       depends on FB_CORE
+       select FB_DEFERRED_IO
+       select FB_SYSMEM_HELPERS
+
+config FB_BACKLIGHT
+       tristate
+       depends on FB
+       select BACKLIGHT_CLASS_DEVICE
+
+config FB_MODE_HELPERS
+       bool "Enable Video Mode Handling Helpers"
+       depends on FB
+       help
+         This enables functions for handling video modes using the
+         Generalized Timing Formula and the EDID parser. A few drivers rely
+         on this feature such as the radeonfb, rivafb, and the i810fb. If
+         your driver does not take advantage of this feature, choosing Y will
+         just increase the kernel size by about 5K.
+
+config FB_TILEBLITTING
+       bool "Enable Tile Blitting Support"
+       depends on FB
+       help
+         This enables tile blitting.  Tile blitting is a drawing technique
+         where the screen is divided into rectangular sections (tiles), whereas
+         the standard blitting divides the screen into pixels. Because the
+         default drawing element is a tile, drawing functions will be passed
+         parameters in terms of number of tiles instead of number of pixels.
+         For example, to draw a single character, instead of using bitmaps,
+         an index to an array of bitmaps will be used.  To clear or move a
+         rectangular section of a screen, the rectangle will be described in
+         terms of number of tiles in the x- and y-axis.
+
+         This is particularly important to one driver, matroxfb.  If
+         unsure, say N.
index 8f0060160ffb7e2dfe2433f409bd21182f4c9d12..edfde2948e5c873ad23d0d5ca1b7235576665b6e 100644 (file)
@@ -1,9 +1,16 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_FB_NOTIFY)           += fb_notify.o
-obj-$(CONFIG_FB)                  += fb.o
-fb-y                              := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
+obj-$(CONFIG_FB_CORE)             += fb.o
+fb-y                              := fb_info.o \
+                                     fbmem.o fbcmap.o \
                                      modedb.o fbcvt.o fb_cmdline.o fb_io_fops.o
+ifdef CONFIG_FB
+fb-y                              += fb_backlight.o fbmon.o
+endif
 fb-$(CONFIG_FB_DEFERRED_IO)       += fb_defio.o
+fb-$(CONFIG_FB_DEVICE)            += fb_chrdev.o \
+                                     fb_procfs.o \
+                                     fbsysfs.o
 
 ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y)
 fb-y                             += fbcon.o bitblit.o softcursor.o
diff --git a/drivers/video/fbdev/core/fb_backlight.c b/drivers/video/fbdev/core/fb_backlight.c
new file mode 100644 (file)
index 0000000..e2d3b3a
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/export.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+/*
+ * This function generates a linear backlight curve
+ *
+ *     0: off
+ *   1-7: min
+ * 8-127: linear from min to max
+ */
+void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
+{
+       unsigned int i, flat, count, range = (max - min);
+
+       mutex_lock(&fb_info->bl_curve_mutex);
+
+       fb_info->bl_curve[0] = off;
+
+       for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
+               fb_info->bl_curve[flat] = min;
+
+       count = FB_BACKLIGHT_LEVELS * 15 / 16;
+       for (i = 0; i < count; ++i)
+               fb_info->bl_curve[flat + i] = min + (range * (i + 1) / count);
+
+       mutex_unlock(&fb_info->bl_curve_mutex);
+}
+EXPORT_SYMBOL_GPL(fb_bl_default_curve);
+#endif
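
A hedged usage sketch for the helper above, in a hypothetical driver's
backlight update path. mydrv_bl_update_status() and mydrv_set_duty() are
assumptions for illustration; the curve itself would have been filled in at
probe time by a call such as fb_bl_default_curve(info, 0, min_duty, max_duty).

	#include <linux/backlight.h>
	#include <linux/fb.h>

	static int mydrv_set_duty(void *par, u8 duty);	/* hypothetical hw write */

	static int mydrv_bl_update_status(struct backlight_device *bd)
	{
		struct fb_info *info = bl_get_data(bd);
		/* assumes max_brightness was set to FB_BACKLIGHT_LEVELS - 1 */
		int level = backlight_get_brightness(bd);
		u8 duty;

		mutex_lock(&info->bl_curve_mutex);
		duty = info->bl_curve[level];	/* curve from fb_bl_default_curve() */
		mutex_unlock(&info->bl_curve_mutex);

		return mydrv_set_duty(info->par, duty);
	}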
diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c
new file mode 100644 (file)
index 0000000..eadb81f
--- /dev/null
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compat.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/fbcon.h>
+#include <linux/major.h>
+
+#include "fb_internal.h"
+
+/*
+ * We hold a reference to the fb_info in file->private_data,
+ * but if the current registered fb has changed, we don't
+ * actually want to use it.
+ *
+ * So look up the fb_info using the inode minor number,
+ * and just verify it against the reference we have.
+ */
+static struct fb_info *file_fb_info(struct file *file)
+{
+       struct inode *inode = file_inode(file);
+       int fbidx = iminor(inode);
+       struct fb_info *info = registered_fb[fbidx];
+
+       if (info != file->private_data)
+               info = NULL;
+       return info;
+}
+
+static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+       struct fb_info *info = file_fb_info(file);
+
+       if (!info)
+               return -ENODEV;
+
+       if (info->state != FBINFO_STATE_RUNNING)
+               return -EPERM;
+
+       if (info->fbops->fb_read)
+               return info->fbops->fb_read(info, buf, count, ppos);
+
+       return fb_io_read(info, buf, count, ppos);
+}
+
+static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+       struct fb_info *info = file_fb_info(file);
+
+       if (!info)
+               return -ENODEV;
+
+       if (info->state != FBINFO_STATE_RUNNING)
+               return -EPERM;
+
+       if (info->fbops->fb_write)
+               return info->fbops->fb_write(info, buf, count, ppos);
+
+       return fb_io_write(info, buf, count, ppos);
+}
+
+static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+                       unsigned long arg)
+{
+       const struct fb_ops *fb;
+       struct fb_var_screeninfo var;
+       struct fb_fix_screeninfo fix;
+       struct fb_cmap cmap_from;
+       struct fb_cmap_user cmap;
+       void __user *argp = (void __user *)arg;
+       long ret = 0;
+
+       switch (cmd) {
+       case FBIOGET_VSCREENINFO:
+               lock_fb_info(info);
+               var = info->var;
+               unlock_fb_info(info);
+
+               ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0;
+               break;
+       case FBIOPUT_VSCREENINFO:
+               if (copy_from_user(&var, argp, sizeof(var)))
+                       return -EFAULT;
+               /* only for kernel-internal use */
+               var.activate &= ~FB_ACTIVATE_KD_TEXT;
+               console_lock();
+               lock_fb_info(info);
+               ret = fbcon_modechange_possible(info, &var);
+               if (!ret)
+                       ret = fb_set_var(info, &var);
+               if (!ret)
+                       fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
+               unlock_fb_info(info);
+               console_unlock();
+               if (!ret && copy_to_user(argp, &var, sizeof(var)))
+                       ret = -EFAULT;
+               break;
+       case FBIOGET_FSCREENINFO:
+               lock_fb_info(info);
+               memcpy(&fix, &info->fix, sizeof(fix));
+               if (info->flags & FBINFO_HIDE_SMEM_START)
+                       fix.smem_start = 0;
+               unlock_fb_info(info);
+
+               ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
+               break;
+       case FBIOPUTCMAP:
+               if (copy_from_user(&cmap, argp, sizeof(cmap)))
+                       return -EFAULT;
+               ret = fb_set_user_cmap(&cmap, info);
+               break;
+       case FBIOGETCMAP:
+               if (copy_from_user(&cmap, argp, sizeof(cmap)))
+                       return -EFAULT;
+               lock_fb_info(info);
+               cmap_from = info->cmap;
+               unlock_fb_info(info);
+               ret = fb_cmap_to_user(&cmap_from, &cmap);
+               break;
+       case FBIOPAN_DISPLAY:
+               if (copy_from_user(&var, argp, sizeof(var)))
+                       return -EFAULT;
+               console_lock();
+               lock_fb_info(info);
+               ret = fb_pan_display(info, &var);
+               unlock_fb_info(info);
+               console_unlock();
+               if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
+                       return -EFAULT;
+               break;
+       case FBIO_CURSOR:
+               ret = -EINVAL;
+               break;
+       case FBIOGET_CON2FBMAP:
+               ret = fbcon_get_con2fb_map_ioctl(argp);
+               break;
+       case FBIOPUT_CON2FBMAP:
+               ret = fbcon_set_con2fb_map_ioctl(argp);
+               break;
+       case FBIOBLANK:
+               if (arg > FB_BLANK_POWERDOWN)
+                       return -EINVAL;
+               console_lock();
+               lock_fb_info(info);
+               ret = fb_blank(info, arg);
+               /* might again call into fb_blank */
+               fbcon_fb_blanked(info, arg);
+               unlock_fb_info(info);
+               console_unlock();
+               break;
+       default:
+               lock_fb_info(info);
+               fb = info->fbops;
+               if (fb->fb_ioctl)
+                       ret = fb->fb_ioctl(info, cmd, arg);
+               else
+                       ret = -ENOTTY;
+               unlock_fb_info(info);
+       }
+       return ret;
+}
+
+static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct fb_info *info = file_fb_info(file);
+
+       if (!info)
+               return -ENODEV;
+       return do_fb_ioctl(info, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+struct fb_fix_screeninfo32 {
+       char                    id[16];
+       compat_caddr_t          smem_start;
+       u32                     smem_len;
+       u32                     type;
+       u32                     type_aux;
+       u32                     visual;
+       u16                     xpanstep;
+       u16                     ypanstep;
+       u16                     ywrapstep;
+       u32                     line_length;
+       compat_caddr_t          mmio_start;
+       u32                     mmio_len;
+       u32                     accel;
+       u16                     reserved[3];
+};
+
+struct fb_cmap32 {
+       u32                     start;
+       u32                     len;
+       compat_caddr_t  red;
+       compat_caddr_t  green;
+       compat_caddr_t  blue;
+       compat_caddr_t  transp;
+};
+
+static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+                         unsigned long arg)
+{
+       struct fb_cmap32 cmap32;
+       struct fb_cmap cmap_from;
+       struct fb_cmap_user cmap;
+
+       if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32)))
+               return -EFAULT;
+
+       cmap = (struct fb_cmap_user) {
+               .start  = cmap32.start,
+               .len    = cmap32.len,
+               .red    = compat_ptr(cmap32.red),
+               .green  = compat_ptr(cmap32.green),
+               .blue   = compat_ptr(cmap32.blue),
+               .transp = compat_ptr(cmap32.transp),
+       };
+
+       if (cmd == FBIOPUTCMAP)
+               return fb_set_user_cmap(&cmap, info);
+
+       lock_fb_info(info);
+       cmap_from = info->cmap;
+       unlock_fb_info(info);
+
+       return fb_cmap_to_user(&cmap_from, &cmap);
+}
+
+static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
+                                 struct fb_fix_screeninfo32 __user *fix32)
+{
+       __u32 data;
+       int err;
+
+       err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
+
+       data = (__u32) (unsigned long) fix->smem_start;
+       err |= put_user(data, &fix32->smem_start);
+
+       err |= put_user(fix->smem_len, &fix32->smem_len);
+       err |= put_user(fix->type, &fix32->type);
+       err |= put_user(fix->type_aux, &fix32->type_aux);
+       err |= put_user(fix->visual, &fix32->visual);
+       err |= put_user(fix->xpanstep, &fix32->xpanstep);
+       err |= put_user(fix->ypanstep, &fix32->ypanstep);
+       err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
+       err |= put_user(fix->line_length, &fix32->line_length);
+
+       data = (__u32) (unsigned long) fix->mmio_start;
+       err |= put_user(data, &fix32->mmio_start);
+
+       err |= put_user(fix->mmio_len, &fix32->mmio_len);
+       err |= put_user(fix->accel, &fix32->accel);
+       err |= copy_to_user(fix32->reserved, fix->reserved,
+                           sizeof(fix->reserved));
+
+       if (err)
+               return -EFAULT;
+       return 0;
+}
+
+static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
+                             unsigned long arg)
+{
+       struct fb_fix_screeninfo fix;
+
+       lock_fb_info(info);
+       fix = info->fix;
+       if (info->flags & FBINFO_HIDE_SMEM_START)
+               fix.smem_start = 0;
+       unlock_fb_info(info);
+       return do_fscreeninfo_to_user(&fix, compat_ptr(arg));
+}
+
+static long fb_compat_ioctl(struct file *file, unsigned int cmd,
+                           unsigned long arg)
+{
+       struct fb_info *info = file_fb_info(file);
+       const struct fb_ops *fb;
+       long ret = -ENOIOCTLCMD;
+
+       if (!info)
+               return -ENODEV;
+       fb = info->fbops;
+       switch (cmd) {
+       case FBIOGET_VSCREENINFO:
+       case FBIOPUT_VSCREENINFO:
+       case FBIOPAN_DISPLAY:
+       case FBIOGET_CON2FBMAP:
+       case FBIOPUT_CON2FBMAP:
+               arg = (unsigned long) compat_ptr(arg);
+               fallthrough;
+       case FBIOBLANK:
+               ret = do_fb_ioctl(info, cmd, arg);
+               break;
+
+       case FBIOGET_FSCREENINFO:
+               ret = fb_get_fscreeninfo(info, cmd, arg);
+               break;
+
+       case FBIOGETCMAP:
+       case FBIOPUTCMAP:
+               ret = fb_getput_cmap(info, cmd, arg);
+               break;
+
+       default:
+               if (fb->fb_compat_ioctl)
+                       ret = fb->fb_compat_ioctl(info, cmd, arg);
+               break;
+       }
+       return ret;
+}
+#endif
+
+static int fb_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct fb_info *info = file_fb_info(file);
+       unsigned long mmio_pgoff;
+       unsigned long start;
+       u32 len;
+
+       if (!info)
+               return -ENODEV;
+       mutex_lock(&info->mm_lock);
+
+       if (info->fbops->fb_mmap) {
+               int res;
+
+               /*
+                * The framebuffer needs to be accessed decrypted, be sure
+                * SME protection is removed ahead of the call
+                */
+               vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+               res = info->fbops->fb_mmap(info, vma);
+               mutex_unlock(&info->mm_lock);
+               return res;
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+       } else if (info->fbdefio) {
+               /*
+                * FB deferred I/O wants you to handle mmap in your drivers. At a
+                * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
+                */
+               dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
+               mutex_unlock(&info->mm_lock);
+               return -ENODEV;
+#endif
+       }
+
+       /*
+        * Ugh. This can be either the frame buffer mapping, or
+        * if pgoff points past it, the mmio mapping.
+        */
+       start = info->fix.smem_start;
+       len = info->fix.smem_len;
+       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+       if (vma->vm_pgoff >= mmio_pgoff) {
+               if (info->var.accel_flags) {
+                       mutex_unlock(&info->mm_lock);
+                       return -EINVAL;
+               }
+
+               vma->vm_pgoff -= mmio_pgoff;
+               start = info->fix.mmio_start;
+               len = info->fix.mmio_len;
+       }
+       mutex_unlock(&info->mm_lock);
+
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       fb_pgprotect(file, vma, start);
+
+       return vm_iomap_memory(vma, start, len);
+}
+
+static int fb_open(struct inode *inode, struct file *file)
+__acquires(&info->lock)
+__releases(&info->lock)
+{
+       int fbidx = iminor(inode);
+       struct fb_info *info;
+       int res = 0;
+
+       info = get_fb_info(fbidx);
+       if (!info) {
+               request_module("fb%d", fbidx);
+               info = get_fb_info(fbidx);
+               if (!info)
+                       return -ENODEV;
+       }
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       lock_fb_info(info);
+       if (!try_module_get(info->fbops->owner)) {
+               res = -ENODEV;
+               goto out;
+       }
+       file->private_data = info;
+       if (info->fbops->fb_open) {
+               res = info->fbops->fb_open(info, 1);
+               if (res)
+                       module_put(info->fbops->owner);
+       }
+#ifdef CONFIG_FB_DEFERRED_IO
+       if (info->fbdefio)
+               fb_deferred_io_open(info, inode, file);
+#endif
+out:
+       unlock_fb_info(info);
+       if (res)
+               put_fb_info(info);
+       return res;
+}
+
+static int fb_release(struct inode *inode, struct file *file)
+__acquires(&info->lock)
+__releases(&info->lock)
+{
+       struct fb_info * const info = file->private_data;
+
+       lock_fb_info(info);
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+       if (info->fbdefio)
+               fb_deferred_io_release(info);
+#endif
+       if (info->fbops->fb_release)
+               info->fbops->fb_release(info, 1);
+       module_put(info->fbops->owner);
+       unlock_fb_info(info);
+       put_fb_info(info);
+       return 0;
+}
+
+#if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
+static unsigned long get_fb_unmapped_area(struct file *filp,
+                                  unsigned long addr, unsigned long len,
+                                  unsigned long pgoff, unsigned long flags)
+{
+       struct fb_info * const info = filp->private_data;
+       unsigned long fb_size = PAGE_ALIGN(info->fix.smem_len);
+
+       if (pgoff > fb_size || len > fb_size - pgoff)
+               return -EINVAL;
+
+       return (unsigned long)info->screen_base + pgoff;
+}
+#endif
+
+static const struct file_operations fb_fops = {
+       .owner = THIS_MODULE,
+       .read = fb_read,
+       .write = fb_write,
+       .unlocked_ioctl = fb_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = fb_compat_ioctl,
+#endif
+       .mmap = fb_mmap,
+       .open = fb_open,
+       .release = fb_release,
+#if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \
+       (defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && \
+        !defined(CONFIG_MMU))
+       .get_unmapped_area = get_fb_unmapped_area,
+#endif
+#ifdef CONFIG_FB_DEFERRED_IO
+       .fsync = fb_deferred_io_fsync,
+#endif
+       .llseek = default_llseek,
+};
+
+int fb_register_chrdev(void)
+{
+       int ret;
+
+       ret = register_chrdev(FB_MAJOR, "fb", &fb_fops);
+       if (ret) {
+               pr_err("Unable to get major %d for fb devs\n", FB_MAJOR);
+               return ret;
+       }
+
+       return ret;
+}
+
+void fb_unregister_chrdev(void)
+{
+       unregister_chrdev(FB_MAJOR, "fb");
+}
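
For reference, the character device that fb_register_chrdev() exposes is the
usual /dev/fbN interface. A minimal userspace sketch exercising the
FBIOGET_VSCREENINFO path implemented in do_fb_ioctl() above (the device path
/dev/fb0 is an assumption):

	#include <fcntl.h>
	#include <linux/fb.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct fb_var_screeninfo var;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, FBIOGET_VSCREENINFO, &var) == 0)
			printf("%ux%u, %u bpp\n", var.xres, var.yres,
			       var.bits_per_pixel);
		close(fd);
		return 0;
	}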
diff --git a/drivers/video/fbdev/core/fb_info.c b/drivers/video/fbdev/core/fb_info.c
new file mode 100644 (file)
index 0000000..4847ebe
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/export.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/**
+ * framebuffer_alloc - creates a new frame buffer info structure
+ *
+ * @size: size of driver private data, can be zero
+ * @dev: pointer to the device for this fb, this can be NULL
+ *
+ * Creates a new frame buffer info structure. Also reserves @size bytes
+ * for driver private data (info->par). info->par (if any) will be
+ * aligned to sizeof(long). The new instances of struct fb_info and
+ * the driver private data are both cleared to zero.
+ *
+ * Returns the new structure, or NULL if an error occurred.
+ *
+ */
+struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
+{
+#define BYTES_PER_LONG (BITS_PER_LONG/8)
+#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
+       int fb_info_size = sizeof(struct fb_info);
+       struct fb_info *info;
+       char *p;
+
+       if (size)
+               fb_info_size += PADDING;
+
+       p = kzalloc(fb_info_size + size, GFP_KERNEL);
+
+       if (!p)
+               return NULL;
+
+       info = (struct fb_info *) p;
+
+       if (size)
+               info->par = p + fb_info_size;
+
+       info->device = dev;
+       info->fbcon_rotate_hint = -1;
+
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+       mutex_init(&info->bl_curve_mutex);
+#endif
+
+       return info;
+#undef PADDING
+#undef BYTES_PER_LONG
+}
+EXPORT_SYMBOL(framebuffer_alloc);
+
+/**
+ * framebuffer_release - marks the structure available for freeing
+ *
+ * @info: frame buffer info structure
+ *
+ * Drop the reference count of the device embedded in the
+ * framebuffer info structure.
+ *
+ */
+void framebuffer_release(struct fb_info *info)
+{
+       if (!info)
+               return;
+
+       if (WARN_ON(refcount_read(&info->count)))
+               return;
+
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+       mutex_destroy(&info->bl_curve_mutex);
+#endif
+
+       kfree(info);
+}
+EXPORT_SYMBOL(framebuffer_release);
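
A minimal sketch of the alloc/release lifecycle documented above, as a
hypothetical platform driver would use it (struct mydrv_par and mydrv_probe()
are assumptions, not code from this series):

	#include <linux/fb.h>
	#include <linux/platform_device.h>

	struct mydrv_par {
		void __iomem *regs;	/* hypothetical private state */
	};

	static int mydrv_probe(struct platform_device *pdev)
	{
		struct fb_info *info;
		int ret;

		/* one allocation: fb_info plus a zeroed, long-aligned par area */
		info = framebuffer_alloc(sizeof(struct mydrv_par), &pdev->dev);
		if (!info)
			return -ENOMEM;

		/* ... set up info->fbops, info->var, info->fix, info->par ... */

		ret = register_framebuffer(info);
		if (ret < 0) {
			framebuffer_release(info); /* no references taken yet */
			return ret;
		}
		return 0;
	}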
diff --git a/drivers/video/fbdev/core/fb_internal.h b/drivers/video/fbdev/core/fb_internal.h
new file mode 100644 (file)
index 0000000..4c8d509
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FB_INTERNAL_H
+#define _FB_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+
+/* fb_devfs.c */
+#if defined(CONFIG_FB_DEVICE)
+int fb_register_chrdev(void);
+void fb_unregister_chrdev(void);
+#else
+static inline int fb_register_chrdev(void)
+{
+       return 0;
+}
+static inline void fb_unregister_chrdev(void)
+{ }
+#endif
+
+/* fbmem.c */
+extern struct class *fb_class;
+extern struct mutex registration_lock;
+extern struct fb_info *registered_fb[FB_MAX];
+extern int num_registered_fb;
+struct fb_info *get_fb_info(unsigned int idx);
+void put_fb_info(struct fb_info *fb_info);
+
+/* fb_procfs.c */
+#if defined(CONFIG_FB_DEVICE)
+int fb_init_procfs(void);
+void fb_cleanup_procfs(void);
+#else
+static inline int fb_init_procfs(void)
+{
+       return 0;
+}
+static inline void fb_cleanup_procfs(void)
+{ }
+#endif
+
+/* fbsysfs.c */
+#if defined(CONFIG_FB_DEVICE)
+int fb_device_create(struct fb_info *fb_info);
+void fb_device_destroy(struct fb_info *fb_info);
+#else
+static inline int fb_device_create(struct fb_info *fb_info)
+{
+       /*
+        * Acquire a reference on the parent device to avoid
+        * unplug operations behind our back. With the fbdev
+        * device enabled, this is performed within device_create().

+        */
+       get_device(fb_info->device);
+
+       return 0;
+}
+static inline void fb_device_destroy(struct fb_info *fb_info)
+{
+       /* Undo the get_device() from fb_device_create() */
+       put_device(fb_info->device);
+}
+#endif
+
+#endif
diff --git a/drivers/video/fbdev/core/fb_procfs.c b/drivers/video/fbdev/core/fb_procfs.c
new file mode 100644 (file)
index 0000000..5964114
--- /dev/null
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/proc_fs.h>
+
+#include "fb_internal.h"
+
+static struct proc_dir_entry *fb_proc_dir_entry;
+
+static void *fb_seq_start(struct seq_file *m, loff_t *pos)
+{
+       mutex_lock(&registration_lock);
+
+       return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static void fb_seq_stop(struct seq_file *m, void *v)
+{
+       mutex_unlock(&registration_lock);
+}
+
+static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+
+       return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static int fb_seq_show(struct seq_file *m, void *v)
+{
+       int i = *(loff_t *)v;
+       struct fb_info *fi = registered_fb[i];
+
+       if (fi)
+               seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
+
+       return 0;
+}
+
+static const struct seq_operations __maybe_unused fb_proc_seq_ops = {
+       .start  = fb_seq_start,
+       .stop   = fb_seq_stop,
+       .next   = fb_seq_next,
+       .show   = fb_seq_show,
+};
+
+int fb_init_procfs(void)
+{
+       struct proc_dir_entry *proc;
+
+       proc = proc_create_seq("fb", 0, NULL, &fb_proc_seq_ops);
+       if (!proc)
+               return -ENOMEM;
+
+       fb_proc_dir_entry = proc;
+
+       return 0;
+}
+
+void fb_cleanup_procfs(void)
+{
+       proc_remove(fb_proc_dir_entry);
+}
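
fb_seq_show() above defines the /proc/fb record format: one "<node> <fix.id>"
line per registered framebuffer. A trivial userspace sketch that reads it
(sample output such as "0 EFI VGA" depends on the registered driver):

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/proc/fb", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}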
index c6c9d040bdec7ca9581faa1656c4c1f892772f43..9f265271b5b9c1f141d99510472ec05829443dc5 100644 (file)
@@ -78,6 +78,7 @@
 #include <asm/irq.h>
 
 #include "fbcon.h"
+#include "fb_internal.h"
 
 /*
  * FIXME: Locking
@@ -102,8 +103,8 @@ enum {
 
 static struct fbcon_display fb_display[MAX_NR_CONSOLES];
 
-struct fb_info *fbcon_registered_fb[FB_MAX];
-int fbcon_num_registered_fb;
+static struct fb_info *fbcon_registered_fb[FB_MAX];
+static int fbcon_num_registered_fb;
 
 #define fbcon_for_each_registered_fb(i)                \
        for (i = 0; WARN_CONSOLE_UNLOCKED(), i < FB_MAX; i++)           \
@@ -576,7 +577,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
                if (scr_readw(r) != vc->vc_video_erase_char)
                        break;
        if (r != q && new_rows >= rows + logo_lines) {
-               save = kzalloc(array3_size(logo_lines, new_cols, 2),
+               save = kmalloc(array3_size(logo_lines, new_cols, 2),
                               GFP_KERNEL);
                if (save) {
                        int i = min(cols, new_cols);
index 329d16e49a9004c18cc76c279ce2530a1657b104..ee44a46a66be1c1037bc47c1ae8049ecac1aaf06 100644 (file)
 
 #include <linux/module.h>
 
-#include <linux/compat.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/major.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/vt.h>
 #include <linux/init.h>
 #include <linux/linux_logo.h>
-#include <linux/proc_fs.h>
 #include <linux/platform_device.h>
-#include <linux/seq_file.h>
 #include <linux/console.h>
 #include <linux/kmod.h>
 #include <linux/err.h>
 #include <video/nomodeset.h>
 #include <video/vga.h>
 
+#include "fb_internal.h"
+
     /*
      *  Frame buffer device initialization and setup routines
      */
 
 #define FBPIXMAPSIZE   (1024 * 8)
 
-static DEFINE_MUTEX(registration_lock);
+struct class *fb_class;
 
+DEFINE_MUTEX(registration_lock);
 struct fb_info *registered_fb[FB_MAX] __read_mostly;
 int num_registered_fb __read_mostly;
 #define for_each_registered_fb(i)              \
@@ -58,7 +57,7 @@ bool fb_center_logo __read_mostly;
 
 int fb_logo_count __read_mostly = -1;
 
-static struct fb_info *get_fb_info(unsigned int idx)
+struct fb_info *get_fb_info(unsigned int idx)
 {
        struct fb_info *fb_info;
 
@@ -74,7 +73,7 @@ static struct fb_info *get_fb_info(unsigned int idx)
        return fb_info;
 }
 
-static void put_fb_info(struct fb_info *fb_info)
+void put_fb_info(struct fb_info *fb_info)
 {
        if (!refcount_dec_and_test(&fb_info->count))
                return;
@@ -703,93 +702,6 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
 EXPORT_SYMBOL(fb_prepare_logo);
 EXPORT_SYMBOL(fb_show_logo);
 
-static void *fb_seq_start(struct seq_file *m, loff_t *pos)
-{
-       mutex_lock(&registration_lock);
-       return (*pos < FB_MAX) ? pos : NULL;
-}
-
-static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       (*pos)++;
-       return (*pos < FB_MAX) ? pos : NULL;
-}
-
-static void fb_seq_stop(struct seq_file *m, void *v)
-{
-       mutex_unlock(&registration_lock);
-}
-
-static int fb_seq_show(struct seq_file *m, void *v)
-{
-       int i = *(loff_t *)v;
-       struct fb_info *fi = registered_fb[i];
-
-       if (fi)
-               seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
-       return 0;
-}
-
-static const struct seq_operations __maybe_unused proc_fb_seq_ops = {
-       .start  = fb_seq_start,
-       .next   = fb_seq_next,
-       .stop   = fb_seq_stop,
-       .show   = fb_seq_show,
-};
-
-/*
- * We hold a reference to the fb_info in file->private_data,
- * but if the current registered fb has changed, we don't
- * actually want to use it.
- *
- * So look up the fb_info using the inode minor number,
- * and just verify it against the reference we have.
- */
-static struct fb_info *file_fb_info(struct file *file)
-{
-       struct inode *inode = file_inode(file);
-       int fbidx = iminor(inode);
-       struct fb_info *info = registered_fb[fbidx];
-
-       if (info != file->private_data)
-               info = NULL;
-       return info;
-}
-
-static ssize_t
-fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
-       struct fb_info *info = file_fb_info(file);
-
-       if (!info)
-               return -ENODEV;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return -EPERM;
-
-       if (info->fbops->fb_read)
-               return info->fbops->fb_read(info, buf, count, ppos);
-
-       return fb_io_read(info, buf, count, ppos);
-}
-
-static ssize_t
-fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-       struct fb_info *info = file_fb_info(file);
-
-       if (!info)
-               return -ENODEV;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return -EPERM;
-
-       if (info->fbops->fb_write)
-               return info->fbops->fb_write(info, buf, count, ppos);
-
-       return fb_io_write(info, buf, count, ppos);
-}
-
 int
 fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
 {
@@ -989,419 +901,6 @@ fb_blank(struct fb_info *info, int blank)
 }
 EXPORT_SYMBOL(fb_blank);
 
-static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
-                       unsigned long arg)
-{
-       const struct fb_ops *fb;
-       struct fb_var_screeninfo var;
-       struct fb_fix_screeninfo fix;
-       struct fb_cmap cmap_from;
-       struct fb_cmap_user cmap;
-       void __user *argp = (void __user *)arg;
-       long ret = 0;
-
-       switch (cmd) {
-       case FBIOGET_VSCREENINFO:
-               lock_fb_info(info);
-               var = info->var;
-               unlock_fb_info(info);
-
-               ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0;
-               break;
-       case FBIOPUT_VSCREENINFO:
-               if (copy_from_user(&var, argp, sizeof(var)))
-                       return -EFAULT;
-               /* only for kernel-internal use */
-               var.activate &= ~FB_ACTIVATE_KD_TEXT;
-               console_lock();
-               lock_fb_info(info);
-               ret = fbcon_modechange_possible(info, &var);
-               if (!ret)
-                       ret = fb_set_var(info, &var);
-               if (!ret)
-                       fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
-               unlock_fb_info(info);
-               console_unlock();
-               if (!ret && copy_to_user(argp, &var, sizeof(var)))
-                       ret = -EFAULT;
-               break;
-       case FBIOGET_FSCREENINFO:
-               lock_fb_info(info);
-               memcpy(&fix, &info->fix, sizeof(fix));
-               if (info->flags & FBINFO_HIDE_SMEM_START)
-                       fix.smem_start = 0;
-               unlock_fb_info(info);
-
-               ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
-               break;
-       case FBIOPUTCMAP:
-               if (copy_from_user(&cmap, argp, sizeof(cmap)))
-                       return -EFAULT;
-               ret = fb_set_user_cmap(&cmap, info);
-               break;
-       case FBIOGETCMAP:
-               if (copy_from_user(&cmap, argp, sizeof(cmap)))
-                       return -EFAULT;
-               lock_fb_info(info);
-               cmap_from = info->cmap;
-               unlock_fb_info(info);
-               ret = fb_cmap_to_user(&cmap_from, &cmap);
-               break;
-       case FBIOPAN_DISPLAY:
-               if (copy_from_user(&var, argp, sizeof(var)))
-                       return -EFAULT;
-               console_lock();
-               lock_fb_info(info);
-               ret = fb_pan_display(info, &var);
-               unlock_fb_info(info);
-               console_unlock();
-               if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
-                       return -EFAULT;
-               break;
-       case FBIO_CURSOR:
-               ret = -EINVAL;
-               break;
-       case FBIOGET_CON2FBMAP:
-               ret = fbcon_get_con2fb_map_ioctl(argp);
-               break;
-       case FBIOPUT_CON2FBMAP:
-               ret = fbcon_set_con2fb_map_ioctl(argp);
-               break;
-       case FBIOBLANK:
-               if (arg > FB_BLANK_POWERDOWN)
-                       return -EINVAL;
-               console_lock();
-               lock_fb_info(info);
-               ret = fb_blank(info, arg);
-               /* might again call into fb_blank */
-               fbcon_fb_blanked(info, arg);
-               unlock_fb_info(info);
-               console_unlock();
-               break;
-       default:
-               lock_fb_info(info);
-               fb = info->fbops;
-               if (fb->fb_ioctl)
-                       ret = fb->fb_ioctl(info, cmd, arg);
-               else
-                       ret = -ENOTTY;
-               unlock_fb_info(info);
-       }
-       return ret;
-}
-
-static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct fb_info *info = file_fb_info(file);
-
-       if (!info)
-               return -ENODEV;
-       return do_fb_ioctl(info, cmd, arg);
-}
-
-#ifdef CONFIG_COMPAT
-struct fb_fix_screeninfo32 {
-       char                    id[16];
-       compat_caddr_t          smem_start;
-       u32                     smem_len;
-       u32                     type;
-       u32                     type_aux;
-       u32                     visual;
-       u16                     xpanstep;
-       u16                     ypanstep;
-       u16                     ywrapstep;
-       u32                     line_length;
-       compat_caddr_t          mmio_start;
-       u32                     mmio_len;
-       u32                     accel;
-       u16                     reserved[3];
-};
-
-struct fb_cmap32 {
-       u32                     start;
-       u32                     len;
-       compat_caddr_t  red;
-       compat_caddr_t  green;
-       compat_caddr_t  blue;
-       compat_caddr_t  transp;
-};
-
-static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
-                         unsigned long arg)
-{
-       struct fb_cmap32 cmap32;
-       struct fb_cmap cmap_from;
-       struct fb_cmap_user cmap;
-
-       if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32)))
-               return -EFAULT;
-
-       cmap = (struct fb_cmap_user) {
-               .start  = cmap32.start,
-               .len    = cmap32.len,
-               .red    = compat_ptr(cmap32.red),
-               .green  = compat_ptr(cmap32.green),
-               .blue   = compat_ptr(cmap32.blue),
-               .transp = compat_ptr(cmap32.transp),
-       };
-
-       if (cmd == FBIOPUTCMAP)
-               return fb_set_user_cmap(&cmap, info);
-
-       lock_fb_info(info);
-       cmap_from = info->cmap;
-       unlock_fb_info(info);
-
-       return fb_cmap_to_user(&cmap_from, &cmap);
-}
-
-static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
-                                 struct fb_fix_screeninfo32 __user *fix32)
-{
-       __u32 data;
-       int err;
-
-       err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
-
-       data = (__u32) (unsigned long) fix->smem_start;
-       err |= put_user(data, &fix32->smem_start);
-
-       err |= put_user(fix->smem_len, &fix32->smem_len);
-       err |= put_user(fix->type, &fix32->type);
-       err |= put_user(fix->type_aux, &fix32->type_aux);
-       err |= put_user(fix->visual, &fix32->visual);
-       err |= put_user(fix->xpanstep, &fix32->xpanstep);
-       err |= put_user(fix->ypanstep, &fix32->ypanstep);
-       err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
-       err |= put_user(fix->line_length, &fix32->line_length);
-
-       data = (__u32) (unsigned long) fix->mmio_start;
-       err |= put_user(data, &fix32->mmio_start);
-
-       err |= put_user(fix->mmio_len, &fix32->mmio_len);
-       err |= put_user(fix->accel, &fix32->accel);
-       err |= copy_to_user(fix32->reserved, fix->reserved,
-                           sizeof(fix->reserved));
-
-       if (err)
-               return -EFAULT;
-       return 0;
-}
-
-static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
-                             unsigned long arg)
-{
-       struct fb_fix_screeninfo fix;
-
-       lock_fb_info(info);
-       fix = info->fix;
-       if (info->flags & FBINFO_HIDE_SMEM_START)
-               fix.smem_start = 0;
-       unlock_fb_info(info);
-       return do_fscreeninfo_to_user(&fix, compat_ptr(arg));
-}
-
-static long fb_compat_ioctl(struct file *file, unsigned int cmd,
-                           unsigned long arg)
-{
-       struct fb_info *info = file_fb_info(file);
-       const struct fb_ops *fb;
-       long ret = -ENOIOCTLCMD;
-
-       if (!info)
-               return -ENODEV;
-       fb = info->fbops;
-       switch(cmd) {
-       case FBIOGET_VSCREENINFO:
-       case FBIOPUT_VSCREENINFO:
-       case FBIOPAN_DISPLAY:
-       case FBIOGET_CON2FBMAP:
-       case FBIOPUT_CON2FBMAP:
-               arg = (unsigned long) compat_ptr(arg);
-               fallthrough;
-       case FBIOBLANK:
-               ret = do_fb_ioctl(info, cmd, arg);
-               break;
-
-       case FBIOGET_FSCREENINFO:
-               ret = fb_get_fscreeninfo(info, cmd, arg);
-               break;
-
-       case FBIOGETCMAP:
-       case FBIOPUTCMAP:
-               ret = fb_getput_cmap(info, cmd, arg);
-               break;
-
-       default:
-               if (fb->fb_compat_ioctl)
-                       ret = fb->fb_compat_ioctl(info, cmd, arg);
-               break;
-       }
-       return ret;
-}
-#endif
-
-static int
-fb_mmap(struct file *file, struct vm_area_struct * vma)
-{
-       struct fb_info *info = file_fb_info(file);
-       unsigned long mmio_pgoff;
-       unsigned long start;
-       u32 len;
-
-       if (!info)
-               return -ENODEV;
-       mutex_lock(&info->mm_lock);
-
-       if (info->fbops->fb_mmap) {
-               int res;
-
-               /*
-                * The framebuffer needs to be accessed decrypted, be sure
-                * SME protection is removed ahead of the call
-                */
-               vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-               res = info->fbops->fb_mmap(info, vma);
-               mutex_unlock(&info->mm_lock);
-               return res;
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
-       } else if (info->fbdefio) {
-               /*
-                * FB deferred I/O wants you to handle mmap in your drivers. At a
-                * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
-                */
-               dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
-               mutex_unlock(&info->mm_lock);
-               return -ENODEV;
-#endif
-       }
-
-       /*
-        * Ugh. This can be either the frame buffer mapping, or
-        * if pgoff points past it, the mmio mapping.
-        */
-       start = info->fix.smem_start;
-       len = info->fix.smem_len;
-       mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
-       if (vma->vm_pgoff >= mmio_pgoff) {
-               if (info->var.accel_flags) {
-                       mutex_unlock(&info->mm_lock);
-                       return -EINVAL;
-               }
-
-               vma->vm_pgoff -= mmio_pgoff;
-               start = info->fix.mmio_start;
-               len = info->fix.mmio_len;
-       }
-       mutex_unlock(&info->mm_lock);
-
-       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       fb_pgprotect(file, vma, start);
-
-       return vm_iomap_memory(vma, start, len);
-}
-
-static int
-fb_open(struct inode *inode, struct file *file)
-__acquires(&info->lock)
-__releases(&info->lock)
-{
-       int fbidx = iminor(inode);
-       struct fb_info *info;
-       int res = 0;
-
-       info = get_fb_info(fbidx);
-       if (!info) {
-               request_module("fb%d", fbidx);
-               info = get_fb_info(fbidx);
-               if (!info)
-                       return -ENODEV;
-       }
-       if (IS_ERR(info))
-               return PTR_ERR(info);
-
-       lock_fb_info(info);
-       if (!try_module_get(info->fbops->owner)) {
-               res = -ENODEV;
-               goto out;
-       }
-       file->private_data = info;
-       if (info->fbops->fb_open) {
-               res = info->fbops->fb_open(info,1);
-               if (res)
-                       module_put(info->fbops->owner);
-       }
-#ifdef CONFIG_FB_DEFERRED_IO
-       if (info->fbdefio)
-               fb_deferred_io_open(info, inode, file);
-#endif
-out:
-       unlock_fb_info(info);
-       if (res)
-               put_fb_info(info);
-       return res;
-}
-
-static int
-fb_release(struct inode *inode, struct file *file)
-__acquires(&info->lock)
-__releases(&info->lock)
-{
-       struct fb_info * const info = file->private_data;
-
-       lock_fb_info(info);
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
-       if (info->fbdefio)
-               fb_deferred_io_release(info);
-#endif
-       if (info->fbops->fb_release)
-               info->fbops->fb_release(info,1);
-       module_put(info->fbops->owner);
-       unlock_fb_info(info);
-       put_fb_info(info);
-       return 0;
-}
-
-#if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
-static unsigned long get_fb_unmapped_area(struct file *filp,
-                                  unsigned long addr, unsigned long len,
-                                  unsigned long pgoff, unsigned long flags)
-{
-       struct fb_info * const info = filp->private_data;
-       unsigned long fb_size = PAGE_ALIGN(info->fix.smem_len);
-
-       if (pgoff > fb_size || len > fb_size - pgoff)
-               return -EINVAL;
-
-       return (unsigned long)info->screen_base + pgoff;
-}
-#endif
-
-static const struct file_operations fb_fops = {
-       .owner =        THIS_MODULE,
-       .read =         fb_read,
-       .write =        fb_write,
-       .unlocked_ioctl = fb_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = fb_compat_ioctl,
-#endif
-       .mmap =         fb_mmap,
-       .open =         fb_open,
-       .release =      fb_release,
-#if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \
-       (defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && \
-        !defined(CONFIG_MMU))
-       .get_unmapped_area = get_fb_unmapped_area,
-#endif
-#ifdef CONFIG_FB_DEFERRED_IO
-       .fsync =        fb_deferred_io_fsync,
-#endif
-       .llseek =       default_llseek,
-};
-
-struct class *fb_class;
-EXPORT_SYMBOL(fb_class);
-
 static int fb_check_foreignness(struct fb_info *fi)
 {
        const bool foreign_endian = fi->flags & FBINFO_FOREIGN_ENDIAN;
@@ -1447,14 +946,7 @@ static int do_register_framebuffer(struct fb_info *fb_info)
        mutex_init(&fb_info->lock);
        mutex_init(&fb_info->mm_lock);
 
-       fb_info->dev = device_create(fb_class, fb_info->device,
-                                    MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
-       if (IS_ERR(fb_info->dev)) {
-               /* Not fatal */
-               printk(KERN_WARNING "Unable to create device for framebuffer %d; errno = %ld\n", i, PTR_ERR(fb_info->dev));
-               fb_info->dev = NULL;
-       } else
-               fb_init_device(fb_info);
+       fb_device_create(fb_info);
 
        if (fb_info->pixmap.addr == NULL) {
                fb_info->pixmap.addr = kmalloc(FBPIXMAPSIZE, GFP_KERNEL);
@@ -1478,9 +970,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
                INIT_LIST_HEAD(&fb_info->modelist);
 
        if (fb_info->skip_vt_switch)
-               pm_vt_switch_required(fb_info->dev, false);
+               pm_vt_switch_required(fb_info->device, false);
        else
-               pm_vt_switch_required(fb_info->dev, true);
+               pm_vt_switch_required(fb_info->device, true);
 
        fb_var_to_videomode(&mode, &fb_info->var);
        fb_add_videomode(&mode, &fb_info->modelist);
@@ -1515,16 +1007,9 @@ static void unlink_framebuffer(struct fb_info *fb_info)
        if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info))
                return;
 
-       if (!fb_info->dev)
-               return;
-
-       device_destroy(fb_class, MKDEV(FB_MAJOR, i));
-
-       pm_vt_switch_unregister(fb_info->dev);
-
+       fb_device_destroy(fb_info);
+       pm_vt_switch_unregister(fb_info->device);
        unbind_console(fb_info);
-
-       fb_info->dev = NULL;
 }
 
 static void do_unregister_framebuffer(struct fb_info *fb_info)
@@ -1539,7 +1024,6 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
        fb_destroy_modelist(&fb_info->modelist);
        registered_fb[fb_info->node] = NULL;
        num_registered_fb--;
-       fb_cleanup_device(fb_info);
 #ifdef CONFIG_GUMSTIX_AM200EPD
        {
                struct fb_event event;
@@ -1623,60 +1107,48 @@ void fb_set_suspend(struct fb_info *info, int state)
 }
 EXPORT_SYMBOL(fb_set_suspend);
 
-/**
- *     fbmem_init - init frame buffer subsystem
- *
- *     Initialize the frame buffer subsystem.
- *
- *     NOTE: This function is _only_ to be called by drivers/char/mem.c.
- *
- */
-
-static int __init
-fbmem_init(void)
+static int __init fbmem_init(void)
 {
        int ret;
 
-       if (!proc_create_seq("fb", 0, NULL, &proc_fb_seq_ops))
-               return -ENOMEM;
-
-       ret = register_chrdev(FB_MAJOR, "fb", &fb_fops);
-       if (ret) {
-               printk("unable to get major %d for fb devs\n", FB_MAJOR);
-               goto err_chrdev;
-       }
-
        fb_class = class_create("graphics");
        if (IS_ERR(fb_class)) {
                ret = PTR_ERR(fb_class);
-               pr_warn("Unable to create fb class; errno = %d\n", ret);
-               fb_class = NULL;
-               goto err_class;
+               pr_err("Unable to create fb class; errno = %d\n", ret);
+               goto err_fb_class;
        }
 
+       ret = fb_init_procfs();
+       if (ret)
+               goto err_class_destroy;
+
+       ret = fb_register_chrdev();
+       if (ret)
+               goto err_fb_cleanup_procfs;
+
        fb_console_init();
 
        return 0;
 
-err_class:
-       unregister_chrdev(FB_MAJOR, "fb");
-err_chrdev:
-       remove_proc_entry("fb", NULL);
+err_fb_cleanup_procfs:
+       fb_cleanup_procfs();
+err_class_destroy:
+       class_destroy(fb_class);
+err_fb_class:
+       fb_class = NULL;
        return ret;
 }
 
 #ifdef MODULE
-module_init(fbmem_init);
-static void __exit
-fbmem_exit(void)
+static void __exit fbmem_exit(void)
 {
        fb_console_exit();
-
-       remove_proc_entry("fb", NULL);
+       fb_unregister_chrdev();
+       fb_cleanup_procfs();
        class_destroy(fb_class);
-       unregister_chrdev(FB_MAJOR, "fb");
 }
 
+module_init(fbmem_init);
 module_exit(fbmem_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Framebuffer base");
index 0c33c4adcd798be80babd9bfc513ece7f588bb4a..fafe574398b01e45976e3cd2c1f6bad38297ad52 100644 (file)
@@ -5,92 +5,14 @@
  * Copyright (c) 2004 James Simmons <jsimmons@infradead.org>
  */
 
-/*
- * Note:  currently there's only stubs for framebuffer_alloc and
- * framebuffer_release here.  The reson for that is that until all drivers
- * are converted to use it a sysfsification will open OOPSable races.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/console.h>
 #include <linux/fb.h>
 #include <linux/fbcon.h>
-#include <linux/console.h>
-#include <linux/module.h>
-
-#define FB_SYSFS_FLAG_ATTR 1
-
-/**
- * framebuffer_alloc - creates a new frame buffer info structure
- *
- * @size: size of driver private data, can be zero
- * @dev: pointer to the device for this fb, this can be NULL
- *
- * Creates a new frame buffer info structure. Also reserves @size bytes
- * for driver private data (info->par). info->par (if any) will be
- * aligned to sizeof(long).
- *
- * Returns the new structure, or NULL if an error occurred.
- *
- */
-struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
-{
-#define BYTES_PER_LONG (BITS_PER_LONG/8)
-#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
-       int fb_info_size = sizeof(struct fb_info);
-       struct fb_info *info;
-       char *p;
-
-       if (size)
-               fb_info_size += PADDING;
-
-       p = kzalloc(fb_info_size + size, GFP_KERNEL);
-
-       if (!p)
-               return NULL;
-
-       info = (struct fb_info *) p;
-
-       if (size)
-               info->par = p + fb_info_size;
-
-       info->device = dev;
-       info->fbcon_rotate_hint = -1;
-
-#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
-       mutex_init(&info->bl_curve_mutex);
-#endif
-
-       return info;
-#undef PADDING
-#undef BYTES_PER_LONG
-}
-EXPORT_SYMBOL(framebuffer_alloc);
+#include <linux/major.h>
 
-/**
- * framebuffer_release - marks the structure available for freeing
- *
- * @info: frame buffer info structure
- *
- * Drop the reference count of the device embedded in the
- * framebuffer info structure.
- *
- */
-void framebuffer_release(struct fb_info *info)
-{
-       if (!info)
-               return;
+#include "fb_internal.h"
 
-       if (WARN_ON(refcount_read(&info->count)))
-               return;
-
-#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
-       mutex_destroy(&info->bl_curve_mutex);
-#endif
-
-       kfree(info);
-}
-EXPORT_SYMBOL(framebuffer_release);
+#define FB_SYSFS_FLAG_ATTR 1
 
 static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
 {
@@ -516,7 +438,7 @@ static struct device_attribute device_attrs[] = {
 #endif
 };
 
-int fb_init_device(struct fb_info *fb_info)
+static int fb_init_device(struct fb_info *fb_info)
 {
        int i, error = 0;
 
@@ -540,7 +462,7 @@ int fb_init_device(struct fb_info *fb_info)
        return 0;
 }
 
-void fb_cleanup_device(struct fb_info *fb_info)
+static void fb_cleanup_device(struct fb_info *fb_info)
 {
        unsigned int i;
 
@@ -552,29 +474,33 @@ void fb_cleanup_device(struct fb_info *fb_info)
        }
 }
 
-#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
-/* This function generates a linear backlight curve
- *
- *     0: off
- *   1-7: min
- * 8-127: linear from min to max
- */
-void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
+int fb_device_create(struct fb_info *fb_info)
 {
-       unsigned int i, flat, count, range = (max - min);
-
-       mutex_lock(&fb_info->bl_curve_mutex);
+       int node = fb_info->node;
+       dev_t devt = MKDEV(FB_MAJOR, node);
+       int ret;
+
+       fb_info->dev = device_create(fb_class, fb_info->device, devt, NULL, "fb%d", node);
+       if (IS_ERR(fb_info->dev)) {
+               /* Not fatal */
+               ret = PTR_ERR(fb_info->dev);
+               pr_warn("Unable to create device for framebuffer %d; error %d\n", node, ret);
+               fb_info->dev = NULL;
+       } else {
+               fb_init_device(fb_info);
+       }
 
-       fb_info->bl_curve[0] = off;
+       return 0;
+}
 
-       for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
-               fb_info->bl_curve[flat] = min;
+void fb_device_destroy(struct fb_info *fb_info)
+{
+       dev_t devt = MKDEV(FB_MAJOR, fb_info->node);
 
-       count = FB_BACKLIGHT_LEVELS * 15 / 16;
-       for (i = 0; i < count; ++i)
-               fb_info->bl_curve[flat + i] = min + (range * (i + 1) / count);
+       if (!fb_info->dev)
+               return;
 
-       mutex_unlock(&fb_info->bl_curve_mutex);
+       fb_cleanup_device(fb_info);
+       device_destroy(fb_class, devt);
+       fb_info->dev = NULL;
 }
-EXPORT_SYMBOL_GPL(fb_bl_default_curve);
-#endif
index 38c0a6866d760561b18db6195cf283a7a4a4cb7e..98ea56a9abf1e39e87ac78be83efec6f5e14fa71 100644 (file)
@@ -1459,7 +1459,7 @@ static struct cfb_info *cyberpro_alloc_fb_info(unsigned int id, char *name)
        cfb->fb.var.accel_flags = FB_ACCELF_TEXT;
 
        cfb->fb.fbops           = &cyber2000fb_ops;
-       cfb->fb.flags           = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       cfb->fb.flags           = FBINFO_HWACCEL_YPAN;
        cfb->fb.pseudo_palette  = cfb->pseudo_palette;
 
        spin_lock_init(&cfb->reg_b0_lock);
index 60cd1286370f163b3d885c0dbbdcf9d3b818e135..4ca70a1bdd3b288c225e1621596d66fc18b21de9 100644 (file)
@@ -1295,14 +1295,12 @@ static int da8xxfb_set_par(struct fb_info *info)
 
 static const struct fb_ops da8xx_fb_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var = fb_check_var,
        .fb_set_par = da8xxfb_set_par,
        .fb_setcolreg = fb_setcolreg,
        .fb_pan_display = da8xx_pan_display,
        .fb_ioctl = fb_ioctl,
-       .fb_fillrect = cfb_fillrect,
-       .fb_copyarea = cfb_copyarea,
-       .fb_imageblit = cfb_imageblit,
        .fb_blank = cfb_blank,
 };
 
@@ -1463,7 +1461,6 @@ static int fb_probe(struct platform_device *device)
        da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
 
        /* Initialize fbinfo */
-       da8xx_fb_info->flags = FBINFO_FLAG_DEFAULT;
        da8xx_fb_info->fix = da8xx_fb_fix;
        da8xx_fb_info->var = da8xx_fb_var;
        da8xx_fb_info->fbops = &da8xx_fb_ops;
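
This and the following driver conversions replace the open-coded cfb_*
callbacks with the FB_DEFAULT_IOMEM_OPS initializer from <linux/fb.h>. As a
hedged paraphrase (from memory of this series, not a verbatim copy of the
macro, which also leaves fb_mmap to the core's default I/O-memory mapping),
the initializer stands in for roughly these slots:

	static const struct fb_ops example_ops = {
		.owner        = THIS_MODULE,
		/* what FB_DEFAULT_IOMEM_OPS covers, approximately: */
		.fb_read      = fb_io_read,
		.fb_write     = fb_io_write,
		.fb_fillrect  = cfb_fillrect,	/* I/O-memory drawing helpers */
		.fb_copyarea  = cfb_copyarea,
		.fb_imageblit = cfb_imageblit,
	};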
index 3d7be69ab593fbc6c97f774f1f2fe2f45f510505..f9b4ddd592ce4deecb803d2fcc07b02031dae85f 100644 (file)
@@ -277,11 +277,9 @@ static void efifb_destroy(struct fb_info *info)
 
 static const struct fb_ops efifb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_destroy     = efifb_destroy,
        .fb_setcolreg   = efifb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int efifb_setup(char *options)
@@ -555,7 +553,6 @@ static int efifb_probe(struct platform_device *dev)
        info->fbops = &efifb_ops;
        info->var = efifb_defined;
        info->fix = efifb_fix;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        orientation = drm_get_panel_orientation_quirk(efifb_defined.xres,
                                                      efifb_defined.yres);
index 94fe52928be251eae5d3815bc0e0341ff45883c1..037df9cb9675f770da7fb8395a9d3dff080e1c51 100644 (file)
@@ -312,7 +312,7 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
 
        if (offset < info->fix.smem_len) {
-               return dma_mmap_wc(info->dev, vma, info->screen_base,
+               return dma_mmap_wc(info->device, vma, info->screen_base,
                                   info->fix.smem_start, info->fix.smem_len);
        }
 
@@ -423,7 +423,7 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info)
        /* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
        fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
 
-       virt_addr = dma_alloc_wc(info->dev, fb_size, &phys_addr, GFP_KERNEL);
+       virt_addr = dma_alloc_wc(info->device, fb_size, &phys_addr, GFP_KERNEL);
        if (!virt_addr)
                return -ENOMEM;
 
@@ -436,11 +436,11 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info)
         * least.
         */
        if (check_screenpage_bug && phys_addr & (1 << 27)) {
-               dev_err(info->dev, "ep93xx framebuffer bug. phys addr (0x%x) "
-                       "has bit 27 set: cannot init framebuffer\n",
-                       phys_addr);
+               fb_err(info, "ep93xx framebuffer bug. phys addr (0x%x) "
+                      "has bit 27 set: cannot init framebuffer\n",
+                      phys_addr);
 
-               dma_free_coherent(info->dev, fb_size, virt_addr, phys_addr);
+               dma_free_coherent(info->device, fb_size, virt_addr, phys_addr);
                return -ENOMEM;
        }
 
@@ -454,7 +454,7 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info)
 static void ep93xxfb_dealloc_videomem(struct fb_info *info)
 {
        if (info->screen_base)
-               dma_free_coherent(info->dev, info->fix.smem_len,
+               dma_free_coherent(info->device, info->fix.smem_len,
                                  info->screen_base, info->fix.smem_start);
 }
 
@@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
-       info->dev = &pdev->dev;
        platform_set_drvdata(pdev, info);
        fbi = info->par;
        fbi->mach_info = mach_info;
@@ -516,7 +515,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
        info->fix.accel         = FB_ACCEL_NONE;
        info->var.activate      = FB_ACTIVATE_NOW;
        info->var.vmode         = FB_VMODE_NONINTERLACED;
-       info->flags             = FBINFO_DEFAULT;
        info->node              = -1;
        info->state             = FBINFO_STATE_RUNNING;
        info->pseudo_palette    = &fbi->pseudo_palette;
@@ -525,7 +523,7 @@ static int ep93xxfb_probe(struct platform_device *pdev)
        err = fb_find_mode(&info->var, info, video_mode,
                           NULL, 0, NULL, 16);
        if (err == 0) {
-               dev_err(info->dev, "No suitable video mode found\n");
+               fb_err(info, "No suitable video mode found\n");
                err = -EINVAL;
                goto failed_resource;
        }
@@ -554,8 +552,8 @@ static int ep93xxfb_probe(struct platform_device *pdev)
        if (err)
                goto failed_framebuffer;
 
-       dev_info(info->dev, "registered. Mode = %dx%d-%d\n",
-                info->var.xres, info->var.yres, info->var.bits_per_pixel);
+       fb_info(info, "registered. Mode = %dx%d-%d\n",
+               info->var.xres, info->var.yres, info->var.bits_per_pixel);
        return 0;
 
 failed_framebuffer:
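
The ep93xx changes above follow from the split: info->dev is the
/sys/class/graphics/fbN device, which the core only creates with
CONFIG_FB_DEVICE enabled, while info->device is the parent hardware device
the driver passed to framebuffer_alloc(). DMA must therefore target
info->device. A one-function sketch of the distinction (mydrv_alloc_vram()
is hypothetical):

	#include <linux/dma-mapping.h>
	#include <linux/fb.h>

	static void *mydrv_alloc_vram(struct fb_info *info, size_t size,
				      dma_addr_t *dma)
	{
		/* the parent device, not the fbdev class device */
		return dma_alloc_wc(info->device, size, dma, GFP_KERNEL);
	}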
index c6d3111dcbb073da7319b12b1bddb957b7a98869..e4fe13059ad59e54e8f53b373c6cadcb48b629b3 100644 (file)
@@ -929,8 +929,7 @@ static int ffb_probe(struct platform_device *op)
        /* Don't mention copyarea, so SCROLL_REDRAW is always
         * used.  It is the fastest on this chip.
         */
-       info->flags = (FBINFO_DEFAULT |
-                      /* FBINFO_HWACCEL_COPYAREA | */
+       info->flags = (/* FBINFO_HWACCEL_COPYAREA | */
                       FBINFO_HWACCEL_FILLRECT |
                       FBINFO_HWACCEL_IMAGEBLIT);
 
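Dropping FBINFO_DEFAULT and its alias FBINFO_FLAG_DEFAULT throughout this series is behavior-neutral: both have long been defined as 0 in <linux/fb.h>, so ORing them into info->flags never set a bit. Sketched:

/* from <linux/fb.h> */
#define FBINFO_DEFAULT		0
#define FBINFO_FLAG_DEFAULT	FBINFO_DEFAULT

/* hence these two assignments are identical: */
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
info->flags = FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
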
index 942e382cf1cf9fd90120873b8812a252cc863dee..25d2e716edf2d4b7bc94a9bdc44b3d248cd4f798 100644 (file)
@@ -167,11 +167,9 @@ static int fm2fb_blank(int blank, struct fb_info *info);
 
 static const struct fb_ops fm2fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = fm2fb_setcolreg,
        .fb_blank       = fm2fb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
     /*
@@ -280,7 +278,6 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
        info->pseudo_palette = info->par;
        info->par = NULL;
        info->fix = fb_fix;
-       info->flags = FBINFO_DEFAULT;
 
        if (register_framebuffer(info) < 0) {
                fb_dealloc_cmap(&info->cmap);
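
FB_DEFAULT_IOMEM_OPS, used in fm2fb above and throughout the conversions below, initializes the fb_ops of an I/O-memory framebuffer to the stock helpers in one designated-initializer group, replacing the hand-rolled cfb_* triplets. Approximately, per <linux/fb.h> of this cycle:

#define FB_DEFAULT_IOMEM_OPS \
	.fb_read	= fb_io_read, \
	.fb_write	= fb_io_write, \
	.fb_fillrect	= cfb_fillrect, \
	.fb_copyarea	= cfb_copyarea, \
	.fb_imageblit	= cfb_imageblit, \
	.fb_mmap	= NULL	/* fall back to the fbdev core's default mapping */

Drivers keep listing only the callbacks they actually override, as the structs above and below show.
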
index 730a07d23fa92a0ff4495b7622a2455b20c6c31b..7fbd9f069ac2ed8c62c463a07b8d8c73d2a11849 100644 (file)
@@ -872,7 +872,7 @@ static int map_video_memory(struct fb_info *info)
 
        p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
        if (!p) {
-               dev_err(info->dev, "unable to allocate fb memory\n");
+               fb_err(info, "unable to allocate fb memory\n");
                return -ENOMEM;
        }
        mutex_lock(&info->mm_lock);
@@ -1145,7 +1145,7 @@ static int fsl_diu_set_par(struct fb_info *info)
 
                /* Memory allocation for framebuffer */
                if (map_video_memory(info)) {
-                       dev_err(info->dev, "unable to allocate fb memory 1\n");
+                       fb_err(info, "unable to allocate fb memory 1\n");
                        return -ENOMEM;
                }
        }
@@ -1277,16 +1277,16 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
        if (!arg)
                return -EINVAL;
 
-       dev_dbg(info->dev, "ioctl %08x (dir=%s%s type=%u nr=%u size=%u)\n", cmd,
+       fb_dbg(info, "ioctl %08x (dir=%s%s type=%u nr=%u size=%u)\n", cmd,
                _IOC_DIR(cmd) & _IOC_READ ? "R" : "",
                _IOC_DIR(cmd) & _IOC_WRITE ? "W" : "",
                _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
 
        switch (cmd) {
        case MFB_SET_PIXFMT_OLD:
-               dev_warn(info->dev,
-                        "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
-                        MFB_SET_PIXFMT_OLD);
+               fb_warn(info,
+                       "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
+                       MFB_SET_PIXFMT_OLD);
                fallthrough;
        case MFB_SET_PIXFMT:
                if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
@@ -1294,9 +1294,9 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
                ad->pix_fmt = pix_fmt;
                break;
        case MFB_GET_PIXFMT_OLD:
-               dev_warn(info->dev,
-                        "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
-                        MFB_GET_PIXFMT_OLD);
+               fb_warn(info,
+                       "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
+                       MFB_GET_PIXFMT_OLD);
                fallthrough;
        case MFB_GET_PIXFMT:
                pix_fmt = ad->pix_fmt;
@@ -1375,7 +1375,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
        }
 #endif
        default:
-               dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
+               fb_err(info, "unknown ioctl command (0x%08X)\n", cmd);
                return -ENOIOCTLCMD;
        }
 
@@ -1451,13 +1451,11 @@ static int fsl_diu_release(struct fb_info *info, int user)
 
 static const struct fb_ops fsl_diu_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var = fsl_diu_check_var,
        .fb_set_par = fsl_diu_set_par,
        .fb_setcolreg = fsl_diu_setcolreg,
        .fb_pan_display = fsl_diu_pan_display,
-       .fb_fillrect = cfb_fillrect,
-       .fb_copyarea = cfb_copyarea,
-       .fb_imageblit = cfb_imageblit,
        .fb_ioctl = fsl_diu_ioctl,
        .fb_open = fsl_diu_open,
        .fb_release = fsl_diu_release,
@@ -1476,7 +1474,7 @@ static int install_fb(struct fb_info *info)
 
        info->var.activate = FB_ACTIVATE_NOW;
        info->fbops = &fsl_diu_ops;
-       info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
+       info->flags = FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
                FBINFO_READS_FAST;
        info->pseudo_palette = mfbi->pseudo_palette;
 
@@ -1543,21 +1541,21 @@ static int install_fb(struct fb_info *info)
        }
 
        if (fsl_diu_check_var(&info->var, info)) {
-               dev_err(info->dev, "fsl_diu_check_var failed\n");
+               fb_err(info, "fsl_diu_check_var failed\n");
                unmap_video_memory(info);
                fb_dealloc_cmap(&info->cmap);
                return -EINVAL;
        }
 
        if (register_framebuffer(info) < 0) {
-               dev_err(info->dev, "register_framebuffer failed\n");
+               fb_err(info, "register_framebuffer failed\n");
                unmap_video_memory(info);
                fb_dealloc_cmap(&info->cmap);
                return -EINVAL;
        }
 
        mfbi->registered = 1;
-       dev_info(info->dev, "%s registered successfully\n", mfbi->id);
+       fb_info(info, "%s registered successfully\n", mfbi->id);
 
        return 0;
 }
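
The dev_*() to fb_*() logging conversions in fsl-diu and the other drivers here rely on the fbdev message helpers from <linux/fb.h>, which prefix the fbN device name and never touch info->dev. Approximately:

#define fb_err(fb_info, fmt, ...) \
	pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_warn(fb_info, fmt, ...) \
	pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_dbg(fb_info, fmt, ...) \
	pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)

fb_info() and fb_notice() follow the same shape; the collision between the fb_info() macro and the many struct fb_info *info variables above is harmless.
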
index c5b7673ddc6caf04f4e46ae0d2dc38f37c01fdb8..7a1013b22fa7172ef3c42371aa08f8dfe961306a 100644 (file)
@@ -112,12 +112,10 @@ static int g364fb_blank(int blank, struct fb_info *info);
 
 static const struct fb_ops g364fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = g364fb_setcolreg,
        .fb_pan_display = g364fb_pan_display,
        .fb_blank       = g364fb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /*
@@ -219,7 +217,7 @@ int __init g364fb_init(void)
        fb_info.screen_base = (char *) G364_MEM_BASE;   /* virtual kernel address */
        fb_info.var = fb_var;
        fb_info.fix = fb_fix;
-       fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       fb_info.flags = FBINFO_HWACCEL_YPAN;
 
        fb_alloc_cmap(&fb_info.cmap, 255, 0);
 
index 3f141e21b7e00e3a53aa62a047f861ba35aa72c6..4fccdccbc364aa71d42d1571161c26e999b5901e 100644 (file)
@@ -1194,7 +1194,6 @@ static int gbefb_probe(struct platform_device *p_dev)
 
        info->fbops = &gbefb_ops;
        info->pseudo_palette = pseudo_palette;
-       info->flags = FBINFO_DEFAULT;
        info->screen_base = gbe_mem;
        fb_alloc_cmap(&info->cmap, 256, 0);
 
index b184085a78c243636dca31bee29feb37ef569c4c..9a49916e0492961502ff92b491886e2c1bc1c7f5 100644 (file)
@@ -13,9 +13,7 @@ config FB_GEODE
 config FB_GEODE_LX
        tristate "AMD Geode LX framebuffer support"
        depends on FB && FB_GEODE
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Framebuffer driver for the display controller integrated into the
@@ -29,9 +27,7 @@ config FB_GEODE_LX
 config FB_GEODE_GX
        tristate "AMD Geode GX framebuffer support"
        depends on FB && FB_GEODE
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Framebuffer driver for the display controller integrated into the
@@ -45,9 +41,7 @@ config FB_GEODE_GX
 config FB_GEODE_GX1
        tristate "AMD Geode GX1 framebuffer support"
        depends on FB && FB_GEODE
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        select VIDEO_NOMODESET
        help
          Framebuffer driver for the display controller integrated into the
index 9c942001ac10359496cce6168ef2af526c88675a..a1919c1934acaa92fd54c8e3afa54d1cea764b5f 100644 (file)
@@ -255,14 +255,11 @@ static int parse_panel_option(struct fb_info *info)
 
 static const struct fb_ops gx1fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = gx1fb_check_var,
        .fb_set_par     = gx1fb_set_par,
        .fb_setcolreg   = gx1fb_setcolreg,
        .fb_blank       = gx1fb_blank,
-       /* No HW acceleration for now. */
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static struct fb_info *gx1fb_init_fbinfo(struct device *dev)
@@ -294,7 +291,6 @@ static struct fb_info *gx1fb_init_fbinfo(struct device *dev)
        info->var.vmode = FB_VMODE_NONINTERLACED;
 
        info->fbops             = &gx1fb_ops;
-       info->flags             = FBINFO_DEFAULT;
        info->node              = -1;
 
        info->pseudo_palette    = (void *)par + sizeof(struct geodefb_par);
index 8e05e76de0759d47461864dfd55ee04564b7f40d..af996634c1a9c0f43377b9d9cb1320313e700827 100644 (file)
@@ -268,14 +268,11 @@ static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 
 static const struct fb_ops gxfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = gxfb_check_var,
        .fb_set_par     = gxfb_set_par,
        .fb_setcolreg   = gxfb_setcolreg,
        .fb_blank       = gxfb_blank,
-       /* No HW acceleration for now. */
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static struct fb_info *gxfb_init_fbinfo(struct device *dev)
@@ -308,7 +305,6 @@ static struct fb_info *gxfb_init_fbinfo(struct device *dev)
        info->var.vmode = FB_VMODE_NONINTERLACED;
 
        info->fbops             = &gxfb_ops;
-       info->flags             = FBINFO_DEFAULT;
        info->node              = -1;
 
        info->pseudo_palette    = (void *)par + sizeof(struct gxfb_par);
index 556d8b1a9e06aefd6ec34796d9a60963c733f956..cad99f5b7fe82868bb1fbe4116097af7c671edb3 100644 (file)
@@ -392,14 +392,11 @@ static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 
 static const struct fb_ops lxfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = lxfb_check_var,
        .fb_set_par     = lxfb_set_par,
        .fb_setcolreg   = lxfb_setcolreg,
        .fb_blank       = lxfb_blank,
-       /* No HW acceleration for now. */
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static struct fb_info *lxfb_init_fbinfo(struct device *dev)
@@ -432,7 +429,6 @@ static struct fb_info *lxfb_init_fbinfo(struct device *dev)
        info->var.vmode = FB_VMODE_NONINTERLACED;
 
        info->fbops             = &lxfb_ops;
-       info->flags             = FBINFO_DEFAULT;
        info->node              = -1;
 
        info->pseudo_palette    = (void *)par + sizeof(struct lxfb_par);
index 6fa2108fd912de0a4879094417d8b61febc6f2c1..60c8a20d6fcda007ee993fa6203e69f5c86702f9 100644 (file)
@@ -162,14 +162,12 @@ static int goldfish_fb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops goldfish_fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = goldfish_fb_check_var,
        .fb_set_par     = goldfish_fb_set_par,
        .fb_setcolreg   = goldfish_fb_setcolreg,
        .fb_pan_display = goldfish_fb_pan_display,
        .fb_blank       = goldfish_fb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 
@@ -212,7 +210,6 @@ static int goldfish_fb_probe(struct platform_device *pdev)
        height = readl(fb->reg_base + FB_GET_HEIGHT);
 
        fb->fb.fbops            = &goldfish_fb_ops;
-       fb->fb.flags            = FBINFO_FLAG_DEFAULT;
        fb->fb.pseudo_palette   = fb->cmap;
        fb->fb.fix.type         = FB_TYPE_PACKED_PIXELS;
        fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
index 9aa15be29ea9d5920ba4d3f0d6e385eae2146f99..2fa9a274659c965e72396559ddd1cf8c6a7eb3a7 100644 (file)
@@ -253,13 +253,11 @@ static int grvga_pan_display(struct fb_var_screeninfo *var,
 
 static const struct fb_ops grvga_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = grvga_check_var,
        .fb_set_par     = grvga_set_par,
        .fb_setcolreg   = grvga_setcolreg,
        .fb_pan_display = grvga_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit
 };
 
 static int grvga_parse_custom(char *options,
@@ -377,7 +375,7 @@ static int grvga_probe(struct platform_device *dev)
        info->fbops = &grvga_ops;
        info->fix = grvga_fix;
        info->pseudo_palette = par->color_palette;
-       info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
        info->fix.smem_len = grvga_mem_size;
 
        if (!devm_request_mem_region(&dev->dev, dev->resource[0].start,
index 5f42d3d9d6cef66301252a302bc07a59040d3166..15a82c6b609ea7cc9d5152cbbd9f218288f58210 100644 (file)
@@ -602,14 +602,12 @@ static const struct fb_fix_screeninfo gxt4500_fix = {
 
 static const struct fb_ops gxt4500_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var = gxt4500_check_var,
        .fb_set_par = gxt4500_set_par,
        .fb_setcolreg = gxt4500_setcolreg,
        .fb_pan_display = gxt4500_pan_display,
        .fb_blank = gxt4500_blank,
-       .fb_fillrect = cfb_fillrect,
-       .fb_copyarea = cfb_copyarea,
-       .fb_imageblit = cfb_imageblit,
 };
 
 /* PCI functions */
@@ -690,8 +688,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
        info->fbops = &gxt4500_ops;
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_HWACCEL_XPAN |
-                                           FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
 
        err = fb_alloc_cmap(&info->cmap, 256, 0);
        if (err) {
index 7ce0a16ce8b9b133ca794ab53a3051b9aac38478..ef526ed4a2d9ca39aa893b8403666b9e43bdb302 100644 (file)
@@ -120,90 +120,28 @@ static void hecubafb_dpy_deferred_io(struct fb_info *info, struct list_head *pag
        hecubafb_dpy_update(info->par);
 }
 
-static void hecubafb_fillrect(struct fb_info *info,
-                                  const struct fb_fillrect *rect)
+static void hecubafb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
 {
        struct hecubafb_par *par = info->par;
 
-       sys_fillrect(info, rect);
-
        hecubafb_dpy_update(par);
 }
 
-static void hecubafb_copyarea(struct fb_info *info,
-                                  const struct fb_copyarea *area)
+static void hecubafb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+                                      u32 width, u32 height)
 {
        struct hecubafb_par *par = info->par;
 
-       sys_copyarea(info, area);
-
        hecubafb_dpy_update(par);
 }
 
-static void hecubafb_imageblit(struct fb_info *info,
-                               const struct fb_image *image)
-{
-       struct hecubafb_par *par = info->par;
-
-       sys_imageblit(info, image);
-
-       hecubafb_dpy_update(par);
-}
-
-/*
- * this is the slow path from userspace. they can seek and write to
- * the fb. it's inefficient to do anything less than a full screen draw
- */
-static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       struct hecubafb_par *par = info->par;
-       unsigned long p = *ppos;
-       void *dst;
-       int err = 0;
-       unsigned long total_size;
-
-       if (!info->screen_buffer)
-               return -ENODEV;
-
-       total_size = info->fix.smem_len;
-
-       if (p > total_size)
-               return -EFBIG;
-
-       if (count > total_size) {
-               err = -EFBIG;
-               count = total_size;
-       }
-
-       if (count + p > total_size) {
-               if (!err)
-                       err = -ENOSPC;
-
-               count = total_size - p;
-       }
-
-       dst = info->screen_buffer + p;
-
-       if (copy_from_user(dst, buf, count))
-               err = -EFAULT;
-
-       if  (!err)
-               *ppos += count;
-
-       hecubafb_dpy_update(par);
-
-       return (err) ? err : count;
-}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(hecubafb,
+                                  hecubafb_defio_damage_range,
+                                  hecubafb_defio_damage_area)
 
 static const struct fb_ops hecubafb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_read        = fb_sys_read,
-       .fb_write       = hecubafb_write,
-       .fb_fillrect    = hecubafb_fillrect,
-       .fb_copyarea    = hecubafb_copyarea,
-       .fb_imageblit   = hecubafb_imageblit,
-       .fb_mmap        = fb_deferred_io_mmap,
+       .owner  = THIS_MODULE,
+       FB_DEFAULT_DEFERRED_OPS(hecubafb),
 };
 
 static struct fb_deferred_io hecubafb_defio = {
@@ -251,7 +189,7 @@ static int hecubafb_probe(struct platform_device *dev)
        par->send_command = apollo_send_command;
        par->send_data = apollo_send_data;
 
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->flags = FBINFO_VIRTFB;
 
        info->fbdefio = &hecubafb_defio;
        fb_deferred_io_init(info);
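
The hecubafb rewrite is the standard deferred-I/O conversion: FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS() generates hecubafb_defio_{read,write,fillrect,copyarea,imageblit}() wrappers around the fb_sys_*()/sys_*() helpers, with the write and drawing wrappers forwarding the touched region to the two damage callbacks, and FB_DEFAULT_DEFERRED_OPS() wires the wrappers plus fb_deferred_io_mmap into fb_ops. A hand-expanded sketch of the generated write path, assuming the macros behave as in <linux/fb.h> of this cycle:

static void examplefb_defio_damage_range(struct fb_info *info, off_t off, size_t len);

static ssize_t examplefb_defio_write(struct fb_info *info,
				     const char __user *buf,
				     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	ssize_t ret;

	/* copy into the shadow buffer in system memory */
	ret = fb_sys_write(info, buf, count, ppos);
	if (ret > 0)
		/* driver callback; hecubafb ignores the range and
		 * schedules a full-screen refresh */
		examplefb_defio_damage_range(info, pos, ret);
	return ret;
}

This is why the open-coded hecubafb_write() bounds checking can go: the common helper performs it already.
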
index 0af58018441d30aa0dc893be291848c6b1b07019..6a64e6d7255eb6379175d6da292a5f52644cdf38 100644 (file)
@@ -573,7 +573,7 @@ static int hgafb_probe(struct platform_device *pdev)
        hga_fix.smem_start = (unsigned long)hga_vram;
        hga_fix.smem_len = hga_vram_len;
 
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
        info->var = hga_default_var;
        info->fix = hga_fix;
        info->monspecs.hfmin = 0;
index 9fd196637d143894823e1317e3602c614a41bddb..17715eaf06732751b5236ef9af9571474c78311e 100644 (file)
@@ -405,7 +405,7 @@ static int hitfb_probe(struct platform_device *dev)
        info->var = hitfb_var;
        info->fix = hitfb_fix;
        info->pseudo_palette = info->par;
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+       info->flags = FBINFO_HWACCEL_YPAN |
                FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
 
        info->screen_base = (char __iomem *)(uintptr_t)hitfb_fix.smem_start;
index 77fbff47b1a8ed11c1dcd717a7745cad664b466a..406c1383cbda9156b30f55f39872f8a771778daa 100644 (file)
@@ -287,7 +287,6 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
        else
                strcat(fb_info.fix.id, "Catseye");
        fb_info.fbops = &hpfb_ops;
-       fb_info.flags = FBINFO_DEFAULT;
        fb_info.var   = hpfb_defined;
        fb_info.screen_base = (char *)fb_start;
 
index 1ae35ab62b290709ebbaf24c63efbcb235da7289..b9965cbdd7642795cd41474cc0c75a661775ced5 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/aperture.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/screen_info.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/completion.h>
@@ -1158,8 +1159,6 @@ static int hvfb_probe(struct hv_device *hdev,
        }
 
        /* Set up fb_info */
-       info->flags = FBINFO_DEFAULT;
-
        info->var.xres_virtual = info->var.xres = screen_width;
        info->var.yres_virtual = info->var.yres = screen_height;
        info->var.bits_per_pixel = screen_depth;
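
hyperv_fb now includes <linux/screen_info.h> directly; this looks like fallout from <linux/fb.h> no longer pulling that header in, and the driver's Gen1 path keeps reading the firmware framebuffer description from the global screen_info. A hedged sketch of that kind of access, with a hypothetical helper name:

#include <linux/printk.h>
#include <linux/screen_info.h>	/* declares the global 'screen_info' */

static void examplefb_report_fw_lfb(void)
{
	pr_info("firmware lfb at 0x%x, %u bytes\n",
		screen_info.lfb_base, screen_info.lfb_size);
}
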
index 3860b137b86a929089ee22586e2d1a589befc23e..1897e65ab7031dfbf9f3e907ac245661bc88ba18 100644 (file)
@@ -994,14 +994,12 @@ static const struct fb_ops i740fb_ops = {
        .owner          = THIS_MODULE,
        .fb_open        = i740fb_open,
        .fb_release     = i740fb_release,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = i740fb_check_var,
        .fb_set_par     = i740fb_set_par,
        .fb_setcolreg   = i740fb_setcolreg,
        .fb_blank       = i740fb_blank,
        .fb_pan_display = i740fb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /* ------------------------------------------------------------------------- */
@@ -1077,7 +1075,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
        info->fix.mmio_len = pci_resource_len(dev, 1);
        info->fix.smem_start = pci_resource_start(dev, 0);
        info->fix.smem_len = info->screen_size;
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
 
        if (i740fb_setup_ddc_bus(info) == 0) {
                par->ddc_registered = true;
index 85abb65f07d7ddd5a49be5519aa58c33ef47e7cd..f5511bb4fadcabe5c6b300fcae10373d0fdf7441 100644 (file)
@@ -1442,13 +1442,13 @@ static int i810fb_set_par(struct fb_info *info)
        encode_fix(&info->fix, info);
 
        if (info->var.accel_flags && !(par->dev_flags & LOCKUP)) {
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+               info->flags = FBINFO_HWACCEL_YPAN |
                FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
                FBINFO_HWACCEL_IMAGEBLIT;
                info->pixmap.scan_align = 2;
        } else {
                info->pixmap.scan_align = 1;
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+               info->flags = FBINFO_HWACCEL_YPAN;
        }
        return 0;
 }
index ee7d01ad14068687b968a42f1558ed730664e47b..f4c8677488fb883405dc998c874dff9fe53933cb 100644 (file)
@@ -1447,8 +1447,7 @@ static int init_imstt(struct fb_info *info)
        info->var.pixclock = 1000000 / getclkMHz(par);
 
        info->fbops = &imsttfb_ops;
-       info->flags = FBINFO_DEFAULT |
-                      FBINFO_HWACCEL_COPYAREA |
+       info->flags = FBINFO_HWACCEL_COPYAREA |
                      FBINFO_HWACCEL_FILLRECT |
                      FBINFO_HWACCEL_YPAN;
 
index adf36690c342b06099f48e9f18c4c73303629617..ee2b93bd231f969d2e37d3c57ff8941f80bf8e0e 100644 (file)
@@ -580,12 +580,10 @@ static int imxfb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops imxfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = imxfb_check_var,
        .fb_set_par     = imxfb_set_par,
        .fb_setcolreg   = imxfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_blank       = imxfb_blank,
 };
 
@@ -698,8 +696,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
        info->var.vmode                 = FB_VMODE_NONINTERLACED;
 
        info->fbops                     = &imxfb_ops;
-       info->flags                     = FBINFO_FLAG_DEFAULT |
-                                         FBINFO_READS_FAST;
+       info->flags                     = FBINFO_READS_FAST;
 
        np = pdev->dev.of_node;
        info->var.grayscale = of_property_read_bool(np,
index a81095b2b1ea592e6eaabc03f0f880d459e9e1fd..3d334f17195966be46476e718a16ad07f59cb165 100644 (file)
@@ -1098,7 +1098,6 @@ static int intelfb_set_fbinfo(struct intelfb_info *dinfo)
 
        DBG_MSG("intelfb_set_fbinfo\n");
 
-       info->flags = FBINFO_FLAG_DEFAULT;
        info->fbops = &intel_fb_ops;
        info->pseudo_palette = dinfo->pseudo_palette;
 
@@ -1372,11 +1371,11 @@ static int intelfb_set_par(struct fb_info *info)
        intelfb_blank(FB_BLANK_UNBLANK, info);
 
        if (ACCEL(dinfo, info)) {
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+               info->flags = FBINFO_HWACCEL_YPAN |
                FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
                FBINFO_HWACCEL_IMAGEBLIT;
        } else
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+               info->flags = FBINFO_HWACCEL_YPAN;
 
        kfree(hw);
        return 0;
index 3f277bdb3a321618eef99fee24785524d3f5764d..af6c0581d3e211306279a6314b19acecf175ab3b 100644 (file)
@@ -661,13 +661,11 @@ static struct pci_driver kyrofb_pci_driver = {
 
 static const struct fb_ops kyrofb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = kyrofb_check_var,
        .fb_set_par     = kyrofb_set_par,
        .fb_setcolreg   = kyrofb_setcolreg,
        .fb_ioctl       = kyrofb_ioctl,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -716,7 +714,6 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        info->fbops             = &kyrofb_ops;
        info->fix               = kyro_fix;
        info->pseudo_palette    = currentpar->palette;
-       info->flags             = FBINFO_DEFAULT;
 
        SetCoreClockPLL(deviceInfo.pSTGReg, pdev);
 
index 3ffc0a725f89b0f195c836dab5bfec5a6d3326d3..a1a40ea3b22a5ded42fa00e80d7610bd3fcd2c89 100644 (file)
@@ -600,7 +600,6 @@ static int leo_probe(struct platform_device *op)
            !info->screen_base)
                goto out_unmap_regs;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &leo_ops;
        info->pseudo_palette = par->clut_data;
 
index 44ff860a3f3783210d0b54f1444e0aa4e0f8dd05..887fffdccd2451aebed82b3c9eb3d7c1d2a923c9 100644 (file)
@@ -498,10 +498,8 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static const struct fb_ops macfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = macfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static void __init macfb_setup(char *options)
@@ -876,7 +874,6 @@ static int __init macfb_init(void)
        fb_info.var             = macfb_defined;
        fb_info.fix             = macfb_fix;
        fb_info.pseudo_palette  = pseudo_palette;
-       fb_info.flags           = FBINFO_DEFAULT;
 
        err = fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);
        if (err)
index 7655afa3fd500b672ff1dae94b59472e5e75528e..372197c124dec4caadd72e27517b9f493eb6115c 100644 (file)
@@ -603,9 +603,8 @@ static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
        void* oldcrtc2;
 
        m2info->fbcon.fbops = &matroxfb_dh_ops;
-       m2info->fbcon.flags = FBINFO_FLAG_DEFAULT;
-       m2info->fbcon.flags |= FBINFO_HWACCEL_XPAN |
-                              FBINFO_HWACCEL_YPAN;
+       m2info->fbcon.flags = FBINFO_HWACCEL_XPAN |
+                             FBINFO_HWACCEL_YPAN;
        m2info->fbcon.pseudo_palette = m2info->cmap;
        fb_alloc_cmap(&m2info->fbcon.cmap, 256, 1);
 
index 4e6b05232ae2238159734e8d956a832ddd101442..52528eb4dfb4128bf9ca2d76921c914a11212905 100644 (file)
@@ -107,10 +107,8 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static const struct fb_ops maxinefb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = maxinefb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 int __init maxinefb_init(void)
@@ -155,7 +153,6 @@ int __init maxinefb_init(void)
        fb_info.screen_base = (char *)maxinefb_fix.smem_start;
        fb_info.var = maxinefb_defined;
        fb_info.fix = maxinefb_fix;
-       fb_info.flags = FBINFO_DEFAULT;
 
        fb_alloc_cmap(&fb_info.cmap, 256, 0);
 
index b5c8fcab9940dde66550752c2d80d011302be641..05bc51305d94c4f758af6c948b742b6a0bbf3b4f 100644 (file)
@@ -112,8 +112,7 @@ static int mb862xxfb_check_var(struct fb_var_screeninfo *var,
 {
        unsigned long tmp;
 
-       if (fbi->dev)
-               dev_dbg(fbi->dev, "%s\n", __func__);
+       fb_dbg(fbi, "%s\n", __func__);
 
        /* check if these values fit into the registers */
        if (var->hsync_len > 255 || var->vsync_len > 255)
@@ -290,7 +289,7 @@ static int mb862xxfb_blank(int mode, struct fb_info *fbi)
        struct mb862xxfb_par  *par = fbi->par;
        unsigned long reg;
 
-       dev_dbg(fbi->dev, "blank mode=%d\n", mode);
+       fb_dbg(fbi, "blank mode=%d\n", mode);
 
        switch (mode) {
        case FB_BLANK_POWERDOWN:
@@ -408,14 +407,12 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
 /* framebuffer ops */
 static struct fb_ops mb862xxfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = mb862xxfb_check_var,
        .fb_set_par     = mb862xxfb_set_par,
        .fb_setcolreg   = mb862xxfb_setcolreg,
        .fb_blank       = mb862xxfb_blank,
        .fb_pan_display = mb862xxfb_pan,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_ioctl       = mb862xxfb_ioctl,
 };
 
@@ -502,7 +499,7 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
        fbi->var.accel_flags = 0;
        fbi->var.vmode = FB_VMODE_NONINTERLACED;
        fbi->var.activate = FB_ACTIVATE_NOW;
-       fbi->flags = FBINFO_DEFAULT |
+       fbi->flags =
 #ifdef __BIG_ENDIAN
                     FBINFO_FOREIGN_ENDIAN |
 #endif
@@ -791,7 +788,7 @@ static void of_platform_mb862xx_remove(struct platform_device *ofdev)
        resource_size_t res_size = resource_size(par->res);
        unsigned long reg;
 
-       dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
+       fb_dbg(fbi, "%s release\n", fbi->fix.id);
 
        /* display off */
        reg = inreg(disp, GC_DCM1);
@@ -1138,7 +1135,7 @@ static void mb862xx_pci_remove(struct pci_dev *pdev)
        struct mb862xxfb_par *par = fbi->par;
        unsigned long reg;
 
-       dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
+       fb_dbg(fbi, "%s release\n", fbi->fix.id);
 
        /* display off */
        reg = inreg(disp, GC_DCM1);
index ebdb4949c4ace1fed14706e4ba542e95e38aeaa3..130394616a7ca58f4e25deb3d402c74241df459b 100644 (file)
@@ -181,7 +181,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
        int mem_idx = 0;
        struct waveform_hdr *wfm_hdr;
        u8 *metromem = par->metromem_wfm;
-       struct device *dev = par->info->dev;
+       struct device *dev = par->info->device;
 
        if (user_wfm_size)
                epd_frame_table[par->dt].wfm_size = user_wfm_size;
@@ -483,86 +483,28 @@ static void metronomefb_dpy_deferred_io(struct fb_info *info, struct list_head *
        metronome_display_cmd(par);
 }
 
-static void metronomefb_fillrect(struct fb_info *info,
-                                  const struct fb_fillrect *rect)
+static void metronomefb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
 {
        struct metronomefb_par *par = info->par;
 
-       sys_fillrect(info, rect);
        metronomefb_dpy_update(par);
 }
 
-static void metronomefb_copyarea(struct fb_info *info,
-                                  const struct fb_copyarea *area)
+static void metronomefb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+                                         u32 width, u32 height)
 {
        struct metronomefb_par *par = info->par;
 
-       sys_copyarea(info, area);
        metronomefb_dpy_update(par);
 }
 
-static void metronomefb_imageblit(struct fb_info *info,
-                               const struct fb_image *image)
-{
-       struct metronomefb_par *par = info->par;
-
-       sys_imageblit(info, image);
-       metronomefb_dpy_update(par);
-}
-
-/*
- * this is the slow path from userspace. they can seek and write to
- * the fb. it is based on fb_sys_write
- */
-static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       struct metronomefb_par *par = info->par;
-       unsigned long p = *ppos;
-       void *dst;
-       int err = 0;
-       unsigned long total_size;
-
-       if (!info->screen_buffer)
-               return -ENODEV;
-
-       total_size = info->fix.smem_len;
-
-       if (p > total_size)
-               return -EFBIG;
-
-       if (count > total_size) {
-               err = -EFBIG;
-               count = total_size;
-       }
-
-       if (count + p > total_size) {
-               if (!err)
-                       err = -ENOSPC;
-
-               count = total_size - p;
-       }
-
-       dst = info->screen_buffer + p;
-
-       if (copy_from_user(dst, buf, count))
-               err = -EFAULT;
-
-       if  (!err)
-               *ppos += count;
-
-       metronomefb_dpy_update(par);
-
-       return (err) ? err : count;
-}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(metronomefb,
+                                  metronomefb_defio_damage_range,
+                                  metronomefb_defio_damage_area)
 
 static const struct fb_ops metronomefb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_write       = metronomefb_write,
-       .fb_fillrect    = metronomefb_fillrect,
-       .fb_copyarea    = metronomefb_copyarea,
-       .fb_imageblit   = metronomefb_imageblit,
-       .fb_mmap        = fb_deferred_io_mmap,
+       .owner  = THIS_MODULE,
+       FB_DEFAULT_DEFERRED_OPS(metronomefb),
 };
 
 static struct fb_deferred_io metronomefb_defio = {
@@ -700,7 +642,7 @@ static int metronomefb_probe(struct platform_device *dev)
        if (retval < 0)
                goto err_free_irq;
 
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->flags = FBINFO_VIRTFB;
 
        info->fbdefio = &metronomefb_defio;
        fb_deferred_io_init(info);
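
As with hecubafb, metronomefb's fb_deferred_io descriptor is untouched; only the entry points moved to the generated helpers. The descriptor pairs a coalescing delay with the worker that flushes dirty pages, in the shape both drivers use (examplefb_* names are placeholders):

static void examplefb_deferred_io(struct fb_info *info,
				  struct list_head *pagereflist)
{
	/* runs after .delay expires; these E-Ink drivers simply push the
	 * whole shadow buffer to the panel rather than walking the list */
}

static struct fb_deferred_io examplefb_defio = {
	.delay		= HZ,
	.deferred_io	= examplefb_deferred_io,
};
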
index 0ec2e3fb9e17582355c9dc3092093269f6e7e1e8..b13882b34e79b9f1655645182f8c44d27cdf9def 100644 (file)
@@ -2,9 +2,7 @@
 config MMP_FB
        tristate "fb driver for Marvell MMP Display Subsystem"
        depends on FB
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        default y
        help
                fb driver for Marvell MMP Display Subsystem
index 39ebbe026ddf5cc34e1bbf31f5b67d66b971fa85..42a87474bceadf8139465c0c1147cbbc89622159 100644 (file)
@@ -454,14 +454,12 @@ static int mmpfb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops mmpfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_blank       = mmpfb_blank,
        .fb_check_var   = mmpfb_check_var,
        .fb_set_par     = mmpfb_set_par,
        .fb_setcolreg   = mmpfb_setcolreg,
        .fb_pan_display = mmpfb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int modes_setup(struct mmpfb_info *fbi)
@@ -502,7 +500,7 @@ static int fb_info_setup(struct fb_info *info,
 {
        int ret = 0;
        /* Initialise static fb parameters.*/
-       info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+       info->flags = FBINFO_PARTIAL_PAN_OK |
                FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
        info->node = -1;
        strcpy(info->fix.id, fbi->name);
index 63c186e0364aab4e39dfa970bf90f9fd9f4cc773..e6bafaba3460bbba3b3f07120622fc4694a23744 100644 (file)
@@ -1247,13 +1247,11 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
  */
 static const struct fb_ops mx3fb_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_set_par = mx3fb_set_par,
        .fb_check_var = mx3fb_check_var,
        .fb_setcolreg = mx3fb_setcolreg,
        .fb_pan_display = mx3fb_pan_display,
-       .fb_fillrect = cfb_fillrect,
-       .fb_copyarea = cfb_copyarea,
-       .fb_imageblit = cfb_imageblit,
        .fb_blank = mx3fb_blank,
 };
 
@@ -1406,7 +1404,6 @@ static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
        fbi->var.activate       = FB_ACTIVATE_NOW;
 
        fbi->fbops              = ops;
-       fbi->flags              = FBINFO_FLAG_DEFAULT;
        fbi->pseudo_palette     = mx3fbi->pseudo_palette;
 
        mutex_init(&mx3fbi->mutex);
index 39d8cdef5c97ac99a2254fda7b5fbba719a80a4b..d2f622b4c372a368ec0ee3e086ec9fb393c820ec 100644 (file)
@@ -1944,7 +1944,7 @@ static struct fb_info *neo_alloc_fb_info(struct pci_dev *dev,
 
        par->internal_display = internal;
        par->external_display = external;
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
 
        switch (info->fix.accel) {
        case FB_ACCEL_NEOMAGIC_NM2070:
index 503a7a683855aaaf8610d6216d5b4dd6d036d305..160da9c50a52c95357f66510cf44b12c6ad2e5cc 100644 (file)
@@ -98,7 +98,7 @@ void nvidia_bl_init(struct nvidia_par *par)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-       bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
+       bd = backlight_device_register(name, info->device, par, &nvidia_bl_ops,
                                       &props);
        if (IS_ERR(bd)) {
                info->bl_dev = NULL;
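
nv_backlight registration now hangs the backlight off info->device, the graphics device that owns the framebuffer, instead of the core-owned info->dev. The call itself is the regular backlight API; sketched with a hypothetical ops table:

#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/string.h>

static struct backlight_device *
examplefb_register_bl(struct fb_info *info, void *par,
		      const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;

	return backlight_device_register("examplebl", info->device, par,
					 ops, &props);
}
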
index ea4ba3dfb96bb51d149c0e288286269bc1a1ad0f..907c22408652bd2bf4ce52d96a30bdfd431b1f81 100644 (file)
@@ -1111,8 +1111,8 @@ static int nvidia_set_fbinfo(struct fb_info *info)
        int lpitch;
 
        NVTRACE_ENTER();
-       info->flags = FBINFO_DEFAULT
-           | FBINFO_HWACCEL_IMAGEBLIT
+       info->flags =
+             FBINFO_HWACCEL_IMAGEBLIT
            | FBINFO_HWACCEL_FILLRECT
            | FBINFO_HWACCEL_COPYAREA
            | FBINFO_HWACCEL_YPAN;
@@ -1400,14 +1400,14 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
 
        pci_set_drvdata(pd, info);
 
-       if (backlight)
-               nvidia_bl_init(par);
-
        if (register_framebuffer(info) < 0) {
                printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
                goto err_out_iounmap_fb;
        }
 
+       if (backlight)
+               nvidia_bl_init(par);
+
        printk(KERN_INFO PFX
               "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
               info->fix.id,
@@ -1439,9 +1439,9 @@ static void nvidiafb_remove(struct pci_dev *pd)
 
        NVTRACE_ENTER();
 
+       nvidia_bl_exit(par);
        unregister_framebuffer(info);
 
-       nvidia_bl_exit(par);
        arch_phys_wc_del(par->wc_cookie);
        iounmap(info->screen_base);
        fb_destroy_modedb(info->monspecs.modedb);
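
The probe/remove reordering above makes the backlight's lifetime nest inside the framebuffer's: bring it up only after register_framebuffer() succeeds, tear it down before unregister_framebuffer(). rivafb below gets the same swap. Schematically, with hypothetical names:

struct example_par;			/* driver state, hypothetical */
void example_bl_init(struct example_par *par);
void example_bl_exit(struct example_par *par);

static int examplefb_probe_tail(struct fb_info *info, struct example_par *par)
{
	if (register_framebuffer(info) < 0)
		return -EINVAL;
	example_bl_init(par);		/* after the fb device exists */
	return 0;
}

static void examplefb_remove_head(struct fb_info *info, struct example_par *par)
{
	example_bl_exit(par);		/* strict reverse of probe */
	unregister_framebuffer(info);
}
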
index 7ebe794583e14cea228f9fcf2e7f20364210dc3b..7dc305c67af80522c7932cb72a9d61098933c7fa 100644 (file)
@@ -287,10 +287,8 @@ static int ocfb_init_var(struct ocfb_dev *fbdev)
 
 static const struct fb_ops ocfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = ocfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int ocfb_probe(struct platform_device *pdev)
index 0065a77b6dbc8f0a6f5563c3b3e963d0733189eb..dcb1b81d35db57cc15dba8040c92875b86e02e6b 100644 (file)
@@ -293,13 +293,11 @@ static void offb_destroy(struct fb_info *info)
 
 static const struct fb_ops offb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_destroy     = offb_destroy,
        .fb_setcolreg   = offb_setcolreg,
        .fb_set_par     = offb_set_par,
        .fb_blank       = offb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static void __iomem *offb_map_reg(struct device_node *np, int index,
@@ -514,7 +512,7 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
        info->fbops = &offb_ops;
        info->screen_base = ioremap(address, fix->smem_len);
        info->pseudo_palette = par->pseudo_palette;
-       info->flags = FBINFO_DEFAULT | foreign_endian;
+       info->flags = foreign_endian;
 
        fb_alloc_cmap(&info->cmap, 256, 0);
 
index a6548283451f9b25dac45dc50e22e7779f0a93bd..f01278238d50a3010aadf3dc2454d43950f875ca 100644 (file)
@@ -3,9 +3,7 @@ config FB_OMAP
        tristate "OMAP frame buffer support"
        depends on FB
        depends on ARCH_OMAP1 || (ARM && COMPILE_TEST)
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Frame buffer driver for OMAP based boards.
 
index ad65554b33c358cfc0e2ba8e3ec2d1854f6dd9fb..f28cb90947a3412076a203368828044aa4241093 100644 (file)
@@ -1216,13 +1216,11 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
  */
 static struct fb_ops omapfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_open        = omapfb_open,
        .fb_release     = omapfb_release,
        .fb_setcolreg   = omapfb_setcolreg,
        .fb_setcmap     = omapfb_setcmap,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_blank       = omapfb_blank,
        .fb_ioctl       = omapfb_ioctl,
        .fb_check_var   = omapfb_check_var,
@@ -1451,7 +1449,6 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
        int                             r = 0;
 
        info->fbops = &omapfb_ops;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        strscpy(fix->id, MODULE_NAME, sizeof(fix->id));
 
index 69f9cb03507ef3c639974c962959f4e8301616de..21069fdb7cc21d64956bc4f8bbe4ce81fe83d484 100644 (file)
@@ -5,9 +5,9 @@ config OMAP2_VRFB
 menuconfig FB_OMAP2
        tristate "OMAP2+ frame buffer support"
        depends on FB
+       depends on FB_DEVICE
        depends on DRM_OMAP = n
        depends on GPIOLIB
-
        select FB_OMAP2_DSS
        select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
        select FB_CFB_FILLRECT
index c0538069eb48e51f207d97483357d79ad46b13ad..b5acad8eb27966c0872b8be7f8a2885be3d66f2c 100644 (file)
@@ -1732,7 +1732,6 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
        int r = 0;
 
        fbi->fbops = &omapfb_ops;
-       fbi->flags = FBINFO_FLAG_DEFAULT;
        fbi->pseudo_palette = fbdev->pseudo_palette;
 
        if (ofbi->region->size == 0) {
index 0876962c52ebc5f54271c498c2867d4be024b02a..41d124862a007b967b3886fdde0c192d8493aced 100644 (file)
@@ -283,7 +283,6 @@ static int p9100_probe(struct platform_device *op)
        if (!par->regs)
                goto out_release_fb;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &p9100_ops;
        info->screen_base = of_ioremap(&op->resource[2], 0,
                                       info->fix.smem_len, "p9100 ram");
index f8283fcd5edb781cb66587b181d3737d31695a77..4f9d8742ea0cc4d7e94d54c3241a43419da56a6f 100644 (file)
@@ -98,13 +98,11 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
 
 static const struct fb_ops platinumfb_ops = {
        .owner =        THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = platinumfb_check_var,
        .fb_set_par     = platinumfb_set_par,
        .fb_setcolreg   = platinumfb_setcolreg,
        .fb_blank       = platinumfb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /*
@@ -317,7 +315,6 @@ static void platinum_init_info(struct fb_info *info,
        /* Fill fb_info */
        info->fbops = &platinumfb_ops;
        info->pseudo_palette = pinfo->pseudo_palette;
-        info->flags = FBINFO_DEFAULT;
        info->screen_base = pinfo->frame_buffer + 0x20;
 
        fb_alloc_cmap(&info->cmap, 256, 0);
index 47d212944f3079c5b6189fda3d8a080bbf2fd591..5a79a12efd8e30b578722525982659c02c6d3fe6 100644 (file)
@@ -1657,8 +1657,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        info->fbops             = &pm2fb_ops;
        info->fix               = pm2fb_fix;
        info->pseudo_palette    = default_par->palette;
-       info->flags             = FBINFO_DEFAULT |
-                                 FBINFO_HWACCEL_YPAN |
+       info->flags             = FBINFO_HWACCEL_YPAN |
                                  FBINFO_HWACCEL_COPYAREA |
                                  FBINFO_HWACCEL_IMAGEBLIT |
                                  FBINFO_HWACCEL_FILLRECT;
index b46a471df9ae800869e286a872ee43dda1f6dd5d..16577d0e41b1aa6ff7e3a0b18d493f2ccc1fcad4 100644 (file)
@@ -1390,8 +1390,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 
        info->fix = pm3fb_fix;
        info->pseudo_palette = par->palette;
-       info->flags = FBINFO_DEFAULT |
-                       FBINFO_HWACCEL_XPAN |
+       info->flags = FBINFO_HWACCEL_XPAN |
                        FBINFO_HWACCEL_YPAN |
                        FBINFO_HWACCEL_COPYAREA |
                        FBINFO_HWACCEL_IMAGEBLIT |
index 62c8de99af0b24dba9b0a64c1cf4542764e5e5f8..acfc8c70c410eaa1e2fb3edfc1d8676d8e31f75d 100644 (file)
@@ -149,10 +149,8 @@ static int aafb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops aafb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_blank       = aafb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_cursor      = aafb_cursor,
 };
 
@@ -174,7 +172,6 @@ static int pmagaafb_probe(struct device *dev)
        info->fbops = &aafb_ops;
        info->fix = aafb_fix;
        info->var = aafb_defined;
-       info->flags = FBINFO_DEFAULT;
 
        /* Request the I/O MEM resource. */
        start = tdev->resource.start;
index 1296f9b370c2e50c29fee335b3d5a00758ae311a..1e010520b3353679eb0a7a6a6ab544428df1b705 100644 (file)
@@ -119,10 +119,8 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
 
 static const struct fb_ops pmagbafb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = pmagbafb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 
@@ -166,7 +164,6 @@ static int pmagbafb_probe(struct device *dev)
        info->fbops = &pmagbafb_ops;
        info->fix = pmagbafb_fix;
        info->var = pmagbafb_defined;
-       info->flags = FBINFO_DEFAULT;
 
        /* Request the I/O MEM resource.  */
        start = tdev->resource.start;
index 9dccd51ee65a4e3f8fa69305ef9ceac93e239d10..6432492467d18bd0c1b640eda236c613a12ec14b 100644 (file)
@@ -123,10 +123,8 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
 
 static const struct fb_ops pmagbbfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = pmagbbfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 
@@ -273,7 +271,6 @@ static int pmagbbfb_probe(struct device *dev)
        info->fbops = &pmagbbfb_ops;
        info->fix = pmagbbfb_fix;
        info->var = pmagbbfb_defined;
-       info->flags = FBINFO_DEFAULT;
 
        /* Request the I/O MEM resource.  */
        start = tdev->resource.start;
index d4abcf8aff75fb9c7690981bcdab6925dc382890..64d291d6b1532eab1d605056dab77e94df126c83 100644 (file)
@@ -1145,7 +1145,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
        info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
 
        info->pseudo_palette = par->pseudo_palette;
-       info->flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
+       info->flags = FBINFO_READS_FAST |
                      FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
 
        retval = fb_alloc_cmap(&info->cmap, 256, 0);
@@ -1168,9 +1168,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
 
        ps3_system_bus_set_drvdata(dev, info);
 
-       dev_info(info->device, "%s %s, using %u KiB of video memory\n",
-                dev_driver_string(info->dev), dev_name(info->dev),
-                info->fix.smem_len >> 10);
+       fb_info(info, "using %u KiB of video memory\n", info->fix.smem_len >> 10);
 
        task = kthread_run(ps3fbd, info, DEVICE_NAME);
        if (IS_ERR(task)) {
index c692cd597ce32b94e8a9d18c1006ed70bd8934bb..6307364e4a49c5b19ee561ddc306b2d1b8370cb9 100644 (file)
@@ -810,7 +810,7 @@ static int __maybe_unused pvr2fb_common_init(void)
        fb_info->fix            = pvr2_fix;
        fb_info->par            = currentpar;
        fb_info->pseudo_palette = currentpar->palette;
-       fb_info->flags          = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       fb_info->flags          = FBINFO_HWACCEL_YPAN;
 
        if (video_output == VO_VGA)
                defmode = DEFMODE_VGA;
index 79f3384630926139f71ffa97cb25789b693c4bba..adee34386580061f39e9a6306146b3ce62250c2a 100644 (file)
@@ -543,14 +543,12 @@ static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
 
 static const struct fb_ops pxa168fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = pxa168fb_check_var,
        .fb_set_par     = pxa168fb_set_par,
        .fb_setcolreg   = pxa168fb_setcolreg,
        .fb_blank       = pxa168fb_blank,
        .fb_pan_display = pxa168fb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static void pxa168fb_init_mode(struct fb_info *info,
@@ -629,7 +627,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        fbi = info->par;
        fbi->info = info;
        fbi->clk = clk;
-       fbi->dev = info->dev = &pdev->dev;
+       fbi->dev = &pdev->dev;
        fbi->panel_rbswap = mi->panel_rbswap;
        fbi->is_blanked = 0;
        fbi->active = mi->active;
@@ -637,7 +635,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * Initialise static fb parameters.
         */
-       info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+       info->flags = FBINFO_PARTIAL_PAN_OK |
                      FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
        info->node = -1;
        strscpy(info->fix.id, mi->id, 16);
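
pxa168fb stops aliasing info->dev to the platform device and keeps the pointer only in its private state; info->dev belongs to the fbdev core, which assigns it when the framebuffer is registered. Sketch of the pattern, with a hypothetical private struct:

#include <linux/fb.h>
#include <linux/platform_device.h>

struct examplefb_priv {
	struct device *dev;	/* parent device, private copy */
};

static int examplefb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	struct examplefb_priv *priv;

	info = framebuffer_alloc(sizeof(*priv), &pdev->dev);
	if (!info)
		return -ENOMEM;

	priv = info->par;
	priv->dev = &pdev->dev;	/* do not write info->dev here */
	platform_set_drvdata(pdev, info);
	return 0;
}
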
index 2a8b1dea3a67557ae1cd92351c4ff08a7aaf88cc..fa943612c4e2b32b81877afa4aeb503fa3d69967 100644 (file)
@@ -599,13 +599,11 @@ static int pxafb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops pxafb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = pxafb_check_var,
        .fb_set_par     = pxafb_set_par,
        .fb_pan_display = pxafb_pan_display,
        .fb_setcolreg   = pxafb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_blank       = pxafb_blank,
 };
 
@@ -888,7 +886,6 @@ static void init_pxafb_overlay(struct pxafb_info *fbi, struct pxafb_layer *ofb,
        ofb->fb.var.vmode               = FB_VMODE_NONINTERLACED;
 
        ofb->fb.fbops                   = &overlay_fb_ops;
-       ofb->fb.flags                   = FBINFO_FLAG_DEFAULT;
        ofb->fb.node                    = -1;
        ofb->fb.pseudo_palette          = NULL;
 
@@ -1826,7 +1823,6 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev,
        fbi->fb.var.vmode       = FB_VMODE_NONINTERLACED;
 
        fbi->fb.fbops           = &pxafb_ops;
-       fbi->fb.flags           = FBINFO_DEFAULT;
        fbi->fb.node            = -1;
 
        addr = fbi;
index 964bc88bb89c4b09cab7086d08fdb3ab0cb24b7d..1ff8fa1761248b9b2bb2551ad95ceaca166b9035 100644 (file)
@@ -76,10 +76,8 @@ static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static const struct fb_ops q40fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = q40fb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int q40fb_probe(struct platform_device *dev)
@@ -99,7 +97,6 @@ static int q40fb_probe(struct platform_device *dev)
        info->var = q40fb_var;
        info->fix = q40fb_fix;
        info->fbops = &q40fb_ops;
-       info->flags = FBINFO_DEFAULT;  /* not as module for now */
        info->pseudo_palette = info->par;
        info->par = NULL;
        info->screen_base = (char *) q40fb_fix.smem_start;
index 41edc6e794603145e3d6da922b2285def60f1728..99576ba3ce6ec678f68ad29740df629310a47edd 100644 (file)
@@ -333,7 +333,7 @@ static void riva_bl_init(struct riva_par *par)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
-       bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
+       bd = backlight_device_register(name, info->device, par, &riva_bl_ops,
                                       &props);
        if (IS_ERR(bd)) {
                info->bl_dev = NULL;
@@ -1688,8 +1688,7 @@ static int riva_set_fbinfo(struct fb_info *info)
        struct riva_par *par = info->par;
 
        NVTRACE_ENTER();
-       info->flags = FBINFO_DEFAULT
-                   | FBINFO_HWACCEL_XPAN
+       info->flags = FBINFO_HWACCEL_XPAN
                    | FBINFO_HWACCEL_YPAN
                    | FBINFO_HWACCEL_COPYAREA
                    | FBINFO_HWACCEL_FILLRECT
@@ -2031,9 +2030,6 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
 
        pci_set_drvdata(pd, info);
 
-       if (backlight)
-               riva_bl_init(info->par);
-
        ret = register_framebuffer(info);
        if (ret < 0) {
                printk(KERN_ERR PFX
@@ -2041,6 +2037,9 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
                goto err_iounmap_screen_base;
        }
 
+       if (backlight)
+               riva_bl_init(info->par);
+
        printk(KERN_INFO PFX
                "PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n",
                info->fix.id,
@@ -2084,9 +2083,9 @@ static void rivafb_remove(struct pci_dev *pd)
        kfree(par->EDID);
 #endif
 
+       riva_bl_exit(info);
        unregister_framebuffer(info);
 
-       riva_bl_exit(info);
        arch_phys_wc_del(par->wc_cookie);
        iounmap(par->ctrl_base);
        iounmap(info->screen_base);
index 8f2edccdba462fe656ce7be975e688b0175a0931..c7d221cce06d7eb6cb29afa30c7d40823db7357f 100644 (file)
@@ -869,14 +869,14 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
               default_par->regs, info->fix.smem_len / 1024, info->screen_base);
 
        info->par = default_par;
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
        info->fbops = &s1d13xxxfb_fbops;
 
        switch(prod_id) {
        case S1D13506_PROD_ID:  /* activate acceleration */
                s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
                s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
-               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+               info->flags = FBINFO_HWACCEL_YPAN |
                        FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
                break;
        default:
index 1ce707e4cfd03e0bb1b8dcc5499977b55ce8239b..2b85aad6a3045130cacad76feaeb3d3e7e622afd 100644 (file)
@@ -1038,13 +1038,11 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
 
 static const struct fb_ops s3c_fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = s3c_fb_check_var,
        .fb_set_par     = s3c_fb_set_par,
        .fb_blank       = s3c_fb_blank,
        .fb_setcolreg   = s3c_fb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_pan_display = s3c_fb_pan_display,
        .fb_ioctl       = s3c_fb_ioctl,
 };
@@ -1244,7 +1242,6 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        fbinfo->var.vmode       = FB_VMODE_NONINTERLACED;
        fbinfo->var.bits_per_pixel = windata->default_bpp;
        fbinfo->fbops           = &s3c_fb_ops;
-       fbinfo->flags           = FBINFO_FLAG_DEFAULT;
        fbinfo->pseudo_palette  = &win->pseudo_palette;
 
        /* prepare to actually start the framebuffer */
index a2408bf00ca0f51724645dd3b0c7f603d1ded146..3d76ce11148826a169bd0f0251424262024a2d66 100644 (file)
@@ -1089,7 +1089,6 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
        fbi->fb.var.vmode       = FB_VMODE_NONINTERLACED;
 
        fbi->fb.fbops           = &sa1100fb_ops;
-       fbi->fb.flags           = FBINFO_DEFAULT;
        fbi->fb.monspecs        = monspecs;
        fbi->fb.pseudo_palette  = fbi->pseudo_palette;
 
index 4a27b68798bf5831655d4b82f7f4e7ff309d8180..b5f84bd4804b83d860b69e1c2452bd6ceaa5381f 100644 (file)
@@ -2135,8 +2135,7 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
        info->var.accel_flags = 0;
 
        info->fbops          = &savagefb_ops;
-       info->flags          = FBINFO_DEFAULT |
-                              FBINFO_HWACCEL_YPAN |
+       info->flags          = FBINFO_HWACCEL_YPAN |
                               FBINFO_HWACCEL_XPAN;
 
        info->pseudo_palette = par->pseudo_palette;
index 98c5227098a8d652402ad4edb1624c8ec20d0dab..08a4943dc54184144ff231211ac3887fb9aa2f99 100644 (file)
@@ -118,7 +118,7 @@ static int sh7760_setcolreg (u_int regno,
        return 0;
 }
 
-static int sh7760fb_get_color_info(struct device *dev,
+static int sh7760fb_get_color_info(struct fb_info *info,
                                   u16 lddfr, int *bpp, int *gray)
 {
        int lbpp, lgray;
@@ -152,7 +152,7 @@ static int sh7760fb_get_color_info(struct device *dev,
                lgray = 0;
                break;
        default:
-               dev_dbg(dev, "unsupported LDDFR bit depth.\n");
+               fb_dbg(info, "unsupported LDDFR bit depth.\n");
                return -EINVAL;
        }
 
@@ -172,7 +172,7 @@ static int sh7760fb_check_var(struct fb_var_screeninfo *var,
        int ret, bpp;
 
        /* get color info from register value */
-       ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
+       ret = sh7760fb_get_color_info(info, par->pd->lddfr, &bpp, NULL);
        if (ret)
                return ret;
 
@@ -209,7 +209,7 @@ static int sh7760fb_set_par(struct fb_info *info)
 
        /* rotate only works with xres <= 320 */
        if (par->rot && (vm->xres > 320)) {
-               dev_dbg(info->dev, "rotation disabled due to display size\n");
+               fb_dbg(info, "rotation disabled due to display size\n");
                par->rot = 0;
        }
 
@@ -224,11 +224,11 @@ static int sh7760fb_set_par(struct fb_info *info)
        vdln = vm->yres;
 
        /* get color info from register value */
-       ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, &gray);
+       ret = sh7760fb_get_color_info(info, par->pd->lddfr, &bpp, &gray);
        if (ret)
                return ret;
 
-       dev_dbg(info->dev, "%dx%d %dbpp %s (orientation %s)\n", hdcn,
+       fb_dbg(info, "%dx%d %dbpp %s (orientation %s)\n", hdcn,
                vdln, bpp, gray ? "grayscale" : "color",
                par->rot ? "rotated" : "normal");
 
@@ -308,7 +308,7 @@ static int sh7760fb_set_par(struct fb_info *info)
        if (((ldmtr & 0x003f) >= LDMTR_DSTN_MONO_8) &&
            ((ldmtr & 0x003f) <= LDMTR_DSTN_COLOR_16)) {
 
-               dev_dbg(info->dev, " ***** DSTN untested! *****\n");
+               fb_dbg(info, " ***** DSTN untested! *****\n");
 
                dstn_off = stride;
                if (par->rot)
@@ -328,30 +328,28 @@ static int sh7760fb_set_par(struct fb_info *info)
 
        sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
 
-       dev_dbg(info->dev, "hdcn  : %6d htcn  : %6d\n", hdcn, htcn);
-       dev_dbg(info->dev, "hsynw : %6d hsynp : %6d\n", hsynw, hsynp);
-       dev_dbg(info->dev, "vdln  : %6d vtln  : %6d\n", vdln, vtln);
-       dev_dbg(info->dev, "vsynw : %6d vsynp : %6d\n", vsynw, vsynp);
-       dev_dbg(info->dev, "clksrc: %6d clkdiv: %6d\n",
+       fb_dbg(info, "hdcn  : %6d htcn  : %6d\n", hdcn, htcn);
+       fb_dbg(info, "hsynw : %6d hsynp : %6d\n", hsynw, hsynp);
+       fb_dbg(info, "vdln  : %6d vtln  : %6d\n", vdln, vtln);
+       fb_dbg(info, "vsynw : %6d vsynp : %6d\n", vsynw, vsynp);
+       fb_dbg(info, "clksrc: %6d clkdiv: %6d\n",
                (par->pd->ldickr >> 12) & 3, par->pd->ldickr & 0x1f);
-       dev_dbg(info->dev, "ldpmmr: 0x%04x ldpspr: 0x%04x\n", par->pd->ldpmmr,
+       fb_dbg(info, "ldpmmr: 0x%04x ldpspr: 0x%04x\n", par->pd->ldpmmr,
                par->pd->ldpspr);
-       dev_dbg(info->dev, "ldmtr : 0x%04x lddfr : 0x%04x\n", ldmtr, lddfr);
-       dev_dbg(info->dev, "ldlaor: %ld\n", stride);
-       dev_dbg(info->dev, "ldsaru: 0x%08lx ldsarl: 0x%08lx\n", sbase, ldsarl);
+       fb_dbg(info, "ldmtr : 0x%04x lddfr : 0x%04x\n", ldmtr, lddfr);
+       fb_dbg(info, "ldlaor: %ld\n", stride);
+       fb_dbg(info, "ldsaru: 0x%08lx ldsarl: 0x%08lx\n", sbase, ldsarl);
 
        return 0;
 }
 
 static const struct fb_ops sh7760fb_ops = {
        .owner = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_blank = sh7760fb_blank,
        .fb_check_var = sh7760fb_check_var,
        .fb_setcolreg = sh7760_setcolreg,
        .fb_set_par = sh7760fb_set_par,
-       .fb_fillrect = cfb_fillrect,
-       .fb_copyarea = cfb_copyarea,
-       .fb_imageblit = cfb_imageblit,
 };
 
 static void sh7760fb_free_mem(struct fb_info *info)
@@ -361,7 +359,7 @@ static void sh7760fb_free_mem(struct fb_info *info)
        if (!info->screen_base)
                return;
 
-       dma_free_coherent(info->dev, info->screen_size,
+       dma_free_coherent(info->device, info->screen_size,
                          info->screen_base, par->fbdma);
 
        par->fbdma = 0;
@@ -383,7 +381,7 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
                return 0;
 
        /* get color info from register value */
-       ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
+       ret = sh7760fb_get_color_info(info, par->pd->lddfr, &bpp, NULL);
        if (ret) {
                printk(KERN_ERR "colinfo\n");
                return ret;
@@ -403,21 +401,21 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
        } else if (bpp > 8)
                vram *= 2;
        if ((vram < 1) || (vram > 1024 * 2048)) {
-               dev_dbg(info->dev, "too much VRAM required. Check settings\n");
+               fb_dbg(info, "too much VRAM required. Check settings\n");
                return -ENODEV;
        }
 
        if (vram < PAGE_SIZE)
                vram = PAGE_SIZE;
 
-       fbmem = dma_alloc_coherent(info->dev, vram, &par->fbdma, GFP_KERNEL);
+       fbmem = dma_alloc_coherent(info->device, vram, &par->fbdma, GFP_KERNEL);
 
        if (!fbmem)
                return -ENOMEM;
 
        if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) {
                sh7760fb_free_mem(info);
-               dev_err(info->dev, "kernel gave me memory at 0x%08lx, which is"
+               dev_err(info->device, "kernel gave me memory at 0x%08lx, which is"
                        "unusable for the LCDC\n", (unsigned long)par->fbdma);
                return -ENOMEM;
        }
@@ -452,7 +450,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
 
        par->pd = pdev->dev.platform_data;
        if (!par->pd) {
-               dev_dbg(info->dev, "no display setup data!\n");
+               dev_dbg(&pdev->dev, "no display setup data!\n");
                ret = -ENODEV;
                goto out_fb;
        }
@@ -488,7 +486,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
 
        ret = sh7760fb_alloc_mem(info);
        if (ret) {
-               dev_dbg(info->dev, "framebuffer memory allocation failed!\n");
+               dev_dbg(info->device, "framebuffer memory allocation failed!\n");
                goto out_unmap;
        }
 
@@ -521,13 +519,13 @@ static int sh7760fb_probe(struct platform_device *pdev)
 
        ret = fb_alloc_cmap(&info->cmap, 256, 0);
        if (ret) {
-               dev_dbg(info->dev, "Unable to allocate cmap memory\n");
+               dev_dbg(&pdev->dev, "Unable to allocate cmap memory\n");
                goto out_mem;
        }
 
        ret = register_framebuffer(info);
        if (ret < 0) {
-               dev_dbg(info->dev, "cannot register fb!\n");
+               dev_dbg(&pdev->dev, "cannot register fb!\n");
                goto out_cmap;
        }
        platform_set_drvdata(pdev, info);
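
The sh7760fb hunks above untangle two easily confused fb_info members: info->dev is the fbdev class device, created only by register_framebuffer() and NULL before that, while info->device is the parent hardware device handed in at probe time. DMA allocation and dev_* logging therefore have to use info->device (or &pdev->dev before the fb_info is filled in), and the fb_dbg()/fb_err() wrappers sidestep the question by printing through pr_*() with an "fb%d:" prefix taken from info->node. A hedged sketch of the distinction (field comments paraphrased from include/linux/fb.h):

	struct fb_info {
		...
		struct device *device;	/* parent, e.g. &pdev->dev */
		struct device *dev;	/* fbdev class device, NULL until registered */
		...
	};

The smscufx hunks later in this section make the same point from the other side: "info->dev != NULL" had been used as a registered-or-not test and is replaced by refcount_read(&info->count).
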
index 0adb2ba965e7120d39507b5bcb466cbfa61ddd34..1364dafaadb1d4632d95ca8be37a84d341734336 100644 (file)
@@ -1565,7 +1565,6 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
 
        ovl->info = info;
 
-       info->flags = FBINFO_FLAG_DEFAULT;
        info->fbops = &sh_mobile_lcdc_overlay_ops;
        info->device = priv->dev;
        info->screen_buffer = ovl->fb_mem;
@@ -2052,7 +2051,6 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
 
        ch->info = info;
 
-       info->flags = FBINFO_FLAG_DEFAULT;
        info->fbops = &sh_mobile_lcdc_ops;
        info->device = priv->dev;
        info->screen_buffer = ch->fb_mem;
index e4a13871bca6bdb2190446b467b60f2c6610a57b..62f99f6fccd3c011e26993b9e45fb17f49cb79b8 100644 (file)
@@ -109,11 +109,9 @@ static void simplefb_destroy(struct fb_info *info)
 
 static const struct fb_ops simplefb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_destroy     = simplefb_destroy,
        .fb_setcolreg   = simplefb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS;
@@ -479,7 +477,6 @@ static int simplefb_probe(struct platform_device *pdev)
        par->size = info->fix.smem_len;
 
        info->fbops = &simplefb_ops;
-       info->flags = FBINFO_DEFAULT;
        info->screen_base = ioremap_wc(info->fix.smem_start,
                                       info->fix.smem_len);
        if (!info->screen_base) {
index cfba776afcea6d7a3ee464f34d32c4d61f95314e..0f5374f6ef055996b0345ba2bdc81103c1926a96 100644 (file)
@@ -6472,14 +6472,11 @@ error_3:        vfree(ivideo->bios_abase);
                sisfb_initaccel(ivideo);
 
 #if defined(FBINFO_HWACCEL_DISABLED) && defined(FBINFO_HWACCEL_XPAN)
-               sis_fb_info->flags = FBINFO_DEFAULT             |
-                                    FBINFO_HWACCEL_YPAN        |
+               sis_fb_info->flags = FBINFO_HWACCEL_YPAN        |
                                     FBINFO_HWACCEL_XPAN        |
                                     FBINFO_HWACCEL_COPYAREA    |
                                     FBINFO_HWACCEL_FILLRECT    |
                                     ((ivideo->accel) ? 0 : FBINFO_HWACCEL_DISABLED);
-#else
-               sis_fb_info->flags = FBINFO_FLAG_DEFAULT;
 #endif
                sis_fb_info->var = ivideo->default_var;
                sis_fb_info->fix = ivideo->sisfb_fix;
index 40c130ab6b38d0c0f374b3448b074aa97ed87b79..7e98850d9bde9b88965c7e18e1cde94d3c378b7d 100644 (file)
@@ -716,7 +716,7 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
      *
      * NOTE: These are for fbcon use only.
      */
-    info->flags = FBINFO_DEFAULT;
+    info->flags = 0;
 
 /********************* This stage is optional ******************************/
      /*
index e0d29be1565bb68ae9c9087ebabc162ac82cfa50..65c799ac5604f5a80f159ac0607a68eef82c170a 100644 (file)
@@ -1293,7 +1293,7 @@ static int sm501fb_sync(struct fb_info *info)
                count--;
 
        if (count <= 0) {
-               dev_err(info->dev, "Timeout waiting for 2d engine sync\n");
+               fb_err(info, "Timeout waiting for 2d engine sync\n");
                return 1;
        }
        return 0;
@@ -1731,7 +1731,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
                par->ops.fb_cursor = NULL;
 
        fb->fbops = &par->ops;
-       fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST |
+       fb->flags = FBINFO_READS_FAST |
                FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
                FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
 
index b7ad3c644e138db5a76c09ac15acd79ec8c78034..db129ed3b2f7cfd52889ab29a32138fb7b92db81 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/console.h>
-#include <linux/screen_info.h>
 
 #include <linux/pm.h>
 
 #include "sm712.h"
 
+struct smtcfb_screen_info {
+       u16 lfb_width;
+       u16 lfb_height;
+       u16 lfb_depth;
+};
+
 /*
  * Private structure
  */
@@ -829,7 +834,7 @@ static const struct modeinit vgamode[] = {
        },
 };
 
-static struct screen_info smtc_scr_info;
+static struct smtcfb_screen_info smtc_scr_info;
 
 static char *mode_option;
 
@@ -1523,7 +1528,6 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
        sfb->fb = info;
        sfb->chip_id = ent->device;
        sfb->pdev = pdev;
-       info->flags = FBINFO_FLAG_DEFAULT;
        info->fbops = &smtcfb_ops;
        info->fix = smtcfb_fix;
        info->var = smtcfb_var;
index 17cec62cc65db584f97a572281d7003de1090a48..387d18706fecfe6f464fc280341d456d7711b693 100644 (file)
@@ -114,7 +114,7 @@ static struct fb_fix_screeninfo ufx_fix = {
        .accel =        FB_ACCEL_NONE,
 };
 
-static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
+static const u32 smscufx_info_flags = FBINFO_READS_FAST |
        FBINFO_VIRTFB | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
        FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
 
@@ -1496,7 +1496,7 @@ static int ufx_setup_modes(struct ufx_data *dev, struct fb_info *info,
        u8 *edid;
        int i, result = 0, tries = 3;
 
-       if (info->dev) /* only use mutex if info has been registered */
+       if (refcount_read(&info->count)) /* only use mutex if info has been registered */
                mutex_lock(&info->lock);
 
        edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
@@ -1610,7 +1610,7 @@ error:
        if (edid && (dev->edid != edid))
                kfree(edid);
 
-       if (info->dev)
+       if (refcount_read(&info->count))
                mutex_unlock(&info->lock);
 
        return result;
index 11c3737982790cda958221c50ddfa18c7074453a..5aee62434443674335b56c912071cfcee9195b67 100644 (file)
@@ -292,43 +292,6 @@ static int ssd1307fb_update_display(struct ssd1307fb_par *par)
        return ssd1307fb_update_rect(par, 0, 0, par->width, par->height);
 }
 
-static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf,
-               size_t count, loff_t *ppos)
-{
-       struct ssd1307fb_par *par = info->par;
-       unsigned long total_size;
-       unsigned long p = *ppos;
-       void *dst;
-       int ret;
-
-       if (!info->screen_buffer)
-               return -ENODEV;
-
-       total_size = info->fix.smem_len;
-
-       if (p > total_size)
-               return -EINVAL;
-
-       if (count + p > total_size)
-               count = total_size - p;
-
-       if (!count)
-               return -EINVAL;
-
-       dst = info->screen_buffer + p;
-
-       if (copy_from_user(dst, buf, count))
-               return -EFAULT;
-
-       ret = ssd1307fb_update_display(par);
-       if (ret < 0)
-               return ret;
-
-       *ppos += count;
-
-       return count;
-}
-
 static int ssd1307fb_blank(int blank_mode, struct fb_info *info)
 {
        struct ssd1307fb_par *par = info->par;
@@ -339,39 +302,29 @@ static int ssd1307fb_blank(int blank_mode, struct fb_info *info)
                return ssd1307fb_write_cmd(par->client, SSD1307FB_DISPLAY_ON);
 }
 
-static void ssd1307fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+static void ssd1307fb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
 {
        struct ssd1307fb_par *par = info->par;
-       sys_fillrect(info, rect);
-       ssd1307fb_update_rect(par, rect->dx, rect->dy, rect->width,
-                             rect->height);
-}
 
-static void ssd1307fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
-{
-       struct ssd1307fb_par *par = info->par;
-       sys_copyarea(info, area);
-       ssd1307fb_update_rect(par, area->dx, area->dy, area->width,
-                             area->height);
+       ssd1307fb_update_display(par);
 }
 
-static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *image)
+static void ssd1307fb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+                                       u32 width, u32 height)
 {
        struct ssd1307fb_par *par = info->par;
-       sys_imageblit(info, image);
-       ssd1307fb_update_rect(par, image->dx, image->dy, image->width,
-                             image->height);
+
+       ssd1307fb_update_rect(par, x, y, width, height);
 }
 
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(ssd1307fb,
+                                  ssd1307fb_defio_damage_range,
+                                  ssd1307fb_defio_damage_area)
+
 static const struct fb_ops ssd1307fb_ops = {
        .owner          = THIS_MODULE,
-       .fb_read        = fb_sys_read,
-       .fb_write       = ssd1307fb_write,
+       FB_DEFAULT_DEFERRED_OPS(ssd1307fb),
        .fb_blank       = ssd1307fb_blank,
-       .fb_fillrect    = ssd1307fb_fillrect,
-       .fb_copyarea    = ssd1307fb_copyarea,
-       .fb_imageblit   = ssd1307fb_imageblit,
-       .fb_mmap        = fb_deferred_io_mmap,
 };
 
 static void ssd1307fb_deferred_io(struct fb_info *info, struct list_head *pagereflist)
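
ssd1307fb's removed fb_write/fillrect/copyarea/imageblit wrappers all followed one recipe: perform the sys_*() drawing op, then push the touched region out to the panel. FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS() generates exactly that family of wrappers from the two damage callbacks, and FB_DEFAULT_DEFERRED_OPS() plugs them (plus fb_deferred_io_mmap) into fb_ops. Roughly, for a hypothetical driver foo with damage callbacks foo_damage_range/foo_damage_area, the generated fillrect looks like:

	static void foo_defio_fillrect(struct fb_info *info,
				       const struct fb_fillrect *rect)
	{
		sys_fillrect(info, rect);
		foo_damage_area(info, rect->dx, rect->dy,
				rect->width, rect->height);
	}

with matching read/write/copyarea/imageblit variants, which is why the explicit ssd1307fb_update_rect() plumbing could go.
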
index 582324f5d869a13bb5509321d6f02f933874e868..f8ae54ca0cc34f2e10735857bcfc05d504b46db5 100644 (file)
@@ -1310,12 +1310,10 @@ static int sstfb_setup(char *options)
 
 static const struct fb_ops sstfb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = sstfb_check_var,
        .fb_set_par     = sstfb_set_par,
        .fb_setcolreg   = sstfb_setcolreg,
-       .fb_fillrect    = cfb_fillrect, /* sstfb_fillrect */
-       .fb_copyarea    = cfb_copyarea, /* sstfb_copyarea */
-       .fb_imageblit   = cfb_imageblit,
        .fb_ioctl       = sstfb_ioctl,
 };
 
@@ -1399,7 +1397,6 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        f_ddprintk("membase_phys: %#lx\n", fix->smem_start);
        f_ddprintk("fbbase_virt: %p\n", info->screen_base);
 
-       info->flags     = FBINFO_DEFAULT;
        info->fbops     = &sstfb_ops;
        info->pseudo_palette = par->palette;
 
index 490bd9a147638e6514cdc5fe23a8afacce2a5d3d..3350f56058c476f704e0bcfa2370cddcd7b97de7 100644 (file)
@@ -61,10 +61,8 @@ static int gfb_setcolreg(unsigned regno,
 
 static const struct fb_ops gfb_ops = {
        .owner                  = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg           = gfb_setcolreg,
-       .fb_fillrect            = cfb_fillrect,
-       .fb_copyarea            = cfb_copyarea,
-       .fb_imageblit           = cfb_imageblit,
 };
 
 static int gfb_set_fbinfo(struct gfb_info *gp)
@@ -72,7 +70,6 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
        struct fb_info *info = gp->info;
        struct fb_var_screeninfo *var = &info->var;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &gfb_ops;
        info->screen_base = gp->fb_base;
        info->screen_size = gp->fb_size;
index 2cab4b9be68aaddcaacf1e8a76a31863fe222d93..47b85e4959dd671147df83f0a42208a58a65030d 100644 (file)
@@ -66,10 +66,8 @@ static int s3d_setcolreg(unsigned regno,
 
 static const struct fb_ops s3d_ops = {
        .owner                  = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg           = s3d_setcolreg,
-       .fb_fillrect            = cfb_fillrect,
-       .fb_copyarea            = cfb_copyarea,
-       .fb_imageblit           = cfb_imageblit,
 };
 
 static int s3d_set_fbinfo(struct s3d_info *sp)
@@ -77,7 +75,6 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
        struct fb_info *info = sp->info;
        struct fb_var_screeninfo *var = &info->var;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &s3d_ops;
        info->screen_base = sp->fb_base;
        info->screen_size = sp->fb_size;
index 6ec358af125603ad55a2e488fb872af74a8aa63a..28a5e2251119afc2dd7150364045226a5b49c83f 100644 (file)
@@ -200,7 +200,6 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
        struct fb_info *info = ep->info;
        struct fb_var_screeninfo *var = &info->var;
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &e3d_ops;
        info->screen_base = ep->fb_base;
        info->screen_size = ep->fb_size;
index fc3ac2301b455bc6042f2350aa39d2e5eedcd79a..3572766de89c37ee1adec8d269e77effe07fe4d2 100644 (file)
@@ -438,7 +438,6 @@ static int tcx_probe(struct platform_device *op)
                par->mmap_map[i].poff = op->resource[j].start;
        }
 
-       info->flags = FBINFO_DEFAULT;
        info->fbops = &tcx_ops;
 
        /* Initialize brooktree DAC. */
index cdf8e9fe99487f7ab71ae9aa2d232c2f2d3b0986..68e2a82220f3fbfa2be15fbe6cc5bb2d3f05298a 100644 (file)
@@ -1327,8 +1327,8 @@ static void tdfxfb_create_i2c_busses(struct fb_info *info)
        par->chan[0].par = par;
        par->chan[1].par = par;
 
-       tdfxfb_setup_ddc_bus(&par->chan[0], "Voodoo3-DDC", info->dev);
-       tdfxfb_setup_i2c_bus(&par->chan[1], "Voodoo3-I2C", info->dev);
+       tdfxfb_setup_ddc_bus(&par->chan[0], "Voodoo3-DDC", info->device);
+       tdfxfb_setup_i2c_bus(&par->chan[1], "Voodoo3-I2C", info->device);
 }
 
 static void tdfxfb_delete_i2c_busses(struct tdfx_par *par)
@@ -1468,7 +1468,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        info->fbops             = &tdfxfb_ops;
        info->pseudo_palette    = default_par->palette;
-       info->flags             = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags             = FBINFO_HWACCEL_YPAN;
 #ifdef CONFIG_FB_3DFX_ACCEL
        info->flags             |= FBINFO_HWACCEL_FILLRECT |
                                   FBINFO_HWACCEL_COPYAREA |
index b44004880f0d1e25119ab9028aafc9acd0c61177..fc2d08dd1b45a79bbe453f7211358ac227105172 100644 (file)
@@ -1470,7 +1470,7 @@ static int tgafb_register(struct device *dev)
                par->tga_chip_rev = TGA_READ_REG(par, TGA_START_REG) & 0xff;
 
        /* Setup framebuffer.  */
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+       info->flags = FBINFO_HWACCEL_COPYAREA |
                      FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
        info->fbops = &tgafb_ops;
        info->screen_base = par->tga_fb_base;
index 6099b9768ba16bfe8c4f00da4bd05e5348b868cd..1ba157530af2fef6461b2d7756a7d5b4f6f162d0 100644 (file)
@@ -1600,7 +1600,7 @@ static int trident_pci_probe(struct pci_dev *dev,
        info->fbops = &tridentfb_ops;
        info->pseudo_palette = default_par->pseudo_pal;
 
-       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
        if (!noaccel && default_par->init_accel) {
                info->flags &= ~FBINFO_HWACCEL_DISABLED;
                info->flags |= FBINFO_HWACCEL_COPYAREA;
index a4a21b4ac28c6fd8affa7e6ba4d5dea4bde7a7e2..b70762ead13cdfe4d66c9ccb8ac424565c8fb6da 100644 (file)
@@ -39,7 +39,7 @@ static const struct fb_fix_screeninfo dlfb_fix = {
        .accel =        FB_ACCEL_NONE,
 };
 
-static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
+static const u32 udlfb_info_flags = FBINFO_READS_FAST |
                FBINFO_VIRTFB |
                FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
                FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
index 78d85dae8ec8005a0814747575345b40c3facc8b..a1a67830fbbcc22e7db922a7c77d73fb6a2ac66c 100644 (file)
@@ -1416,13 +1416,11 @@ static struct fb_ops uvesafb_ops = {
        .owner          = THIS_MODULE,
        .fb_open        = uvesafb_open,
        .fb_release     = uvesafb_release,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg   = uvesafb_setcolreg,
        .fb_setcmap     = uvesafb_setcmap,
        .fb_pan_display = uvesafb_pan_display,
        .fb_blank       = uvesafb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
        .fb_check_var   = uvesafb_check_var,
        .fb_set_par     = uvesafb_set_par,
 };
@@ -1508,8 +1506,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
                par->ypan = 0;
        }
 
-       info->flags = FBINFO_FLAG_DEFAULT |
-                       (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
+       info->flags = (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
 
        if (!par->ypan)
                uvesafb_ops.fb_pan_display = NULL;
index b166b7cfe0e56fcf24cfeaf2205c860044087278..91d070ef69897d0002ce9f1fb368c9fe9e23ecba 100644 (file)
@@ -110,13 +110,11 @@ static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valk
 
 static const struct fb_ops valkyriefb_ops = {
        .owner =        THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_check_var = valkyriefb_check_var,
        .fb_set_par =   valkyriefb_set_par,
        .fb_setcolreg = valkyriefb_setcolreg,
        .fb_blank =     valkyriefb_blank,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 /* Sets the video mode according to info->var */
@@ -535,7 +533,6 @@ static int __init valkyrie_init_info(struct fb_info *info,
 {
        info->fbops = &valkyriefb_ops;
        info->screen_base = p->frame_buffer + 0x1000;
-       info->flags = FBINFO_DEFAULT;
        info->pseudo_palette = p->pseudo_palette;
        info->par = &p->par;
        return fb_alloc_cmap(&info->cmap, 256, 0);
index 32e74e02a02fdfecbfc1f1f98abad45ce7c97dbc..71584c775efd49718b43990df60b6ea0cfcc57bd 100644 (file)
@@ -477,7 +477,7 @@ static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        }
 
        info = &vinfo->info;
-       info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;
+       info->flags = FBINFO_PARTIAL_PAN_OK;
 
        err = vmlfb_enable_mmio(par);
        if (err)
index 7451c607dc5078cac76a2c2191203c25cc2b36ca..c0edceea0a7934f37c5d2f49874e76948c6eef1e 100644 (file)
@@ -201,12 +201,10 @@ static void vesafb_destroy(struct fb_info *info)
 
 static struct fb_ops vesafb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_destroy     = vesafb_destroy,
        .fb_setcolreg   = vesafb_setcolreg,
        .fb_pan_display = vesafb_pan_display,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int vesafb_setup(char *options)
@@ -457,7 +455,7 @@ static int vesafb_probe(struct platform_device *dev)
        info->fbops = &vesafb_ops;
        info->var = vesafb_defined;
        info->fix = vesafb_fix;
-       info->flags = FBINFO_FLAG_DEFAULT | (ypan ? FBINFO_HWACCEL_YPAN : 0);
+       info->flags = (ypan ? FBINFO_HWACCEL_YPAN : 0);
 
        if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
                err = -ENOMEM;
index cf3c72754ce702f66c66f624fbee0f7553756c51..1b7c338f99560fc4a3bca8cdfe9ba2d495ca3eaf 100644 (file)
@@ -455,7 +455,6 @@ static int vfb_probe(struct platform_device *dev)
        info->fix = vfb_fix;
        info->pseudo_palette = info->par;
        info->par = NULL;
-       info->flags = FBINFO_FLAG_DEFAULT;
 
        retval = fb_alloc_cmap(&info->cmap, 256, 0);
        if (retval < 0)
index 34d00347ad58a6c800db39cd126ec44af8f3770a..b43c874c199f679f7d3c4e063b3a7f4bda4c97da 100644 (file)
@@ -1359,7 +1359,7 @@ static int vga16fb_probe(struct platform_device *dev)
        info->fix = vga16fb_fix;
        /* supports rectangles with widths of multiples of 8 */
        info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->flags = FBINFO_HWACCEL_YPAN;
 
        i = (info->var.bits_per_pixel == 8) ? 256 : 16;
        ret = fb_alloc_cmap(&info->cmap, i, 0);
index 2d67c92c57749c8530f8cc42e73552db86ef2837..190fddee62e678034cc289e30f736087da707ae5 100644 (file)
@@ -1770,7 +1770,7 @@ int via_fb_pci_probe(struct viafb_dev *vdev)
        viafbinfo->fix.mmio_len = vdev->engine_len;
        viafbinfo->node = 0;
        viafbinfo->fbops = &viafb_ops;
-       viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       viafbinfo->flags = FBINFO_HWACCEL_YPAN;
 
        viafbinfo->pseudo_palette = pseudo_pal;
        if (viafb_accel && !viafb_setup_engine(viafbinfo)) {
index 31d4e85b220c0f55184abb1498ed84725b2a2a06..42d39a9d5130f98fe9beb56774b0225e0240e2da 100644 (file)
@@ -300,8 +300,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
        fbi->fb.var.vmode       = FB_VMODE_NONINTERLACED;
 
        fbi->fb.fbops           = &vt8500lcd_ops;
-       fbi->fb.flags           = FBINFO_DEFAULT
-                               | FBINFO_HWACCEL_COPYAREA
+       fbi->fb.flags           = FBINFO_HWACCEL_COPYAREA
                                | FBINFO_HWACCEL_FILLRECT
                                | FBINFO_HWACCEL_YPAN
                                | FBINFO_VIRTFB
index 10a8b12501039a7c18cd8d5db9d43c596781e035..5833147aa43d176a1d7f26449a66405899b1a16c 100644 (file)
@@ -285,8 +285,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
        fbi->fb.fix.accel       = FB_ACCEL_NONE;
 
        fbi->fb.fbops           = &wm8505fb_ops;
-       fbi->fb.flags           = FBINFO_DEFAULT
-                               | FBINFO_HWACCEL_COPYAREA
+       fbi->fb.flags           = FBINFO_HWACCEL_COPYAREA
                                | FBINFO_HWACCEL_FILLRECT
                                | FBINFO_HWACCEL_XPAN
                                | FBINFO_HWACCEL_YPAN
index 9b2a786621a63b07c76d00397f053cd67eb55db4..66d4628a96ae04d1d28837b4d373262273649b8d 100644 (file)
@@ -240,41 +240,6 @@ static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
        return 0;
 }
 
-static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
-{
-       struct xenfb_info *info = p->par;
-
-       sys_fillrect(p, rect);
-       xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
-{
-       struct xenfb_info *info = p->par;
-
-       sys_imageblit(p, image);
-       xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
-}
-
-static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
-{
-       struct xenfb_info *info = p->par;
-
-       sys_copyarea(p, area);
-       xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
-}
-
-static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
-                       size_t count, loff_t *ppos)
-{
-       struct xenfb_info *info = p->par;
-       ssize_t res;
-
-       res = fb_sys_write(p, buf, count, ppos);
-       xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
-       return res;
-}
-
 static int
 xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
@@ -326,17 +291,31 @@ static int xenfb_set_par(struct fb_info *info)
        return 0;
 }
 
+static void xenfb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
+{
+       struct xenfb_info *xenfb_info = info->par;
+
+       xenfb_refresh(xenfb_info, 0, 0, xenfb_info->page->width, xenfb_info->page->height);
+}
+
+static void xenfb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+                                   u32 width, u32 height)
+{
+       struct xenfb_info *xenfb_info = info->par;
+
+       xenfb_refresh(xenfb_info, x, y, width, height);
+}
+
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(xenfb,
+                                  xenfb_defio_damage_range,
+                                  xenfb_defio_damage_area)
+
 static const struct fb_ops xenfb_fb_ops = {
        .owner          = THIS_MODULE,
-       .fb_read        = fb_sys_read,
-       .fb_write       = xenfb_write,
+       FB_DEFAULT_DEFERRED_OPS(xenfb),
        .fb_setcolreg   = xenfb_setcolreg,
-       .fb_fillrect    = xenfb_fillrect,
-       .fb_copyarea    = xenfb_copyarea,
-       .fb_imageblit   = xenfb_imageblit,
        .fb_check_var   = xenfb_check_var,
        .fb_set_par     = xenfb_set_par,
-       .fb_mmap        = fb_deferred_io_mmap,
 };
 
 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
@@ -453,7 +432,7 @@ static int xenfb_probe(struct xenbus_device *dev,
        fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
        fb_info->fix.accel = FB_ACCEL_NONE;
 
-       fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       fb_info->flags = FBINFO_VIRTFB;
 
        ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
        if (ret < 0) {
index 2aa3a528277f6019f027ba2fe38fa0ccb7219eb2..bf049a1a6ab2d288301a04ff8a7e1d5f542f81f6 100644 (file)
@@ -251,11 +251,9 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
 
 static const struct fb_ops xilinxfb_ops = {
        .owner                  = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_setcolreg           = xilinx_fb_setcolreg,
        .fb_blank               = xilinx_fb_blank,
-       .fb_fillrect            = cfb_fillrect,
-       .fb_copyarea            = cfb_copyarea,
-       .fb_imageblit           = cfb_imageblit,
 };
 
 /* ---------------------------------------------------------------------
@@ -324,7 +322,6 @@ static int xilinxfb_assign(struct platform_device *pdev,
        drvdata->info.fix.line_length = pdata->xvirt * BYTES_PER_PIXEL;
 
        drvdata->info.pseudo_palette = drvdata->pseudo_palette;
-       drvdata->info.flags = FBINFO_DEFAULT;
        drvdata->info.var = xilinx_fb_var;
        drvdata->info.var.height = pdata->screen_height_mm;
        drvdata->info.var.width = pdata->screen_width_mm;
index 6d6f8c08792dc8977cba65817b30d961034d6a68..b7d94d1dd1585a8418b365f5ce0c3339d4a7045e 100644 (file)
@@ -5,7 +5,7 @@
 
 menuconfig LOGO
        bool "Bootup logo"
-       depends on FB || SGI_NEWPORT_CONSOLE
+       depends on FB_CORE || SGI_NEWPORT_CONSOLE
        help
          Enable and select frame buffer bootup logos.
 
index 9784a77fa3c990edfa6b551497e659c1d907a5a4..76f6f26265a3b8ba4b7a192bd6d4cdf0ba867d10 100644 (file)
@@ -303,6 +303,8 @@ static struct device_node *xen_dt_get_node(struct device *dev)
                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
 
+               if (!bus->bridge->parent)
+                       return NULL;
                return of_node_get(bus->bridge->parent->of_node);
        }
 
index 2a29943fa5cc41e8b5580d5b8c08d9672bedada4..cfad1eac7fd9df4663a33c0f52dae2cb90af6e9c 100644 (file)
@@ -148,7 +148,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
                *maptype = 0;
                return inpage;
        }
-       kunmap_atomic(inpage);
+       kunmap_local(inpage);
        might_sleep();
        src = erofs_vm_map_ram(rq->in, ctx->inpages);
        if (!src)
@@ -162,7 +162,7 @@ docopy:
        src = erofs_get_pcpubuf(ctx->inpages);
        if (!src) {
                DBG_BUGON(1);
-               kunmap_atomic(inpage);
+               kunmap_local(inpage);
                return ERR_PTR(-EFAULT);
        }
 
@@ -173,9 +173,9 @@ docopy:
                        min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
 
                if (!inpage)
-                       inpage = kmap_atomic(*in);
+                       inpage = kmap_local_page(*in);
                memcpy(tmp, inpage + *inputmargin, page_copycnt);
-               kunmap_atomic(inpage);
+               kunmap_local(inpage);
                inpage = NULL;
                tmp += page_copycnt;
                total -= page_copycnt;
@@ -214,7 +214,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
        int ret, maptype;
 
        DBG_BUGON(*rq->in == NULL);
-       headpage = kmap_atomic(*rq->in);
+       headpage = kmap_local_page(*rq->in);
 
        /* LZ4 decompression inplace is only safe if zero_padding is enabled */
        if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
@@ -223,7 +223,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
                                min_t(unsigned int, rq->inputsize,
                                      rq->sb->s_blocksize - rq->pageofs_in));
                if (ret) {
-                       kunmap_atomic(headpage);
+                       kunmap_local(headpage);
                        return ret;
                }
                may_inplace = !((rq->pageofs_in + rq->inputsize) &
@@ -261,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
        }
 
        if (maptype == 0) {
-               kunmap_atomic(headpage);
+               kunmap_local(headpage);
        } else if (maptype == 1) {
                vm_unmap_ram(src, ctx->inpages);
        } else if (maptype == 2) {
@@ -289,7 +289,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
        /* one optimized fast path only for non bigpcluster cases yet */
        if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
                DBG_BUGON(!*rq->out);
-               dst = kmap_atomic(*rq->out);
+               dst = kmap_local_page(*rq->out);
                dst_maptype = 0;
                goto dstmap_out;
        }
@@ -311,7 +311,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 dstmap_out:
        ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
        if (!dst_maptype)
-               kunmap_atomic(dst);
+               kunmap_local(dst);
        else if (dst_maptype == 2)
                vm_unmap_ram(dst, ctx.outpages);
        return ret;
@@ -328,7 +328,7 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
        const unsigned int lefthalf = rq->outputsize - righthalf;
        const unsigned int interlaced_offset =
                rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
-       unsigned char *src, *dst;
+       u8 *src;
 
        if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
                DBG_BUGON(1);
@@ -341,22 +341,19 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
        }
 
        src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
-       if (rq->out[0]) {
-               dst = kmap_local_page(rq->out[0]);
-               memcpy(dst + rq->pageofs_out, src + interlaced_offset,
-                      righthalf);
-               kunmap_local(dst);
-       }
+       if (rq->out[0])
+               memcpy_to_page(rq->out[0], rq->pageofs_out,
+                              src + interlaced_offset, righthalf);
 
        if (outpages > inpages) {
                DBG_BUGON(!rq->out[outpages - 1]);
                if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
-                       dst = kmap_local_page(rq->out[outpages - 1]);
-                       memcpy(dst, interlaced_offset ? src :
-                                       (src + righthalf), lefthalf);
-                       kunmap_local(dst);
+                       memcpy_to_page(rq->out[outpages - 1], 0, src +
+                                       (interlaced_offset ? 0 : righthalf),
+                                      lefthalf);
                } else if (!interlaced_offset) {
                        memmove(src, src + righthalf, lefthalf);
+                       flush_dcache_page(rq->in[inpages - 1]);
                }
        }
        kunmap_local(src);
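
The erofs decompressor hunks above are part of the tree-wide move away from kmap_atomic(). kmap_local_page() provides the same short-lived, CPU-local highmem mapping but without disabling preemption or page faults, so the code between map and unmap may sleep; the constraint is that local mappings nest and must be released in reverse order on the same thread. memcpy_to_page() additionally bundles map, copy, flush_dcache_page() and unmap for one-shot copies, which is why an explicit flush now appears only in the remaining memmove() branch. A small sketch of both idioms:

	/* local mapping: sleeping between map and unmap is allowed */
	void *src = kmap_local_page(page);
	memcpy(buf, src + offset, len);
	kunmap_local(src);

	/* one-shot copy into a page, dcache flushing included */
	memcpy_to_page(dst_page, dst_off, buf, len);
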
index d70b12b81507fad3d601744b3fa55a6fd2212ead..e12592727a546813e75469751f5a22a9c966c697 100644 (file)
@@ -183,7 +183,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
 
        inode->i_flags &= ~S_DAX;
        if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
-           vi->datalayout == EROFS_INODE_FLAT_PLAIN)
+           (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
+            vi->datalayout == EROFS_INODE_CHUNK_BASED))
                inode->i_flags |= S_DAX;
 
        if (!nblks)
index 5f1890e309c6ebaeebc4672a1665a4ec65043a53..b69d89a11dd03bd78dc29d5d7e74767dee7e5bdd 100644 (file)
@@ -1035,7 +1035,7 @@ hitted:
         */
        tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
 
-       cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+       cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                zero_user_segment(page, cur, end);
                goto next_part;
@@ -1841,7 +1841,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
        }
 
        cur = map->m_la + map->m_llen - 1;
-       while (cur >= end) {
+       while ((cur >= end) && (cur < i_size_read(inode))) {
                pgoff_t index = cur >> PAGE_SHIFT;
                struct page *page;
 
index b5808fe3469a21002acacd36ed474a72ba316206..e5eec6d38d028802eda2ab6fe0de6d0c08662502 100644 (file)
@@ -532,7 +532,7 @@ struct smb_version_operations {
        /* Check for STATUS_IO_TIMEOUT */
        bool (*is_status_io_timeout)(char *buf);
        /* Check for STATUS_NETWORK_NAME_DELETED */
-       void (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
+       bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
 };
 
 struct smb_version_values {
index 19f7385abeeccdba500799fc0a8ac13613c8c262..9dee267f18938d4b45678a50c050e89d05955b2d 100644 (file)
@@ -3184,7 +3184,7 @@ setAclRetry:
        param_offset = offsetof(struct smb_com_transaction2_spi_req,
                                InformationLevel) - 4;
        offset = param_offset + params;
-       parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
+       parm_data = ((char *)pSMB) + sizeof(pSMB->hdr.smb_buf_length) + offset;
        pSMB->ParameterOffset = cpu_to_le16(param_offset);
 
        /* convert to on the wire format for POSIX ACL */
index 85dd1b373974a1500ed462ab9f292aa686991ab1..9280e253bf09267d54801236fc8287d8e7bad1c3 100644 (file)
@@ -60,7 +60,7 @@ extern bool disable_legacy_dialects;
 #define TLINK_IDLE_EXPIRE      (600 * HZ)
 
 /* Drop the connection to not overload the server */
-#define NUM_STATUS_IO_TIMEOUT   5
+#define MAX_STATUS_IO_TIMEOUT   5
 
 static int ip_connect(struct TCP_Server_Info *server);
 static int generic_ip_connect(struct TCP_Server_Info *server);
@@ -1117,6 +1117,7 @@ cifs_demultiplex_thread(void *p)
        struct mid_q_entry *mids[MAX_COMPOUND];
        char *bufs[MAX_COMPOUND];
        unsigned int noreclaim_flag, num_io_timeout = 0;
+       bool pending_reconnect = false;
 
        noreclaim_flag = memalloc_noreclaim_save();
        cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
@@ -1156,6 +1157,8 @@ cifs_demultiplex_thread(void *p)
                cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
                if (!is_smb_response(server, buf[0]))
                        continue;
+
+               pending_reconnect = false;
 next_pdu:
                server->pdu_size = pdu_length;
 
@@ -1213,10 +1216,13 @@ next_pdu:
                if (server->ops->is_status_io_timeout &&
                    server->ops->is_status_io_timeout(buf)) {
                        num_io_timeout++;
-                       if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
-                               cifs_reconnect(server, false);
+                       if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
+                               cifs_server_dbg(VFS,
+                                               "Number of request timeouts exceeded %d. Reconnecting",
+                                               MAX_STATUS_IO_TIMEOUT);
+
+                               pending_reconnect = true;
                                num_io_timeout = 0;
-                               continue;
                        }
                }
 
@@ -1226,9 +1232,14 @@ next_pdu:
                        if (mids[i] != NULL) {
                                mids[i]->resp_buf_size = server->pdu_size;
 
-                               if (bufs[i] && server->ops->is_network_name_deleted)
-                                       server->ops->is_network_name_deleted(bufs[i],
-                                                                       server);
+                               if (bufs[i] != NULL) {
+                                       if (server->ops->is_network_name_deleted &&
+                                           server->ops->is_network_name_deleted(bufs[i],
+                                                                                server)) {
+                                               cifs_server_dbg(FYI,
+                                                               "Share deleted. Reconnect needed");
+                                       }
+                               }
 
                                if (!mids[i]->multiRsp || mids[i]->multiEnd)
                                        mids[i]->callback(mids[i]);
@@ -1263,6 +1274,11 @@ next_pdu:
                        buf = server->smallbuf;
                        goto next_pdu;
                }
+
+               /* do this reconnect at the very end after processing all MIDs */
+               if (pending_reconnect)
+                       cifs_reconnect(server, true);
+
        } /* end while !EXITING */
 
        /* buffer usually freed in free_mid - need to free it here on exit */
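
The demultiplex-thread rework above changes when the reconnect fires, not whether: an excess of STATUS_IO_TIMEOUT replies used to call cifs_reconnect() immediately and skip the rest of the iteration, leaving the MIDs already read off the wire undispatched. Now the condition only latches pending_reconnect, every mids[i]->callback() still runs, and the reconnect (now with the mark_smb_session argument set) is issued once at the bottom of the loop. In outline:

	if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
		pending_reconnect = true;	/* defer; don't tear down mid-PDU */
		num_io_timeout = 0;
	}
	/* ... dispatch all completed MIDs ... */
	if (pending_reconnect)
		cifs_reconnect(server, true);
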
index 1403a2d1ab173bd19c710b72e2cb58c588df9104..df3fd3b720dac3f2cdde9c7bff587f51d33438a1 100644 (file)
@@ -66,6 +66,12 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
        return rc;
 }
 
+/*
+ * Track individual DFS referral servers used by new DFS mount.
+ *
+ * On success, their lifetime will be shared by final tcon (dfs_ses_list).
+ * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
+ */
 static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
 {
        struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
@@ -80,11 +86,12 @@ static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
                INIT_LIST_HEAD(&root_ses->list);
 
                spin_lock(&cifs_tcp_ses_lock);
-               ses->ses_count++;
+               cifs_smb_ses_inc_refcount(ses);
                spin_unlock(&cifs_tcp_ses_lock);
                root_ses->ses = ses;
                list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
        }
+       /* Select new DFS referral server so that new referrals go through it */
        ctx->dfs_root_ses = ses;
        return 0;
 }
@@ -242,7 +249,6 @@ out:
 int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
 {
        struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
-       struct cifs_ses *ses;
        bool nodfs = ctx->nodfs;
        int rc;
 
@@ -276,20 +282,8 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
        }
 
        *isdfs = true;
-       /*
-        * Prevent DFS root session of being put in the first call to
-        * cifs_mount_put_conns().  If another DFS root server was not found
-        * while chasing the referrals (@ctx->dfs_root_ses == @ses), then we
-        * can safely put extra refcount of @ses.
-        */
-       ses = mnt_ctx->ses;
-       mnt_ctx->ses = NULL;
-       mnt_ctx->server = NULL;
-       rc = __dfs_mount_share(mnt_ctx);
-       if (ses == ctx->dfs_root_ses)
-               cifs_put_smb_ses(ses);
-
-       return rc;
+       add_root_smb_session(mnt_ctx);
+       return __dfs_mount_share(mnt_ctx);
 }
 
 /* Update dfs referral path of superblock */
index 879bc8e6555c11c904f1da92928ddfb97cd2e4c8..fc5acc95cd13f9ea30223985b9b9132f76f578c5 100644 (file)
@@ -1080,8 +1080,8 @@ int cifs_close(struct inode *inode, struct file *file)
                cfile = file->private_data;
                file->private_data = NULL;
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
-               if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
-                   cinode->lease_granted &&
+               if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG)
+                   && cinode->lease_granted &&
                    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
                    dclose) {
                        if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
index 87abce010974403ba9ece93313e336baf06512e9..0f62bc373ad05cdf84ab2da8e12c18d1c134eeac 100644 (file)
@@ -2395,7 +2395,7 @@ smb2_is_status_io_timeout(char *buf)
                return false;
 }
 
-static void
+static bool
 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
 {
        struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
@@ -2404,7 +2404,7 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
        struct cifs_tcon *tcon;
 
        if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
-               return;
+               return false;
 
        /* If server is a channel, select the primary channel */
        pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
@@ -2419,11 +2419,13 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
                                spin_unlock(&cifs_tcp_ses_lock);
                                pr_warn_once("Server share %s deleted.\n",
                                             tcon->tree_name);
-                               return;
+                               return true;
                        }
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
+
+       return false;
 }
 
 static int
index c6db898dab7c437fe19fe795acd8531a79c7ac23..7676091b3e77a49d70a23dabc156fda67a458f7b 100644 (file)
@@ -160,7 +160,7 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
                        spin_unlock(&ses->ses_lock);
                        continue;
                }
-               ++ses->ses_count;
+               cifs_smb_ses_inc_refcount(ses);
                spin_unlock(&ses->ses_lock);
                return ses;
        }
index 0587354ba678229d772780bf682b655c4cc914e7..9c59409104f69830fe229ef8c32df24cb9c03e07 100644 (file)
                *(.text.unlikely .text.unlikely.*)                      \
                *(.text.unknown .text.unknown.*)                        \
                NOINSTR_TEXT                                            \
-               *(.text..refcount)                                      \
                *(.ref.text)                                            \
                *(.text.asan.* .text.tsan.*)                            \
        MEM_KEEP(init.text*)                                            \
index f668e75fbabe6191e7fb33dd032df6321b5b8679..6a46baa0737cd082927bb85afe9cf3434969a24b 100644 (file)
@@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
                            bool force, bool disabled, bool rxsense);
 void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
 
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
 #endif /* __IMX_HDMI_H__ */
index bf964cdfb3300d98365be6ae487f8d69d2d71f47..c339fc85fd076dd20f06d0f72397023c36fa50d5 100644 (file)
@@ -36,6 +36,7 @@ struct drm_bridge;
 struct drm_bridge_timings;
 struct drm_connector;
 struct drm_display_info;
+struct drm_minor;
 struct drm_panel;
 struct edid;
 struct i2c_adapter;
@@ -949,4 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
 }
 #endif
 
+void drm_bridge_debugfs_init(struct drm_minor *minor);
+
 #endif
index 8e1cbc75143ef2161be189b7dbfc3cb63260026d..8b48a1974da3143c7de176e6fe3e01da9c8fc9d8 100644 (file)
@@ -77,11 +77,6 @@ struct drm_plane_helper_funcs;
  * intended to indicate whether a full modeset is needed, rather than strictly
  * describing what has changed in a commit. See also:
  * drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
  */
 struct drm_crtc_state {
        /** @crtc: backpointer to the CRTC */
index 7616f457ce7007c51718c9bbfd092cf72ecfea2d..cb2c1956a214a6659131a76afefa87df891e456d 100644 (file)
 
 #include <linux/types.h>
 #include <linux/seq_file.h>
+
+#include <drm/drm_gpuva_mgr.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
+
 /**
  * struct drm_info_list - debugfs info list entry
  *
@@ -134,6 +150,9 @@ void drm_debugfs_add_file(struct drm_device *dev, const char *name,
 
 void drm_debugfs_add_files(struct drm_device *dev,
                           const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+                          struct drm_gpuva_manager *mgr);
 #else
 static inline void drm_debugfs_create_files(const struct drm_info_list *files,
                                            int count, struct dentry *root,
@@ -155,6 +174,12 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
                                         const struct drm_debugfs_info *files,
                                         int count)
 {}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+                                        struct drm_gpuva_manager *mgr)
+{
+       return 0;
+}
 #endif
 
 #endif /* _DRM_DEBUGFS_H_ */
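
Per the new kernel-doc above, a driver exposes its GPU VA space by pairing DRM_DEBUGFS_GPUVA_INFO with a show callback that forwards to drm_debugfs_gpuva_info(). A hedged usage sketch (the foo_* names and the manager lookup are hypothetical; the m->private convention follows other drm_info_list users):

	static int foo_gpuvas_show(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = m->private;
		struct drm_gpuva_manager *mgr = node->info_ent->data;

		return drm_debugfs_gpuva_info(m, mgr);
	}

	static const struct drm_info_list foo_debugfs_list[] = {
		DRM_DEBUGFS_GPUVA_INFO(foo_gpuvas_show, &foo_gpuva_manager),
	};
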
index 89e2706cac5611a68afa345056d2b3ebf0412f55..9813fa759b75d4dd39350a6eb687fbf455a64aed 100644 (file)
@@ -104,6 +104,12 @@ enum drm_driver_feature {
         * acceleration should be handled by two drivers that are connected using auxiliary bus.
         */
        DRIVER_COMPUTE_ACCEL            = BIT(7),
+       /**
+        * @DRIVER_GEM_GPUVA:
+        *
+        * Driver supports user defined GPU VA bindings for GEM objects.
+        */
+       DRIVER_GEM_GPUVA                = BIT(8),
 
        /* IMPORTANT: Below are all the legacy flags, add new ones above. */
 
@@ -304,22 +310,14 @@ struct drm_driver {
        /**
         * @prime_handle_to_fd:
         *
-        * Main PRIME export function. Should be implemented with
-        * drm_gem_prime_handle_to_fd() for GEM based drivers.
-        *
-        * For an in-depth discussion see :ref:`PRIME buffer sharing
-        * documentation <prime_buffer_sharing>`.
+        * PRIME export function. Only used by vmwgfx.
         */
        int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
                                uint32_t handle, uint32_t flags, int *prime_fd);
        /**
         * @prime_fd_to_handle:
         *
-        * Main PRIME import function. Should be implemented with
-        * drm_gem_prime_fd_to_handle() for GEM based drivers.
-        *
-        * For an in-depth discussion see :ref:`PRIME buffer sharing
-        * documentation <prime_buffer_sharing>`.
+        * PRIME import function. Only used by vmwgfx.
         */
        int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
                                int prime_fd, uint32_t *handle);
@@ -343,20 +341,6 @@ struct drm_driver {
                                struct drm_device *dev,
                                struct dma_buf_attachment *attach,
                                struct sg_table *sgt);
-       /**
-        * @gem_prime_mmap:
-        *
-        * mmap hook for GEM drivers, used to implement dma-buf mmap in the
-        * PRIME helpers.
-        *
-        * This hook only exists for historical reasons. Drivers must use
-        * drm_gem_prime_mmap() to implement it.
-        *
-        * FIXME: Convert all drivers to implement mmap in struct
-        * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
-        * its callers. This hook should be removed afterwards.
-        */
-       int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
        /**
         * @dumb_create:
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644 (file)
index 0000000..e046236
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT    BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES     BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+       /**
+        * @flags: Flags to control locking behavior
+        */
+       uint32_t                flags;
+
+       /**
+        * @ticket: WW ticket used for acquiring locks
+        */
+       struct ww_acquire_ctx   ticket;
+
+       /**
+        * @num_objects: number of objects locked
+        */
+       unsigned int            num_objects;
+
+       /**
+        * @max_objects: maximum objects in array
+        */
+       unsigned int            max_objects;
+
+       /**
+        * @objects: array of the locked objects
+        */
+       struct drm_gem_object   **objects;
+
+       /**
+        * @contended: contended GEM object we backed off for
+        */
+       struct drm_gem_object   *contended;
+
+       /**
+        * @prelocked: already locked GEM object due to contention
+        */
+       struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj)      \
+       for (index = 0, obj = (exec)->objects[0];               \
+            index < (exec)->num_objects;                       \
+            ++index, obj = (exec)->objects[index])
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body we use a jump pointer
+ * to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec)                                        \
+__PASTE(__drm_exec_, __LINE__):                                                \
+       for (void *__drm_exec_retry_ptr; ({                             \
+               __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+               (void)__drm_exec_retry_ptr;                             \
+               drm_exec_cleanup(exec);                                 \
+       });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when contention was detected and we need to
+ * clean up and restart the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec)                     \
+       do {                                                    \
+               if (unlikely(drm_exec_is_contended(exec)))      \
+                       goto *__drm_exec_retry_ptr;             \
+       } while (0)
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+       return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+                        unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+                          struct drm_gem_object **objects,
+                          unsigned int num_objects,
+                          unsigned int num_fences);
+
+#endif
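
The header above implies the following usage pattern: lock a set of GEM objects, retrying the whole block whenever preparing an object hits contention. A minimal sketch, assuming hypothetical objects boA/boB and a fence to install:

	struct drm_exec exec;
	struct drm_gem_object *obj;
	unsigned long index;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* On contention this jumps back so drm_exec_cleanup() can
		 * drop all locks and the body runs again from scratch. */
		ret = drm_exec_prepare_obj(&exec, boA, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto error;

		ret = drm_exec_prepare_obj(&exec, boB, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto error;
	}

	drm_exec_for_each_locked_object(&exec, index, obj)
		dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_WRITE);

error:
	drm_exec_fini(&exec);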
index 966912053cb0fea9132138fc38ac9e023686b48f..010239392adfb92477edfe12a95a1275e9b0b1bb 100644 (file)
@@ -50,16 +50,16 @@ struct file;
  * header include loops we need it here for now.
  */
 
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
  * /dev/dri/renderD* numbers).
  *
  * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to
  * be implemented before we hit any future limit.
  */
 enum drm_minor_type {
-       DRM_MINOR_PRIMARY,
-       DRM_MINOR_CONTROL,
-       DRM_MINOR_RENDER,
+       DRM_MINOR_PRIMARY = 0,
+       DRM_MINOR_CONTROL = 1,
+       DRM_MINOR_RENDER = 2,
        DRM_MINOR_ACCEL = 32,
 };
 
index bbc721870c13c7872b1e1b385a8550b227319ddc..bc9f6aa2f3fec3b0b9d7adff424928b0e09d3bd8 100644 (file)
@@ -36,6 +36,8 @@
 
 #include <linux/kref.h>
 #include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 
 #include <drm/drm_vma_manager.h>
 
@@ -379,6 +381,22 @@ struct drm_gem_object {
         */
        struct dma_resv _resv;
 
+       /**
+        * @gpuva:
+        *
+        * Provides the list of GPU VAs attached to this GEM object.
+        *
+        * Drivers should lock list accesses with the GEMs &dma_resv lock
+        * (&drm_gem_object.resv) or a custom lock if one is provided.
+        */
+       struct {
+               struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+               struct lockdep_map *lock_dep_map;
+#endif
+       } gpuva;
+
        /**
         * @funcs:
         *
@@ -526,4 +544,68 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
 
 int drm_gem_evict(struct drm_gem_object *obj);
 
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+       if (!WARN((obj)->gpuva.lock_dep_map, \
+                 "GEM GPUVA lock should be set only once.")) \
+               (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+       lockdep_assert((obj)->gpuva.lock_dep_map ? \
+                      lock_is_held((obj)->gpuva.lock_dep_map) : \
+                      dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuva list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+       INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuva() - iterator to walk over a list of gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuva(entry__, obj__) \
+       list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+
+/**
+ * drm_gem_for_each_gpuva_safe() - iterator to safely walk over a list of
+ * gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage for the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
+       list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+
 #endif /* __DRM_GEM_H__ */
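
For DRIVER_GEM_GPUVA drivers, the gpuva list must be initialized per object, and a custom lock registered only when the dma-resv lock is not used. A sketch under those assumptions (my_bo, its gpuva_lock mutex and my_evict_va() are hypothetical):

	static void my_gem_object_setup(struct my_bo *bo)
	{
		drm_gem_gpuva_init(&bo->base);
		/* Only needed when the list is not protected by the GEM
		 * object's dma-resv lock: */
		drm_gem_gpuva_set_lock(&bo->base, &bo->gpuva_lock);
	}

	static void my_gem_object_evict(struct my_bo *bo)
	{
		struct drm_gpuva *va;

		drm_gem_gpuva_assert_lock_held(&bo->base);
		drm_gem_for_each_gpuva(va, &bo->base)
			my_evict_va(va);	/* hypothetical backend */
	}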
index 8a043235dad81f45b6822c4c7db7bd6587db7f5c..a827bde494f610fe9afa03229cd5001f852a01e8 100644 (file)
@@ -166,11 +166,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
  * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
  */
 #define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
-       .dumb_create            = (dumb_create_func), \
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd, \
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle, \
-       .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
-       .gem_prime_mmap         = drm_gem_prime_mmap
+       .dumb_create               = (dumb_create_func), \
+       .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
 
 /**
  * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
@@ -204,11 +201,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
  * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
  */
 #define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
-       .dumb_create            = dumb_create_func, \
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd, \
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle, \
-       .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
-       .gem_prime_mmap         = drm_gem_prime_mmap
+       .dumb_create               = (dumb_create_func), \
+       .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
 
 /**
  * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
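
With the PRIME callbacks gone from these macros, a DMA-helper driver declaration now only carries its own entries plus the macro. A sketch (my_dumb_create and my_fops are hypothetical):

	static const struct drm_driver my_dma_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(my_dumb_create),
		.fops  = &my_fops,
		.name  = "my-dma-driver",
		.desc  = "Example DMA GEM driver",
		.date  = "20230815",
		.major = 1,
		.minor = 0,
	};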
index 5994fed5e3278dd11311d4cb4812b23b4cf43abb..bf0c31aa8fbe4f01be1660565c8190eb11c3fbb7 100644 (file)
@@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
         */
        struct drm_gem_object base;
 
-       /**
-        * @pages_lock: Protects the page table and use count
-        */
-       struct mutex pages_lock;
-
        /**
         * @pages: Page table
         */
@@ -65,11 +60,6 @@ struct drm_gem_shmem_object {
         */
        struct sg_table *sgt;
 
-       /**
-        * @vmap_lock: Protects the vmap address and use count
-        */
-       struct mutex vmap_lock;
-
        /**
         * @vaddr: Kernel virtual address of the backing memory
         */
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
                !shmem->base.dma_buf && !shmem->base.import_attach;
 }
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -290,10 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
  * the &drm_driver structure.
  */
 #define DRM_GEM_SHMEM_DRIVER_OPS \
-       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd, \
-       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle, \
        .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
-       .gem_prime_mmap         = drm_gem_prime_mmap, \
-       .dumb_create            = drm_gem_shmem_dumb_create
+       .dumb_create               = drm_gem_shmem_dumb_create
 
 #endif /* __DRM_GEM_SHMEM_HELPER_H__ */
index f4aab64411d82dbbab324f02766d6982663ef723..e18429f09e5373f921bd3e7309b4b21f0c9ec251 100644 (file)
@@ -157,12 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
  * &struct drm_driver with default functions.
  */
 #define DRM_GEM_VRAM_DRIVER \
-       .debugfs_init             = drm_vram_mm_debugfs_init, \
-       .dumb_create              = drm_gem_vram_driver_dumb_create, \
-       .dumb_map_offset          = drm_gem_ttm_dumb_map_offset, \
-       .gem_prime_mmap           = drm_gem_prime_mmap, \
-       .prime_handle_to_fd       = drm_gem_prime_handle_to_fd, \
-       .prime_fd_to_handle       = drm_gem_prime_fd_to_handle
+       .debugfs_init    = drm_vram_mm_debugfs_init, \
+       .dumb_create     = drm_gem_vram_driver_dumb_create, \
+       .dumb_map_offset = drm_gem_ttm_dumb_map_offset
 
 /*
  *  VRAM memory manager
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h
new file mode 100644 (file)
index 0000000..ed8d502
--- /dev/null
@@ -0,0 +1,706 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __DRM_GPUVA_MGR_H__
+#define __DRM_GPUVA_MGR_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+
+struct drm_gpuva_manager;
+struct drm_gpuva_fn_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+       /**
+        * @DRM_GPUVA_INVALIDATED:
+        *
+        * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+        */
+       DRM_GPUVA_INVALIDATED = (1 << 0),
+
+       /**
+        * @DRM_GPUVA_SPARSE:
+        *
+        * Flag indicating that the &drm_gpuva is a sparse mapping.
+        */
+       DRM_GPUVA_SPARSE = (1 << 1),
+
+       /**
+        * @DRM_GPUVA_USERBITS: user defined bits
+        */
+       DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuva_manager.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+       /**
+        * @mgr: the &drm_gpuva_manager this object is associated with
+        */
+       struct drm_gpuva_manager *mgr;
+
+       /**
+        * @flags: the &drm_gpuva_flags for this mapping
+        */
+       enum drm_gpuva_flags flags;
+
+       /**
+        * @va: structure containing the address and range of the &drm_gpuva
+        */
+       struct {
+               /**
+                * @addr: the start address
+                */
+               u64 addr;
+
+       /**
+                * @range: the range
+                */
+               u64 range;
+       } va;
+
+       /**
+        * @gem: structure containing the &drm_gem_object and its offset
+        */
+       struct {
+               /**
+                * @offset: the offset within the &drm_gem_object
+                */
+               u64 offset;
+
+               /**
+                * @obj: the mapped &drm_gem_object
+                */
+               struct drm_gem_object *obj;
+
+               /**
+                * @entry: the &list_head to attach this object to a &drm_gem_object
+                */
+               struct list_head entry;
+       } gem;
+
+       /**
+        * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+        */
+       struct {
+               /**
+                * @rb: the rb-tree node
+                */
+               struct rb_node node;
+
+               /**
+                * @entry: The &list_head to additionally connect &drm_gpuvas
+                * in the same order they appear in the interval tree. This is
+                * useful to keep iterating &drm_gpuvas from a start node found
+                * through the rb-tree while doing modifications on the rb-tree
+                * itself.
+                */
+               struct list_head entry;
+
+               /**
+                * @__subtree_last: needed by the interval tree, holding last-in-subtree
+                */
+               u64 __subtree_last;
+       } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+                                u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+                                      u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
+
+bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+                                 struct drm_gem_object *obj, u64 offset)
+{
+       va->va.addr = addr;
+       va->va.range = range;
+       va->gem.obj = obj;
+       va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+       if (invalidate)
+               va->flags |= DRM_GPUVA_INVALIDATED;
+       else
+               va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+       return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * struct drm_gpuva_manager - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
+ * keeping &drm_gpuva structures in an interval tree (an augmented rb-tree).
+ * Typically, this structure is embedded in bigger driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuva_manager {
+       /**
+        * @name: the name of the DRM GPU VA space
+        */
+       const char *name;
+
+       /**
+        * @mm_start: start of the VA space
+        */
+       u64 mm_start;
+
+       /**
+        * @mm_range: length of the VA space
+        */
+       u64 mm_range;
+
+       /**
+        * @rb: structures to track &drm_gpuva entries
+        */
+       struct {
+               /**
+                * @tree: the rb-tree to track GPU VA mappings
+                */
+               struct rb_root_cached tree;
+
+               /**
+                * @list: the &list_head to track GPU VA mappings
+                */
+               struct list_head list;
+       } rb;
+
+       /**
+        * @kernel_alloc_node:
+        *
+        * &drm_gpuva representing the address space cutout reserved for
+        * the kernel
+        */
+       struct drm_gpuva kernel_alloc_node;
+
+       /**
+        * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+        */
+       const struct drm_gpuva_fn_ops *ops;
+};
+
+void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
+                           const char *name,
+                           u64 start_offset, u64 range,
+                           u64 reserve_offset, u64 reserve_range,
+                           const struct drm_gpuva_fn_ops *ops);
+void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+       if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+               return list_next_entry(va, rb.entry);
+
+       return NULL;
+}
+
+/**
+ * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but is using the &drm_gpuva_manager's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
+       for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+            va__ && (va__->va.addr < (end__)); \
+            va__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but uses the &drm_gpuva_manager's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
+ * &drm_gpuva_manager's @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
+       for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+            next__ = __drm_gpuva_next(va__); \
+            va__ && (va__->va.addr < (end__)); \
+            va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager.
+ */
+#define drm_gpuva_for_each_va(va__, mgr__) \
+       list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
+       list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ */
+enum drm_gpuva_op_type {
+       /**
+        * @DRM_GPUVA_OP_MAP: the map op type
+        */
+       DRM_GPUVA_OP_MAP,
+
+       /**
+        * @DRM_GPUVA_OP_REMAP: the remap op type
+        */
+       DRM_GPUVA_OP_REMAP,
+
+       /**
+        * @DRM_GPUVA_OP_UNMAP: the unmap op type
+        */
+       DRM_GPUVA_OP_UNMAP,
+
+       /**
+        * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+        */
+       DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+       /**
+        * @va: structure containing address and range of a map
+        * operation
+        */
+       struct {
+               /**
+                * @addr: the base address of the new mapping
+                */
+               u64 addr;
+
+               /**
+                * @range: the range of the new mapping
+                */
+               u64 range;
+       } va;
+
+       /**
+        * @gem: structure containing the &drm_gem_object and its offset
+        */
+       struct {
+               /**
+                * @offset: the offset within the &drm_gem_object
+                */
+               u64 offset;
+
+               /**
+                * @obj: the &drm_gem_object to map
+                */
+               struct drm_gem_object *obj;
+       } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+       /**
+        * @va: the &drm_gpuva to unmap
+        */
+       struct drm_gpuva *va;
+
+       /**
+        * @keep:
+        *
+        * Indicates whether this &drm_gpuva is physically contiguous with the
+        * original mapping request.
+        *
+        * Optionally, if &keep is set, drivers may keep the actual page table
+        * mappings for this &drm_gpuva, adding only the missing page table
+        * entries and updating the &drm_gpuva_manager accordingly.
+        */
+       bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existent
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+       /**
+        * @prev: the preceding part of a split mapping
+        */
+       struct drm_gpuva_op_map *prev;
+
+       /**
+        * @next: the subsequent part of a split mapping
+        */
+       struct drm_gpuva_op_map *next;
+
+       /**
+        * @unmap: the unmap operation for the original existing mapping
+        */
+       struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+       /**
+        * @va: the &drm_gpuva to prefetch
+        */
+       struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+       /**
+        * @entry:
+        *
+        * The &list_head used to distribute instances of this struct within
+        * &drm_gpuva_ops.
+        */
+       struct list_head entry;
+
+       /**
+        * @op: the type of the operation
+        */
+       enum drm_gpuva_op_type op;
+
+       union {
+               /**
+                * @map: the map operation
+                */
+               struct drm_gpuva_op_map map;
+
+               /**
+                * @remap: the remap operation
+                */
+               struct drm_gpuva_op_remap remap;
+
+               /**
+                * @unmap: the unmap operation
+                */
+               struct drm_gpuva_op_unmap unmap;
+
+               /**
+                * @prefetch: the prefetch operation
+                */
+               struct drm_gpuva_op_prefetch prefetch;
+       };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+       /**
+        * @list: the &list_head
+        */
+       struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to use as temporary storage for the next step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), so it is safe against removal
+ * of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+       list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+       list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+       list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+       list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+                           u64 addr, u64 range,
+                           struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+                             u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+                                u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+                              struct drm_gem_object *obj);
+
+void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+                       struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+                                         struct drm_gpuva_op_map *op)
+{
+       drm_gpuva_init(va, op->va.addr, op->va.range,
+                      op->gem.obj, op->gem.offset);
+}
+
+/**
+ * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuva_fn_ops {
+       /**
+        * @op_alloc: called when the &drm_gpuva_manager allocates
+        * a struct drm_gpuva_op
+        *
+        * Some drivers may want to embed struct drm_gpuva_op into driver
+        * specific structures. By implementing this callback drivers can
+        * allocate memory accordingly.
+        *
+        * This callback is optional.
+        */
+       struct drm_gpuva_op *(*op_alloc)(void);
+
+       /**
+        * @op_free: called when the &drm_gpuva_manager frees a
+        * struct drm_gpuva_op
+        *
+        * Some drivers may want to embed struct drm_gpuva_op into driver
+        * specific structures. By implementing this callback drivers can
+        * free the previously allocated memory accordingly.
+        *
+        * This callback is optional.
+        */
+       void (*op_free)(struct drm_gpuva_op *op);
+
+       /**
+        * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+        * mapping once all previous steps were completed
+        *
+        * The &priv pointer matches the one the driver passed to
+        * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+        *
+        * Can be NULL if &drm_gpuva_sm_map is not used.
+        */
+       int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+       /**
+        * @sm_step_remap: called from &drm_gpuva_sm_map and
+        * &drm_gpuva_sm_unmap to split up an existent mapping
+        *
+        * This callback is called when an existent mapping needs to be split up.
+        * This is the case when either a newly requested mapping overlaps or
+        * is enclosed by an existent mapping or a partial unmap of an existent
+        * mapping is requested.
+        *
+        * The &priv pointer matches the one the driver passed to
+        * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+        *
+        * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+        * used.
+        */
+       int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+       /**
+        * @sm_step_unmap: called from &drm_gpuva_sm_map and
+        * &drm_gpuva_sm_unmap to unmap an existent mapping
+        *
+        * This callback is called when an existent mapping needs to be unmapped.
+        * This is the case when either a newly requested mapping encloses an
+        * existent mapping or an unmap of an existent mapping is requested.
+        *
+        * The &priv pointer matches the one the driver passed to
+        * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+        *
+        * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+        * used.
+        */
+       int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+                    u64 addr, u64 range,
+                    struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+                      u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+                  struct drm_gpuva *va,
+                  struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+                    struct drm_gpuva *next,
+                    struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+#endif /* __DRM_GPUVA_MGR_H__ */
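
Putting the pieces together, a driver typically initializes one manager per VM and translates the generated ops list into page-table updates. A hedged sketch, assuming a hypothetical my_vm with my_map()/my_remap()/my_unmap() backends and a 4 GiB VA space whose top page is reserved for the kernel:

	static void my_vm_init(struct my_vm *vm)
	{
		drm_gpuva_manager_init(&vm->mgr, "my-vm",
				       0, SZ_4G,		/* VA space */
				       SZ_4G - SZ_4K, SZ_4K,	/* kernel node */
				       &my_gpuva_ops);
	}

	static int my_vm_bind(struct my_vm *vm, u64 addr, u64 range,
			      struct drm_gem_object *obj, u64 offset)
	{
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;
		int ret = 0;

		ops = drm_gpuva_sm_map_ops_create(&vm->mgr, addr, range,
						  obj, offset);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
				ret = my_map(vm, &op->map);
				break;
			case DRM_GPUVA_OP_REMAP:
				ret = my_remap(vm, &op->remap);
				break;
			case DRM_GPUVA_OP_UNMAP:
				ret = my_unmap(vm, &op->unmap);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				break;
		}

		drm_gpuva_ops_free(&vm->mgr, ops);
		return ret;
	}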
index ed013fdcc1ffb4fb7617dce59c0379f18b19424d..514c8a7a32f0f03d043cee7f16c9391752187795 100644 (file)
@@ -87,5 +87,12 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
                                                      sizeof(_type),            \
                                                      offsetof(_type, _member), \
                                                      _feat))
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
+
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+                                   struct drm_device *drm,
+                                   struct drm_modeset_acquire_ctx *ctx);
 
 #endif // DRM_KUNIT_HELPERS_H_
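
The two new helpers pair naturally in a test body; both allocations are expected to be cleaned up through kunit-managed actions. A sketch, assuming the test's init stored the drm_device in test->priv:

	static void my_atomic_state_test(struct kunit *test)
	{
		struct drm_device *drm = test->priv;
		struct drm_modeset_acquire_ctx *ctx;
		struct drm_atomic_state *state;

		ctx = drm_kunit_helper_acquire_ctx_alloc(test);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

		state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

		/* ... exercise and check the atomic state ... */
	}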
index 965faf082a6d1acbe894ad6f1849fc00e3979b90..e3c3ac615909474bce7163230ed53cb22e1e7919 100644 (file)
@@ -59,8 +59,8 @@ enum mode_set_atomic {
 /**
  * struct drm_crtc_helper_funcs - helper operations for CRTCs
  *
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
  */
 struct drm_crtc_helper_funcs {
        /**
@@ -216,9 +216,7 @@ struct drm_crtc_helper_funcs {
         *
         * This callback is used to update the display mode of a CRTC without
         * changing anything of the primary plane configuration. This fits the
-        * requirement of atomic and hence is used by the atomic helpers. It is
-        * also used by the transitional plane helpers to implement a
-        * @mode_set hook in drm_helper_crtc_mode_set().
+        * requirement of atomic and hence is used by the atomic helpers.
         *
         * Note that the display pipe is completely off when this function is
         * called. Atomic drivers which need hardware to be running before they
@@ -333,8 +331,8 @@ struct drm_crtc_helper_funcs {
         * all updated. Again the recommendation is to just call check helpers
         * until a maximal configuration is reached.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional.
         *
         * NOTE:
         *
@@ -373,8 +371,8 @@ struct drm_crtc_helper_funcs {
         * has picked. See drm_atomic_helper_commit_planes() for a discussion of
         * the tradeoffs and variants of plane commit helpers.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional.
         */
        void (*atomic_begin)(struct drm_crtc *crtc,
                             struct drm_atomic_state *state);
@@ -397,8 +395,8 @@ struct drm_crtc_helper_funcs {
         * has picked. See drm_atomic_helper_commit_planes() for a discussion of
         * the tradeoffs and variants of plane commit helpers.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional.
         */
        void (*atomic_flush)(struct drm_crtc *crtc,
                             struct drm_atomic_state *state);
@@ -507,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
 /**
  * struct drm_encoder_helper_funcs - helper operations for encoders
  *
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
  */
 struct drm_encoder_helper_funcs {
        /**
@@ -1185,8 +1183,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
 /**
  * struct drm_plane_helper_funcs - helper operations for planes
  *
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
  */
 struct drm_plane_helper_funcs {
        /**
@@ -1221,9 +1218,8 @@ struct drm_plane_helper_funcs {
         * The helpers will call @cleanup_fb with matching arguments for every
         * successful call to this hook.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional. See @begin_fb_access
-        * for preparing per-commit resources.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional. See @begin_fb_access for preparing per-commit resources.
         *
         * RETURNS:
         *
@@ -1240,8 +1236,8 @@ struct drm_plane_helper_funcs {
         * This hook is called to clean up any resources allocated for the given
         * framebuffer and plane configuration in @prepare_fb.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional.
         */
        void (*cleanup_fb)(struct drm_plane *plane,
                           struct drm_plane_state *old_state);
@@ -1295,8 +1291,8 @@ struct drm_plane_helper_funcs {
         * all updated. Again the recommendation is to just call check helpers
         * until a maximal configuration is reached.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional.
         *
         * NOTE:
         *
@@ -1326,8 +1322,7 @@ struct drm_plane_helper_funcs {
         * has picked. See drm_atomic_helper_commit_planes() for a discussion of
         * the tradeoffs and variants of plane commit helpers.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional.
+        * This callback is used by the atomic modeset helpers, but it is optional.
         */
        void (*atomic_update)(struct drm_plane *plane,
                              struct drm_atomic_state *state);
@@ -1376,9 +1371,8 @@ struct drm_plane_helper_funcs {
         * has picked. See drm_atomic_helper_commit_planes() for a discussion of
         * the tradeoffs and variants of plane commit helpers.
         *
-        * This callback is used by the atomic modeset helpers and by the
-        * transitional plane helpers, but it is optional. It's intended to
-        * reverse the effects of @atomic_enable.
+        * This callback is used by the atomic modeset helpers, but it is
+        * optional. It's intended to reverse the effects of @atomic_enable.
         */
        void (*atomic_disable)(struct drm_plane *plane,
                               struct drm_atomic_state *state);
index 432fab2347ebf5d56dba149797510ab959c0612a..10015891b056f816c7a992a2052b36fd26943c5b 100644 (file)
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 
 struct backlight_device;
 struct dentry;
 struct device_node;
 struct drm_connector;
 struct drm_device;
+struct drm_panel_follower;
 struct drm_panel;
 struct display_timing;
 
@@ -144,6 +146,45 @@ struct drm_panel_funcs {
        void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
 };
 
+struct drm_panel_follower_funcs {
+       /**
+        * @panel_prepared:
+        *
+        * Called after the panel has been powered on.
+        */
+       int (*panel_prepared)(struct drm_panel_follower *follower);
+
+       /**
+        * @panel_unpreparing:
+        *
+        * Called before the panel is powered off.
+        */
+       int (*panel_unpreparing)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+       /**
+        * @funcs:
+        *
+        * Dependent device callbacks; should be initialized by the caller.
+        */
+       const struct drm_panel_follower_funcs *funcs;
+
+       /**
+        * @list:
+        *
+        * Used for linking into the panel's list; set by drm_panel_add_follower().
+        */
+       struct list_head list;
+
+       /**
+        * @panel:
+        *
+        * The panel we're dependent on; set by drm_panel_add_follower().
+        */
+       struct drm_panel *panel;
+};
+
 /**
  * struct drm_panel - DRM panel object
  */
@@ -189,6 +230,20 @@ struct drm_panel {
         */
        struct list_head list;
 
+       /**
+        * @followers:
+        *
+        * A list of struct drm_panel_follower dependent on this panel.
+        */
+       struct list_head followers;
+
+       /**
+        * @follower_lock:
+        *
+        * Lock for followers list.
+        */
+       struct mutex follower_lock;
+
        /**
         * @prepare_prev_first:
         *
@@ -198,6 +253,20 @@ struct drm_panel {
         * the panel is powered up.
         */
        bool prepare_prev_first;
+
+       /**
+        * @prepared:
+        *
+        * If true then the panel has been prepared.
+        */
+       bool prepared;
+
+       /**
+        * @enabled:
+        *
+        * If true then the panel has been enabled.
+        */
+       bool enabled;
 };
 
 void drm_panel_init(struct drm_panel *panel, struct device *dev,
@@ -232,6 +301,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np,
 }
 #endif
 
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+                          struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+                               struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+       return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+                                        struct drm_panel_follower *follower)
+{
+       return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+                                             struct drm_panel_follower *follower)
+{
+       return -ENODEV;
+}
+#endif
+
 #if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
        (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
 int drm_panel_of_backlight(struct drm_panel *panel);
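
A typical follower is a touch controller that must power up only after its panel is prepared. A hedged sketch (my_ts and its power helpers are hypothetical):

	static int my_ts_panel_prepared(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		return my_ts_power_on(ts);	/* hypothetical */
	}

	static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		return my_ts_power_off(ts);	/* hypothetical */
	}

	static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
		.panel_prepared    = my_ts_panel_prepared,
		.panel_unpreparing = my_ts_panel_unpreparing,
	};

	static int my_ts_register_follower(struct device *dev, struct my_ts *ts)
	{
		ts->follower.funcs = &my_ts_follower_funcs;
		return devm_drm_panel_add_follower(dev, &ts->follower);
	}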
index 51291983ea445dd02097affcf3f5caf9393f0dbb..79d62856defbf781c81c2c7f19365d3f2da56457 100644 (file)
@@ -56,7 +56,7 @@ struct drm_plane_state {
        /**
         * @crtc:
         *
-        * Currently bound CRTC, NULL if disabled. Do not this write directly,
+        * Currently bound CRTC, NULL if disabled. Do not write this directly,
         * use drm_atomic_set_crtc_for_plane()
         */
        struct drm_crtc *crtc;
index 2a1d01e5b56b8c626e558c828a4c01252f3b0365..a7abf9f3e69729e00c3572003d86d1d98d399924 100644 (file)
@@ -60,19 +60,12 @@ enum dma_data_direction;
 
 struct drm_device;
 struct drm_gem_object;
-struct drm_file;
 
 /* core prime functions */
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info);
 void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
 
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
-                              struct drm_file *file_priv, int prime_fd, uint32_t *handle);
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
-                              struct drm_file *file_priv, uint32_t handle, uint32_t flags,
-                              int *prime_fd);
-
 /* helper functions for exporting */
 int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach);
index 6cf7243a1dc5e1ed53f99cf6dafda44a76b55ab8..b40052132e5238bf4a59256783f15aa6f5f8945e 100644 (file)
@@ -54,7 +54,11 @@ struct drm_syncobj {
         */
        struct list_head cb_list;
        /**
-        * @lock: Protects &cb_list and write-locks &fence.
+        * @ev_fd_list: List of registered eventfds.
+        */
+       struct list_head ev_fd_list;
+       /**
+        * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
         */
        spinlock_t lock;
        /**
index 6273cac44e479efe8828feebb8608f9ac9329599..96a5d858404b078ed2ff614e8dbf99f465ffee8a 100644 (file)
@@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev);
 
 void drm_sysfs_hotplug_event(struct drm_device *dev);
 void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
-                                     struct drm_property *property);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+                                       struct drm_property *property);
 #endif
index e95b4837e5a373f1dfcc94d6903c86bed722bf71..f9544d9b670d33aee0114b85d5946684128d8175 100644 (file)
@@ -583,15 +583,14 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 int drm_sched_entity_error(struct drm_sched_entity *entity);
 
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
-                               struct dma_fence *fence);
 struct drm_sched_fence *drm_sched_fence_alloc(
        struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity);
 void drm_sched_fence_free(struct drm_sched_fence *fence);
 
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                              struct dma_fence *parent);
 void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
 
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
index 087e3f649c52f02d964deca3db3d823b1d62d9fc..f6e6ed52968133d2058c60c36c21f29aae097f1b 100644 (file)
@@ -24,8 +24,8 @@
 #include <linux/atomic.h>
 
 /*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
  */
 
 
index 8b113c3842364a5d52edcb6373b87a5b3d5368a8..0223a41a64b2427186243b778d1b45d9e9b8ea43 100644 (file)
@@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 void ttm_bo_put(struct ttm_buffer_object *bo);
 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                          struct ttm_lru_bulk_move *bulk);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place);
 int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
index e6802b69cdd641cee67914a5ae39b3496b28022f..90ab33cb5d0efac8afeb9d498851f9d2ef91d7ed 100644 (file)
@@ -111,6 +111,7 @@ struct blk_crypto_profile {
         * keyslots while ensuring that they can't be changed concurrently.
         */
        struct rw_semaphore lock;
+       struct lock_class_key lockdep_key;
 
        /* List of idle slots, with least recently used slot at front */
        wait_queue_head_t idle_slots_wait_queue;
index 2b7fb8e87793caa7d1a7cf6b5be972ed2d959a2b..b96e00499f9eee3eaa8bc2757bd7c096ce4d86a0 100644 (file)
@@ -158,13 +158,13 @@ struct request {
 
        /*
         * The rb_node is only used inside the io scheduler, requests
-        * are pruned when moved to the dispatch queue. So let the
-        * completion_data share space with the rb_node.
+        * are pruned when moved to the dispatch queue. special_vec must
+        * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
+        * inserted into an IO scheduler.
         */
        union {
                struct rb_node rb_node; /* sort/lookup */
                struct bio_vec special_vec;
-               void *completion_data;
        };
 
        /*
index d54b595a0fe0f9220da25ab6e809a454ff615432..0d678e9a7b2482fe05fb884f0694347dead82897 100644 (file)
@@ -606,7 +606,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
 
 struct dma_fence *dma_fence_get_stub(void);
-struct dma_fence *dma_fence_allocate_private_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
 u64 dma_fence_context_alloc(unsigned num);
 
 extern const struct dma_fence_ops dma_fence_array_ops;
index ab088c662e88d07bffc308a06e4957432e2060e8..c31a9895ecb8b5aff355034eb05937a3ebe1dc84 100644 (file)
 #include <linux/range.h>
 #include <linux/reboot.h>
 #include <linux/uuid.h>
-#include <linux/screen_info.h>
 
 #include <asm/page.h>
 
+struct screen_info;
+
 #define EFI_SUCCESS            0
 #define EFI_LOAD_ERROR         ( 1 | (1UL << (BITS_PER_LONG-1)))
 #define EFI_INVALID_PARAMETER  ( 2 | (1UL << (BITS_PER_LONG-1)))
index ce7d588edc3e64cae9f89e361b5f0e3638d46164..16c3e6d6c55d34c7d263d5c807e50eecc577072d 100644 (file)
@@ -383,7 +383,6 @@ struct fb_tile_ops {
 #endif /* CONFIG_FB_TILEBLITTING */
 
 /* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT         0
 #define FBINFO_HWACCEL_DISABLED        0x0002
        /* When FBINFO_HWACCEL_DISABLED is set:
         *  Hardware acceleration is turned off.  Software implementations
@@ -481,7 +480,9 @@ struct fb_info {
 
        const struct fb_ops *fbops;
        struct device *device;          /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
        struct device *dev;             /* This is this fb device */
+#endif
        int class_flag;                    /* private sysfs flags */
 #ifdef CONFIG_FB_TILEBLITTING
        struct fb_tile_ops *tileops;    /* Tile Blitting */
@@ -502,8 +503,6 @@ struct fb_info {
        bool skip_vt_switch; /* no VT switch on suspend/resume required */
 };
 
-#define FBINFO_FLAG_DEFAULT    FBINFO_DEFAULT
-
 /* This will go away
  * fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
  * when it wants to turn the acceleration engine on.  This is
@@ -527,7 +526,7 @@ extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
 extern int fb_blank(struct fb_info *info, int blank);
 
 /*
- * Drawing operations where framebuffer is in I/O memory
+ * Helpers for framebuffers in I/O memory
  */
 
 extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -538,29 +537,25 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
 extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
                           size_t count, loff_t *ppos);
 
-/*
- * Initializes struct fb_ops for framebuffers in I/O memory.
- */
-
-#define __FB_DEFAULT_IO_OPS_RDWR \
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
        .fb_read        = fb_io_read, \
        .fb_write       = fb_io_write
 
-#define __FB_DEFAULT_IO_OPS_DRAW \
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
        .fb_fillrect    = cfb_fillrect, \
        .fb_copyarea    = cfb_copyarea, \
        .fb_imageblit   = cfb_imageblit
 
-#define __FB_DEFAULT_IO_OPS_MMAP \
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
        .fb_mmap        = NULL /* default implementation */
 
-#define FB_DEFAULT_IO_OPS \
-       __FB_DEFAULT_IO_OPS_RDWR, \
-       __FB_DEFAULT_IO_OPS_DRAW, \
-       __FB_DEFAULT_IO_OPS_MMAP
+#define FB_DEFAULT_IOMEM_OPS \
+       __FB_DEFAULT_IOMEM_OPS_RDWR, \
+       __FB_DEFAULT_IOMEM_OPS_DRAW, \
+       __FB_DEFAULT_IOMEM_OPS_MMAP
 
 /*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
  */
 
 extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -571,27 +566,28 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
 extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
                            size_t count, loff_t *ppos);
 
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+       .fb_read        = fb_sys_read, \
+       .fb_write       = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+       .fb_fillrect    = sys_fillrect, \
+       .fb_copyarea    = sys_copyarea, \
+       .fb_imageblit   = sys_imageblit
+
 /*
- * Initializes struct fb_ops for framebuffers in system memory.
+ * Helpers for framebuffers in DMA-able memory
  */
 
-#define __FB_DEFAULT_SYS_OPS_RDWR \
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
        .fb_read        = fb_sys_read, \
        .fb_write       = fb_sys_write
 
-#define __FB_DEFAULT_SYS_OPS_DRAW \
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
        .fb_fillrect    = sys_fillrect, \
        .fb_copyarea    = sys_copyarea, \
        .fb_imageblit   = sys_imageblit
 
-#define __FB_DEFAULT_SYS_OPS_MMAP \
-       .fb_mmap        = NULL /* default implementation */
-
-#define FB_DEFAULT_SYS_OPS \
-       __FB_DEFAULT_SYS_OPS_RDWR, \
-       __FB_DEFAULT_SYS_OPS_DRAW, \
-       __FB_DEFAULT_SYS_OPS_MMAP
-
 /* drivers/video/fbmem.c */
 extern int register_framebuffer(struct fb_info *fb_info);
 extern void unregister_framebuffer(struct fb_info *fb_info);
@@ -609,7 +605,6 @@ extern int fb_new_modelist(struct fb_info *info);
 
 extern bool fb_center_logo;
 extern int fb_logo_count;
-extern struct class *fb_class;
 
 static inline void lock_fb_info(struct fb_info *info)
 {
@@ -687,11 +682,11 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
                __damage_area(info, image->dx, image->dy, image->width, image->height); \
        }
 
-#define FB_GEN_DEFAULT_DEFERRED_IO_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
        __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
        __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
 
-#define FB_GEN_DEFAULT_DEFERRED_SYS_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
        __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
        __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
 
@@ -735,11 +730,8 @@ static inline bool fb_be_math(struct fb_info *info)
 #endif /* CONFIG_FB_FOREIGN_ENDIAN */
 }
 
-/* drivers/video/fbsysfs.c */
 extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
 extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
 extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
 
 /* drivers/video/fbmon.c */
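
After the rename, a driver for a framebuffer in I/O memory wires up its file operations with the IOMEM variants. A minimal sketch (the ops name is hypothetical):

    #include <linux/fb.h>
    #include <linux/module.h>

    /* FB_DEFAULT_IOMEM_OPS expands to fb_io_read/fb_io_write plus the
     * cfb_fillrect/cfb_copyarea/cfb_imageblit helpers declared above.
     */
    static const struct fb_ops example_fb_ops = {
            .owner = THIS_MODULE,
            FB_DEFAULT_IOMEM_OPS,
    };
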
index 8e59bd9541532ad9e66be003fb4f8695bcbfef52..ce156c7704ee5435ee1e14cb1a218b710e9947f2 100644 (file)
@@ -41,6 +41,15 @@ struct ftrace_ops;
 struct ftrace_regs;
 struct dyn_ftrace;
 
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+struct fgraph_ret_regs;
+unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
  * If the arch's mcount caller does not support all of ftrace's
index ea2bcdae7401235b2ea9050da240b259440a39c5..9a4c204df3da1d7e55d5277a8c31b8f4a60593b3 100644 (file)
@@ -44,9 +44,7 @@ struct ism_dev {
        u64 local_gid;
        int ieq_idx;
 
-       atomic_t free_clients_cnt;
-       atomic_t add_dev_cnt;
-       wait_queue_head_t waitq;
+       struct ism_client *subs[MAX_CLIENTS];
 };
 
 struct ism_event {
@@ -68,9 +66,6 @@ struct ism_client {
         */
        void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
        /* Private area - don't touch! */
-       struct work_struct remove_work;
-       struct work_struct add_work;
-       struct ism_dev *tgt_ism;
        u8 id;
 };
 
index 182b6d614eb19d18cf8956dcbfcc375ded34be92..26dd3f859d9d7e70e2541bed407e7820bc918bd4 100644 (file)
@@ -473,7 +473,7 @@ struct nvme_id_ns_nvm {
 };
 
 enum {
-       NVME_ID_NS_NVM_STS_MASK         = 0x3f,
+       NVME_ID_NS_NVM_STS_MASK         = 0x7f,
        NVME_ID_NS_NVM_GUARD_SHIFT      = 7,
        NVME_ID_NS_NVM_GUARD_MASK       = 0x3,
 };
index 54a06a4d2618652a1ca3a64921f623cc886d2171..596ca4f95cfa45bc168216e8a72cba491534735a 100644 (file)
@@ -8,7 +8,7 @@
 struct device;
 
 struct bd6107_platform_data {
-       struct device *fbdev;
+       struct device *dev;
        unsigned int def_value;
 };
 
index 1a8b5b1946fe4da626309afcee63ea6e1af45f44..323fbf5f7613940a5592a5042bf44ea26f892ee0 100644 (file)
@@ -8,7 +8,7 @@
 struct device;
 
 struct gpio_backlight_platform_data {
-       struct device *fbdev;
+       struct device *dev;
 };
 
 #endif
index c9da8d4027504fbd7b45e72c10d93b4b489bea97..95d85c1394bcaf2e9e64e78e7bac115cd74825bf 100644 (file)
@@ -8,7 +8,7 @@
 struct device;
 
 struct lv5207lp_platform_data {
-       struct device *fbdev;
+       struct device *dev;
        unsigned int max_value;
        unsigned int def_value;
 };
index ab26200c28033314f35cd6bd996cda4d9eedb8dd..e0745873e3f26cf4c471aac72c0e1ed373fbb662 100644 (file)
@@ -23,8 +23,9 @@ void psi_memstall_enter(unsigned long *flags);
 void psi_memstall_leave(unsigned long *flags);
 
 int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
-                       char *buf, enum psi_res res, struct file *file);
+struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
+                                      enum psi_res res, struct file *file,
+                                      struct kernfs_open_file *of);
 void psi_trigger_destroy(struct psi_trigger *t);
 
 __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
index 040c089581c6c035197f98ae183ae4c138b38552..f1fd3a8044e0eca4fcea4487243bd6b0e438aaf4 100644 (file)
@@ -137,6 +137,9 @@ struct psi_trigger {
        /* Wait queue for polling */
        wait_queue_head_t event_wait;
 
+       /* Kernfs file for cgroup triggers */
+       struct kernfs_open_file *of;
+
        /* Pending event flag */
        int event;
 
index fdf26cd0e7424e29a3552b515980d1455b6461cd..26b6f3c81a763843d9db6dc5b0c8561a0497dadb 100644 (file)
@@ -59,6 +59,7 @@ struct rethook_node {
 };
 
 struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+void rethook_stop(struct rethook *rh);
 void rethook_free(struct rethook *rh);
 void rethook_add_node(struct rethook *rh, struct rethook_node *node);
 struct rethook_node *rethook_try_get(struct rethook *rh);
index c1ef5fc60a3cba54d640a3e77dc419506801f42c..19cb803dd5ecd1abbc3d88905082b5daf9cec33b 100644 (file)
@@ -9,7 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_data/simplefb.h>
-#include <linux/screen_info.h>
+
+struct screen_info;
 
 enum {
        M_I17,          /* 17-Inch iMac */
index 9334371c94e2b88df8f09da6f395890369b4baaa..f7dd950ff2509b1bf8c0fdf1ebaf9c51e8931849 100644 (file)
@@ -67,6 +67,9 @@ struct nf_conntrack_tuple {
                /* The protocol. */
                u_int8_t protonum;
 
+               /* The direction must be ignored for the tuplehash */
+               struct { } __nfct_hash_offsetend;
+
                /* The direction (for tuplehash) */
                u_int8_t dir;
        } dst;
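
The zero-size member marks where the hashed portion of the tuple ends, so a hash can cover everything up to, but excluding, the direction byte. A hedged sketch of how such a marker is typically consumed (the helper is illustrative; the real user is the conntrack hash code):

    #include <linux/stddef.h>
    #include <net/netfilter/nf_conntrack_tuple.h>

    /* Length of the tuple that participates in hashing: everything
     * before the zero-size end marker, which sits just before 'dir'.
     */
    static inline u32 example_hash_len(void)
    {
            return offsetof(struct nf_conntrack_tuple,
                            dst.__nfct_hash_offsetend);
    }
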
index 84f2fd85fd5ae87b547247f811cbe1634d432847..640441a2f92665867828fe774e8b2b6ce84467df 100644 (file)
@@ -1211,6 +1211,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
 
 unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
 
+static inline bool nft_use_inc(u32 *use)
+{
+       if (*use == UINT_MAX)
+               return false;
+
+       (*use)++;
+
+       return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+       WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+       WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore    nft_use_dec
+
 /**
  *     struct nft_table - nf_tables table
  *
@@ -1296,8 +1319,8 @@ struct nft_object {
        struct list_head                list;
        struct rhlist_head              rhlhead;
        struct nft_object_hash_key      key;
-       u32                             genmask:2,
-                                       use:30;
+       u32                             genmask:2;
+       u32                             use;
        u64                             handle;
        u16                             udlen;
        u8                              *udata;
@@ -1399,8 +1422,8 @@ struct nft_flowtable {
        char                            *name;
        int                             hooknum;
        int                             ops_len;
-       u32                             genmask:2,
-                                       use:30;
+       u32                             genmask:2;
+       u32                             use;
        u64                             handle;
        /* runtime data below here */
        struct list_head                hook_list ____cacheline_aligned;
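
With the use counters widened to a full u32, an increment can now fail at UINT_MAX instead of silently wrapping the old 30-bit bitfield, so callers check before committing. A condensed sketch of the pattern (function name and error code are illustrative, not a specific upstream call site):

    static int example_take_object_ref(struct nft_object *obj)
    {
            if (!nft_use_inc(&obj->use))
                    return -EMFILE;

            /* ... commit the reference; a later abort path undoes it
             * with nft_use_dec_restore(&obj->use);
             */
            return 0;
    }
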
index e98aac9d5ad5737592ab7cd409c174707cd68681..15960564e0c364ef430f1e3fcdd0e835c2f94a77 100644 (file)
@@ -134,7 +134,7 @@ extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
  */
 static inline unsigned int psched_mtu(const struct net_device *dev)
 {
-       return dev->mtu + dev->hard_header_len;
+       return READ_ONCE(dev->mtu) + dev->hard_header_len;
 }
 
 static inline struct net *qdisc_net(struct Qdisc *q)
index 22aae505c813b32df6cc1e6f7060c4abcd0b2410..a8c2817335b9ae6cef0abf6638139d343800adbb 100644 (file)
@@ -663,6 +663,7 @@ struct ocelot_ops {
                              struct flow_stats *stats);
        void (*cut_through_fwd)(struct ocelot *ocelot);
        void (*tas_clock_adjust)(struct ocelot *ocelot);
+       void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
        void (*update_stats)(struct ocelot *ocelot);
 };
 
@@ -863,12 +864,12 @@ struct ocelot {
        struct mutex                    stat_view_lock;
        /* Lock for serializing access to the MAC table */
        struct mutex                    mact_lock;
-       /* Lock for serializing forwarding domain changes */
+       /* Lock for serializing forwarding domain changes, including the
+        * configuration of the Time-Aware Shaper, MAC Merge layer and
+        * cut-through forwarding, on which it depends
+        */
        struct mutex                    fwd_domain_lock;
 
-       /* Lock for serializing Time-Aware Shaper changes */
-       struct mutex                    tas_lock;
-
        struct workqueue_struct         *owq;
 
        u8                              ptp:1;
index a87bbbbca2d48ada91efcc8aaab469ba51f0224a..794c1d857677d9b9b2e20222f671f15fbb86c4aa 100644 (file)
@@ -673,8 +673,11 @@ struct drm_gem_open {
  * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
  * and &DRM_PRIME_CAP_EXPORT.
  *
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
  */
 #define DRM_CAP_PRIME                  0x5
 /**
@@ -682,6 +685,8 @@ struct drm_gem_open {
  *
  * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
  * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
  */
 #define  DRM_PRIME_CAP_IMPORT          0x1
 /**
@@ -689,6 +694,8 @@ struct drm_gem_open {
  *
  * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
  * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
  */
 #define  DRM_PRIME_CAP_EXPORT          0x2
 /**
@@ -756,15 +763,14 @@ struct drm_gem_open {
 /**
  * DRM_CAP_SYNCOBJ
  *
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ                0x13
 /**
  * DRM_CAP_SYNCOBJ_TIMELINE
  *
  * If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE       0x14
 
@@ -909,6 +915,27 @@ struct drm_syncobj_timeline_wait {
        __u32 pad;
 };
 
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ *         available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+       __u32 handle;
+       __u32 flags;
+       __u64 point;
+       __s32 fd;
+       __u32 pad;
+};
+
 
 struct drm_syncobj_array {
        __u64 handles;
@@ -1169,6 +1196,8 @@ extern "C" {
  */
 #define DRM_IOCTL_MODE_GETFB2          DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
+#define DRM_IOCTL_SYNCOBJ_EVENTFD      DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
@@ -1180,25 +1209,50 @@ extern "C" {
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END                        0xA0
 
-/*
- * Header for events written back to userspace on the drm fd.  The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event.  A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
  *
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
  */
 struct drm_event {
        __u32 type;
        __u32 length;
 };
 
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
 #define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
 #define DRM_EVENT_CRTC_SEQUENCE        0x03
 
 struct drm_event_vblank {
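
User space pairs the new ioctl with an eventfd it owns; when the syncobj (or timeline point) signals, the kernel bumps the eventfd counter. A hedged user-space sketch (error handling omitted; fd is an open DRM device and handle an existing syncobj):

    #include <stdint.h>
    #include <sys/eventfd.h>
    #include <xf86drm.h>
    #include <drm/drm.h>

    static int example_syncobj_eventfd(int fd, uint32_t handle)
    {
            struct drm_syncobj_eventfd args = {
                    .handle = handle,
                    .flags = 0,     /* wait for the point to signal */
                    .point = 0,     /* binary syncobj */
                    .fd = eventfd(0, EFD_CLOEXEC),
            };

            return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);
    }
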
index 43691058d28fb80963a9e51c66352c1050d171e6..ea1b639bcb2883745dd07e47b52beabd2bfd4ca8 100644 (file)
@@ -488,6 +488,9 @@ struct drm_mode_get_connector {
         * This is not an object ID. This is a per-type connector number. Each
         * (type, type_id) combination is unique across all connectors of a DRM
         * device.
+        *
+        * The (type, type_id) combination is not a stable identifier: the
+        * type_id can change depending on the driver probe order.
         */
        __u32 connector_type_id;
 
@@ -883,7 +886,7 @@ struct hdr_metadata_infoframe {
         */
        struct {
                __u16 x, y;
-               } display_primaries[3];
+       } display_primaries[3];
        /**
         * @white_point: White Point of Colorspace Data.
         * These are coded as unsigned 16-bit values in units of
@@ -894,7 +897,7 @@ struct hdr_metadata_infoframe {
         */
        struct {
                __u16 x, y;
-               } white_point;
+       } white_point;
        /**
         * @max_display_mastering_luminance: Max Mastering Display Luminance.
         * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
index 839820aed87ea72476d727cc3f1399db3e075ff6..a58a14c9f22285a35adbb0ded8f09f695b12f7a8 100644 (file)
@@ -60,6 +60,7 @@ extern "C" {
 #define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
 #define DRM_IVPU_PARAM_TILE_CONFIG         11
 #define DRM_IVPU_PARAM_SKU                 12
+#define DRM_IVPU_PARAM_CAPABILITIES        13
 
 #define DRM_IVPU_PLATFORM_TYPE_SILICON     0
 
@@ -68,6 +69,9 @@ extern "C" {
 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS            2
 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3
 
+#define DRM_IVPU_CAP_METRIC_STREAMER       1
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE       2
+
 /**
  * struct drm_ivpu_param - Get/Set VPU parameters
  */
@@ -129,8 +133,10 @@ struct drm_ivpu_param {
        __u64 value;
 };
 
-#define DRM_IVPU_BO_HIGH_MEM   0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
+#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM
 #define DRM_IVPU_BO_MAPPABLE   0x00000002
+#define DRM_IVPU_BO_DMA_MEM    0x00000004
 
 #define DRM_IVPU_BO_CACHED     0x00000000
 #define DRM_IVPU_BO_UNCACHED   0x00010000
@@ -140,6 +146,7 @@ struct drm_ivpu_param {
 #define DRM_IVPU_BO_FLAGS \
        (DRM_IVPU_BO_HIGH_MEM | \
         DRM_IVPU_BO_MAPPABLE | \
+        DRM_IVPU_BO_DMA_MEM | \
         DRM_IVPU_BO_CACHE_MASK)
 
 /**
index 853a327433d3fbc7cbbc00680cd732ce95023cae..b1ad9d5ffce87366ca98686a0cdb591c3c0d30a1 100644 (file)
 extern "C" {
 #endif
 
+#define NOUVEAU_GETPARAM_PCI_VENDOR      3
+#define NOUVEAU_GETPARAM_PCI_DEVICE      4
+#define NOUVEAU_GETPARAM_BUS_TYPE        5
+#define NOUVEAU_GETPARAM_FB_SIZE         8
+#define NOUVEAU_GETPARAM_AGP_SIZE        9
+#define NOUVEAU_GETPARAM_CHIPSET_ID      11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
+#define NOUVEAU_GETPARAM_PTIMER_TIME     14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
+struct drm_nouveau_getparam {
+       __u64 param;
+       __u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+       __u32     fb_ctxdma_handle;
+       __u32     tt_ctxdma_handle;
+
+       __s32     channel;
+       __u32     pushbuf_domains;
+
+       /* Notifier memory */
+       __u32     notifier_handle;
+
+       /* DRM-enforced subchannel assignments */
+       struct {
+               __u32 handle;
+               __u32 grclass;
+       } subchan[8];
+       __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+       __s32 channel;
+};
+
 #define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
 #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
 #define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)
 
 #define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
 #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -126,16 +166,228 @@ struct drm_nouveau_gem_cpu_fini {
        __u32 handle;
 };
 
-#define DRM_NOUVEAU_GETPARAM           0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+       /**
+        * @flags: the flags for a sync object
+        *
+        * The first 8 bits are used to determine the type of the sync object.
+        */
+       __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+       /**
+        * @handle: the handle of the sync object
+        */
+       __u32 handle;
+       /**
+        * @timeline_value:
+        *
+        * The timeline point of the sync object in case the syncobj is of
+        * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+        */
+       __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards, DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+       /**
+        * @kernel_managed_addr: start address of the kernel managed VA space
+        * region
+        */
+       __u64 kernel_managed_addr;
+       /**
+        * @kernel_managed_size: size of the kernel managed VA space region in
+        * bytes
+        */
+       __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+       /**
+        * @op: the operation type
+        */
+       __u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the mapping lies
+ * within a sparse region, new sparse mappings are created where the unmapped
+ * (memory backed) mapping was previously mapped. To remove a sparse region,
+ * the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+       /**
+        * @flags: the flags for a &drm_nouveau_vm_bind_op
+        */
+       __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+       /**
+        * @handle: the handle of the DRM GEM object to map
+        */
+       __u32 handle;
+       /**
+        * @pad: 32 bit padding, should be 0
+        */
+       __u32 pad;
+       /**
+        * @addr:
+        *
+        * the address the VA space region or (memory backed) mapping should be mapped to
+        */
+       __u64 addr;
+       /**
+        * @bo_offset: the offset within the BO backing the mapping
+        */
+       __u64 bo_offset;
+       /**
+        * @range: the size of the requested mapping in bytes
+        */
+       __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+       /**
+        * @op_count: the number of &drm_nouveau_vm_bind_op
+        */
+       __u32 op_count;
+       /**
+        * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+        */
+       __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+       /**
+        * @wait_count: the number of wait &drm_nouveau_syncs
+        */
+       __u32 wait_count;
+       /**
+        * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+        */
+       __u32 sig_count;
+       /**
+        * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+        */
+       __u64 wait_ptr;
+       /**
+        * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+        */
+       __u64 sig_ptr;
+       /**
+        * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+        */
+       __u64 op_ptr;
+};
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+       /**
+        * @va: the virtual address of the push buffer mapping
+        */
+       __u64 va;
+       /**
+        * @va_len: the length of the push buffer mapping
+        */
+       __u64 va_len;
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+       /**
+        * @channel: the channel to execute the push buffer in
+        */
+       __u32 channel;
+       /**
+        * @push_count: the number of &drm_nouveau_exec_push ops
+        */
+       __u32 push_count;
+       /**
+        * @wait_count: the number of wait &drm_nouveau_syncs
+        */
+       __u32 wait_count;
+       /**
+        * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+        */
+       __u32 sig_count;
+       /**
+        * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+        */
+       __u64 wait_ptr;
+       /**
+        * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+        */
+       __u64 sig_ptr;
+       /**
+        * @push_ptr: pointer to &drm_nouveau_exec_push ops
+        */
+       __u64 push_ptr;
+};
+
+#define DRM_NOUVEAU_GETPARAM           0x00
 #define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE       0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
+#define DRM_NOUVEAU_CHANNEL_FREE       0x03
 #define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
 #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
 #define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
 #define DRM_NOUVEAU_NVIF               0x07
 #define DRM_NOUVEAU_SVM_INIT           0x08
 #define DRM_NOUVEAU_SVM_BIND           0x09
+#define DRM_NOUVEAU_VM_INIT            0x10
+#define DRM_NOUVEAU_VM_BIND            0x11
+#define DRM_NOUVEAU_EXEC               0x12
 #define DRM_NOUVEAU_GEM_NEW            0x40
 #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
 #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
@@ -188,6 +440,10 @@ struct drm_nouveau_svm_bind {
 #define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
 
 
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
 #define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
 #define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
 
@@ -197,6 +453,9 @@ struct drm_nouveau_svm_bind {
 #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
 #define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
 
+#define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
 #if defined(__cplusplus)
 }
 #endif
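
A hedged user-space sketch of the new flow: initialize the VA space split, then map a BO into the UMD-managed half. Addresses, sizes and error handling are illustrative only; with no flags and no syncs, the bind runs synchronously, as documented above:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/nouveau_drm.h>

    static int example_vm_bind(int fd, uint32_t bo_handle)
    {
            struct drm_nouveau_vm_init init = {
                    .kernel_managed_addr = 0,
                    .kernel_managed_size = 1ull << 30, /* illustrative */
            };
            struct drm_nouveau_vm_bind_op op = {
                    .op = DRM_NOUVEAU_VM_BIND_OP_MAP,
                    .handle = bo_handle,
                    .addr = 1ull << 30,     /* UMD-managed region */
                    .bo_offset = 0,
                    .range = 0x1000,
            };
            struct drm_nouveau_vm_bind bind = {
                    .op_count = 1,
                    .op_ptr = (uintptr_t)&op,
            };

            /* Must precede any BO or channel creation, per the doc above. */
            if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init))
                    return -1;

            return drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
    }
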
index 7b158fcb02b4557f423470fe3bbf709d2355f5dd..b1d0e56565bcbfaf88918ef58d3660020a30fada 100644 (file)
@@ -64,6 +64,16 @@ struct drm_virtgpu_map {
        __u32 pad;
 };
 
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET          0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+               VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+               0)
+struct drm_virtgpu_execbuffer_syncobj {
+       __u32 handle;
+       __u32 flags;
+       __u64 point;
+};
+
 /* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
 struct drm_virtgpu_execbuffer {
        __u32 flags;
@@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
        __u32 num_bo_handles;
        __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
        __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
-       __u32 pad;
+       __u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+       __u32 num_in_syncobjs;
+       __u32 num_out_syncobjs;
+       __u64 in_syncobjs;
+       __u64 out_syncobjs;
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
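
The stride field lets the syncobj element grow in later kernels without breaking old binaries: the kernel walks the arrays using whatever stride user space declares. A sketch of passing one wait syncobj (values illustrative; the remaining execbuffer fields are elided):

    #include <stdint.h>
    #include <drm/virtgpu_drm.h>

    static void example_fill_exbuf(struct drm_virtgpu_execbuffer *exbuf,
                                   struct drm_virtgpu_execbuffer_syncobj *in,
                                   uint32_t syncobj_handle)
    {
            in->handle = syncobj_handle;
            in->flags = VIRTGPU_EXECBUF_SYNCOBJ_RESET; /* reset after wait */
            in->point = 0;

            exbuf->syncobj_stride = sizeof(*in);
            exbuf->num_in_syncobjs = 1;
            exbuf->in_syncobjs = (uintptr_t)in;
    }
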
index 2801b65299aa3e6bb5e217c8c6c5f2ea5cc3c84d..fd3f9e5ee2415373f1575f83a9e433f5ee41f9f2 100644 (file)
@@ -70,6 +70,31 @@ struct utp_upiu_query {
        __be32 reserved[2];
 };
 
+/**
+ * struct utp_upiu_query_v4_0 - upiu request buffer structure for
+ * query requests per the UFS 4.0 spec and later.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @osf3: spec field B-4
+ * @osf4: spec field B-5
+ * @osf5: spec field B 6,7
+ * @osf6: spec field DW 8,9
+ * @osf7: spec field DW 10,11
+ */
+struct utp_upiu_query_v4_0 {
+       __u8 opcode;
+       __u8 idn;
+       __u8 index;
+       __u8 selector;
+       __u8 osf3;
+       __u8 osf4;
+       __be16 osf5;
+       __be32 osf6;
+       __be32 osf7;
+       __be32 reserved;
+};
+
 /**
  * struct utp_upiu_cmd - Command UPIU structure
  * @data_transfer_len: Data Transfer Length DW-3
index 4e8d6240e589bdf1b735f7527651eb149f4ff949..198cb391f9db2e9b4473c63f9139633c14942304 100644 (file)
@@ -170,6 +170,7 @@ enum attr_idn {
        QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST    = 0x1E,
        QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE        = 0x1F,
        QUERY_ATTR_IDN_EXT_IID_EN               = 0x2A,
+       QUERY_ATTR_IDN_TIMESTAMP                = 0x30
 };
 
 /* Descriptor idn for Query requests */
index e8096d502a7cb02f263f6a8fc518eb78337d0ec0..7505de2428e03e6db37d1f9f5ab01eed03a4484e 100644 (file)
@@ -2489,6 +2489,8 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                          struct io_wait_queue *iowq)
 {
+       int token, ret;
+
        if (unlikely(READ_ONCE(ctx->check_cq)))
                return 1;
        if (unlikely(!llist_empty(&ctx->work_llist)))
@@ -2499,11 +2501,20 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                return -EINTR;
        if (unlikely(io_should_wake(iowq)))
                return 0;
+
+       /*
+        * Use io_schedule_prepare/finish, so cpufreq can take into account
+        * that the task is waiting for IO - turns out to be important for low
+        * QD IO.
+        */
+       token = io_schedule_prepare();
+       ret = 0;
        if (iowq->timeout == KTIME_MAX)
                schedule();
        else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
-               return -ETIME;
-       return 0;
+               ret = -ETIME;
+       io_schedule_finish(token);
+       return ret;
 }
 
 /*
index 8a33e8747a0e2c2841fa792de1edd05651e04345..6ae02be7a48e3165d860f29765b0f9f7e9443069 100644 (file)
@@ -122,22 +122,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
        atomic_inc(&rcpu->refcnt);
 }
 
-/* called from workqueue, to workaround syscall using preempt_disable */
-static void cpu_map_kthread_stop(struct work_struct *work)
-{
-       struct bpf_cpu_map_entry *rcpu;
-
-       rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
-
-       /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
-        * as it waits until all in-flight call_rcu() callbacks complete.
-        */
-       rcu_barrier();
-
-       /* kthread_stop will wake_up_process and wait for it to complete */
-       kthread_stop(rcpu->kthread);
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
        /* The tear-down procedure should have made sure that queue is
@@ -165,6 +149,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
        }
 }
 
+/* called from workqueue, to workaround syscall using preempt_disable */
+static void cpu_map_kthread_stop(struct work_struct *work)
+{
+       struct bpf_cpu_map_entry *rcpu;
+       int err;
+
+       rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+
+       /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+        * as it waits until all in-flight call_rcu() callbacks complete.
+        */
+       rcu_barrier();
+
+       /* kthread_stop will wake_up_process and wait for it to complete */
+       err = kthread_stop(rcpu->kthread);
+       if (err) {
+               /* kthread_stop may be called before cpu_map_kthread_run
+                * is executed, so we need to release the memory related
+                * to rcpu.
+                */
+               put_cpu_map_entry(rcpu);
+       }
+}
+
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
                                     struct list_head *listp,
                                     struct xdp_cpumap_stats *stats)
index 11e54dd8b6ddcc2afc9d54824e0832c364e557e0..930b5555cfd39b82650a659ae3dab36e85035215 100644 (file)
@@ -5642,8 +5642,9 @@ continue_func:
                                verbose(env, "verifier bug. subprog has tail_call and async cb\n");
                                return -EFAULT;
                        }
-                        /* async callbacks don't increase bpf prog stack size */
-                       continue;
+                       /* async callbacks don't increase bpf prog stack size unless called directly */
+                       if (!bpf_pseudo_call(insn + i))
+                               continue;
                }
                i = next_insn;
 
index bfe3cd8ccf3668416a544594eb8eea55258cbf92..f55a40db065f792edf3d1ab14b499c19c7a87626 100644 (file)
@@ -3730,7 +3730,7 @@ static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
        }
 
        psi = cgroup_psi(cgrp);
-       new = psi_trigger_create(psi, buf, res, of->file);
+       new = psi_trigger_create(psi, buf, res, of->file, of);
        if (IS_ERR(new)) {
                cgroup_put(cgrp);
                return PTR_ERR(new);
index 7982cc9d497cd6ed715fcb18f1db7ccbbd35d7b5..016d997131d4330a002f3ebc58e2801f6af5f980 100644 (file)
@@ -174,11 +174,10 @@ static bool cleanup_symbol_name(char *s)
         * LLVM appends various suffixes for local functions and variables that
         * must be promoted to global scope as part of LTO.  This can break
         * hooking of static functions with kprobes. '.' is not a valid
-        * character in an identifier in C. Suffixes observed:
+        * character in an identifier in C. Suffixes observed only with LLVM LTO:
         * - foo.llvm.[0-9a-f]+
-        * - foo.[0-9a-f]+
         */
-       res = strchr(s, '.');
+       res = strstr(s, ".llvm.");
        if (res) {
                *res = '\0';
                return true;
index ce13f1a352514b23341ade70fa748dd9f9fb2750..1fc6095d502d9c83c1317d90057a907df91d5be6 100644 (file)
@@ -1072,7 +1072,7 @@ static int kprobe_ftrace_enabled;
 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
                               int *cnt)
 {
-       int ret = 0;
+       int ret;
 
        lockdep_assert_held(&kprobe_mutex);
 
@@ -1110,7 +1110,7 @@ static int arm_kprobe_ftrace(struct kprobe *p)
 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
                                  int *cnt)
 {
-       int ret = 0;
+       int ret;
 
        lockdep_assert_held(&kprobe_mutex);
 
@@ -2007,9 +2007,9 @@ void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
                                             void *frame_pointer)
 {
-       kprobe_opcode_t *correct_ret_addr = NULL;
        struct kretprobe_instance *ri = NULL;
        struct llist_node *first, *node = NULL;
+       kprobe_opcode_t *correct_ret_addr;
        struct kretprobe *rp;
 
        /* Find correct address and all nodes for this frame. */
@@ -2693,7 +2693,7 @@ void kprobe_free_init_mem(void)
 
 static int __init init_kprobes(void)
 {
-       int i, err = 0;
+       int i, err;
 
        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
index f62e89d0d9068ea8b154e946f6bd730888f197e7..e1b4bfa938ddf8daa4610727d74ef23364a4b463 100644 (file)
@@ -1179,6 +1179,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
                unsigned maj, min, offset;
                char *p, dummy;
 
+               error = 0;
                if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2 ||
                    sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset,
                                &dummy) == 3) {
index af51ed6d45ef17e7be6f5a292625abe020201a7c..782d3b41c1f3557775603401cfc7f62f60f70ef9 100644 (file)
@@ -426,6 +426,11 @@ late_initcall(cpu_latency_qos_init);
 
 /* Definitions related to the frequency QoS below. */
 
+static inline bool freq_qos_value_invalid(s32 value)
+{
+       return value < 0 && value != PM_QOS_DEFAULT_VALUE;
+}
+
 /**
  * freq_constraints_init - Initialize frequency QoS constraints.
  * @qos: Frequency QoS constraints to initialize.
@@ -531,7 +536,7 @@ int freq_qos_add_request(struct freq_constraints *qos,
 {
        int ret;
 
-       if (IS_ERR_OR_NULL(qos) || !req || value < 0)
+       if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value))
                return -EINVAL;
 
        if (WARN(freq_qos_request_active(req),
@@ -563,7 +568,7 @@ EXPORT_SYMBOL_GPL(freq_qos_add_request);
  */
 int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
 {
-       if (!req || new_value < 0)
+       if (!req || freq_qos_value_invalid(new_value))
                return -EINVAL;
 
        if (WARN(!freq_qos_request_active(req),
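
PM_QOS_DEFAULT_VALUE is negative (-1) and is how a request is reset to "no constraint", so the old plain "value < 0" test wrongly rejected it; the helper carves out exactly that value. A condensed sketch of the resulting behaviour (assuming an active request req):

    static void example_reset_request(struct freq_qos_request *req)
    {
            /* Resetting to the default must succeed... */
            freq_qos_update_request(req, PM_QOS_DEFAULT_VALUE);

            /* ...while any other negative frequency is still rejected. */
            WARN_ON(freq_qos_update_request(req, -2) != -EINVAL);
    }
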
index a80a73909dc2a057ac1b92c24ef6e0a825cbfa69..b3e25be58e2b74936b22184589586416a555d020 100644 (file)
@@ -7174,7 +7174,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
            recent_used_cpu != target &&
            cpus_share_cache(recent_used_cpu, target) &&
            (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
-           cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
+           cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
            asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
                return recent_used_cpu;
        }
index 81fca77397f6a2f194dccb26a02d361579de8bea..9bb3f2b3ccfcb30fed5785893451f74a6b5e4358 100644 (file)
@@ -493,8 +493,12 @@ static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
                        continue;
 
                /* Generate an event */
-               if (cmpxchg(&t->event, 0, 1) == 0)
-                       wake_up_interruptible(&t->event_wait);
+               if (cmpxchg(&t->event, 0, 1) == 0) {
+                       if (t->of)
+                               kernfs_notify(t->of->kn);
+                       else
+                               wake_up_interruptible(&t->event_wait);
+               }
                t->last_event_time = now;
                /* Reset threshold breach flag once event got generated */
                t->pending_event = false;
@@ -1271,8 +1275,9 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
        return 0;
 }
 
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
-                       char *buf, enum psi_res res, struct file *file)
+struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
+                                      enum psi_res res, struct file *file,
+                                      struct kernfs_open_file *of)
 {
        struct psi_trigger *t;
        enum psi_states state;
@@ -1331,7 +1336,9 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
 
        t->event = 0;
        t->last_event_time = 0;
-       init_waitqueue_head(&t->event_wait);
+       t->of = of;
+       if (!of)
+               init_waitqueue_head(&t->event_wait);
        t->pending_event = false;
        t->aggregator = privileged ? PSI_POLL : PSI_AVGS;
 
@@ -1388,7 +1395,10 @@ void psi_trigger_destroy(struct psi_trigger *t)
         * being accessed later. Can happen if cgroup is deleted from under a
         * polling process.
         */
-       wake_up_pollfree(&t->event_wait);
+       if (t->of)
+               kernfs_notify(t->of->kn);
+       else
+               wake_up_interruptible(&t->event_wait);
 
        if (t->aggregator == PSI_AVGS) {
                mutex_lock(&group->avgs_lock);
@@ -1465,7 +1475,10 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
        if (!t)
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
 
-       poll_wait(file, &t->event_wait, wait);
+       if (t->of)
+               kernfs_generic_poll(t->of, wait);
+       else
+               poll_wait(file, &t->event_wait, wait);
 
        if (cmpxchg(&t->event, 1, 0) == 1)
                ret |= EPOLLPRI;
@@ -1535,7 +1548,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
                return -EBUSY;
        }
 
-       new = psi_trigger_create(&psi_system, buf, res, file);
+       new = psi_trigger_create(&psi_system, buf, res, file, NULL);
        if (IS_ERR(new)) {
                mutex_unlock(&seq->lock);
                return PTR_ERR(new);
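
For reference, the user-space side of a PSI trigger is unchanged by the kernfs rework; a sketch following Documentation/accounting/psi.rst (150ms of memory stall in any 1s window; error handling trimmed):

    #include <fcntl.h>
    #include <poll.h>
    #include <string.h>
    #include <unistd.h>

    static int example_psi_wait(void)
    {
            const char trig[] = "some 150000 1000000";
            struct pollfd fds;

            fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
            if (fds.fd < 0 || write(fds.fd, trig, strlen(trig) + 1) < 0)
                    return -1;

            fds.events = POLLPRI;
            return poll(&fds, 1, -1); /* POLLPRI on threshold breach */
    }
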
index cd2c35b1dd8f85b3fab6db2a3ae91d281a3a8eea..c83c005e654e3fed617fcc1e05359f06aa0ed74e 100644 (file)
@@ -15,6 +15,7 @@
 #include <trace/events/sched.h>
 
 #include "ftrace_internal.h"
+#include "trace.h"
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ASSIGN_OPS_HASH(opsname, val) \
index e4704ec26df7724363fe08ced756e2dfcc79ebb8..3b21f406325829ceedfb3911ce76ed0f219005b8 100644 (file)
@@ -100,14 +100,22 @@ static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
                return;
        }
 
+       /*
+        * This user handler is shared with other kprobes and is not expected to be
+        * called recursively. So if any other kprobe handler is running, this
+        * will exit as a kprobe handler does. See the section 'Share the
+        * callbacks with kprobes' in Documentation/trace/fprobe.rst for more
+        * information.
+        */
        if (unlikely(kprobe_running())) {
                fp->nmissed++;
-               return;
+               goto recursion_unlock;
        }
 
        kprobe_busy_begin();
        __fprobe_handler(ip, parent_ip, ops, fregs);
        kprobe_busy_end();
+
+recursion_unlock:
        ftrace_test_recursion_unlock(bit);
 }
 
@@ -371,19 +379,16 @@ int unregister_fprobe(struct fprobe *fp)
        if (!fprobe_is_registered(fp))
                return -EINVAL;
 
-       /*
-        * rethook_free() starts disabling the rethook, but the rethook handlers
-        * may be running on other processors at this point. To make sure that all
-        * current running handlers are finished, call unregister_ftrace_function()
-        * after this.
-        */
        if (fp->rethook)
-               rethook_free(fp->rethook);
+               rethook_stop(fp->rethook);
 
        ret = unregister_ftrace_function(&fp->ops);
        if (ret < 0)
                return ret;
 
+       if (fp->rethook)
+               rethook_free(fp->rethook);
+
        ftrace_free_filter(&fp->ops);
 
        return ret;
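
The resulting teardown ordering, condensed from unregister_fprobe() above (not a standalone API sketch): stop the rethook so no new handlers start, let ftrace synchronize with handlers already running, and only then free:

    rethook_stop(fp->rethook);            /* 1. refuse new handler runs */
    unregister_ftrace_function(&fp->ops); /* 2. waits out running handlers */
    rethook_free(fp->rethook);            /* 3. now safe to free via RCU */
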
index 3740aca79fe73aaa9a51653124c5c3c23c831052..05c0024815bf98c8d35ce8411d4a6950f539a166 100644 (file)
@@ -3305,6 +3305,22 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
        return cnt;
 }
 
+static void ftrace_free_pages(struct ftrace_page *pages)
+{
+       struct ftrace_page *pg = pages;
+
+       while (pg) {
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
+               pages = pg->next;
+               kfree(pg);
+               pg = pages;
+               ftrace_number_of_groups--;
+       }
+}
+
 static struct ftrace_page *
 ftrace_allocate_pages(unsigned long num_to_init)
 {
@@ -3343,17 +3359,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return start_pg;
 
  free_pages:
-       pg = start_pg;
-       while (pg) {
-               if (pg->records) {
-                       free_pages((unsigned long)pg->records, pg->order);
-                       ftrace_number_of_pages -= 1 << pg->order;
-               }
-               start_pg = pg->next;
-               kfree(pg);
-               pg = start_pg;
-               ftrace_number_of_groups--;
-       }
+       ftrace_free_pages(start_pg);
        pr_info("ftrace: FAILED to allocate memory for functions\n");
        return NULL;
 }
@@ -6471,9 +6477,11 @@ static int ftrace_process_locs(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
 {
+       struct ftrace_page *pg_unuse = NULL;
        struct ftrace_page *start_pg;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
+       unsigned long skipped = 0;
        unsigned long count;
        unsigned long *p;
        unsigned long addr;
@@ -6536,8 +6544,10 @@ static int ftrace_process_locs(struct module *mod,
                 * object files to satisfy alignments.
                 * Skip any NULL pointers.
                 */
-               if (!addr)
+               if (!addr) {
+                       skipped++;
                        continue;
+               }
 
                end_offset = (pg->index+1) * sizeof(pg->records[0]);
                if (end_offset > PAGE_SIZE << pg->order) {
@@ -6551,8 +6561,10 @@ static int ftrace_process_locs(struct module *mod,
                rec->ip = addr;
        }
 
-       /* We should have used all pages */
-       WARN_ON(pg->next);
+       if (pg->next) {
+               pg_unuse = pg->next;
+               pg->next = NULL;
+       }
 
        /* Assign the last page to ftrace_pages */
        ftrace_pages = pg;
@@ -6574,6 +6586,11 @@ static int ftrace_process_locs(struct module *mod,
  out:
        mutex_unlock(&ftrace_lock);
 
+       /* We should have used all pages unless we skipped some */
+       if (pg_unuse) {
+               WARN_ON(!skipped);
+               ftrace_free_pages(pg_unuse);
+       }
        return ret;
 }
 
index 382775edf6902d2da767d369954c245be48de546..5012c04f92c0021646eaad1fbf48d9b71918a5d1 100644 (file)
@@ -2,6 +2,9 @@
 #ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
 #define  _LINUX_KERNEL_FTRACE_INTERNAL_H
 
+int __register_ftrace_function(struct ftrace_ops *ops);
+int __unregister_ftrace_function(struct ftrace_ops *ops);
+
 #ifdef CONFIG_FUNCTION_TRACER
 
 extern struct mutex ftrace_lock;
@@ -15,8 +18,6 @@ int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
 
 #else /* !CONFIG_DYNAMIC_FTRACE */
 
-int __register_ftrace_function(struct ftrace_ops *ops);
-int __unregister_ftrace_function(struct ftrace_ops *ops);
 /* Keep as macros so we do not need to define the commands */
 # define ftrace_startup(ops, command)                                  \
        ({                                                              \
index f32ee484391adfc80f7727db903a5de3d09fe3f3..5eb9b598f4e9c2f13a20803a0204b6d951409610 100644 (file)
@@ -53,6 +53,19 @@ static void rethook_free_rcu(struct rcu_head *head)
                kfree(rh);
 }
 
+/**
+ * rethook_stop() - Stop using a rethook.
+ * @rh: the struct rethook to stop.
+ *
+ * Stop using a rethook to prepare for freeing it. If you want to wait for
+ * all running rethook handlers to finish before calling rethook_free(), you
+ * need to call this first, wait for an RCU grace period, and then call
+ * rethook_free().
+ */
+void rethook_stop(struct rethook *rh)
+{
+       WRITE_ONCE(rh->handler, NULL);
+}
+
 /**
  * rethook_free() - Free struct rethook.
  * @rh: the struct rethook to be freed.
index 834b361a4a66c35b6747df7225ebd2cbc7edd2c6..14d8001140c828199fc4f64321285f7c34985eac 100644 (file)
@@ -5242,28 +5242,34 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
 
+static void rb_clear_buffer_page(struct buffer_page *page)
+{
+       local_set(&page->write, 0);
+       local_set(&page->entries, 0);
+       rb_init_page(page->page);
+       page->read = 0;
+}
+
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 {
+       struct buffer_page *page;
+
        rb_head_page_deactivate(cpu_buffer);
 
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
-       local_set(&cpu_buffer->head_page->write, 0);
-       local_set(&cpu_buffer->head_page->entries, 0);
-       local_set(&cpu_buffer->head_page->page->commit, 0);
-
-       cpu_buffer->head_page->read = 0;
+       rb_clear_buffer_page(cpu_buffer->head_page);
+       list_for_each_entry(page, cpu_buffer->pages, list) {
+               rb_clear_buffer_page(page);
+       }
 
        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        INIT_LIST_HEAD(&cpu_buffer->new_pages);
-       local_set(&cpu_buffer->reader_page->write, 0);
-       local_set(&cpu_buffer->reader_page->entries, 0);
-       local_set(&cpu_buffer->reader_page->page->commit, 0);
-       cpu_buffer->reader_page->read = 0;
+       rb_clear_buffer_page(cpu_buffer->reader_page);
 
        local_set(&cpu_buffer->entries_bytes, 0);
        local_set(&cpu_buffer->overrun, 0);
index 4529e264cb86581d69b55027d147ebd5e60dbeb2..be847d45d81cfd2f82dc90ff8ca9668a2fc40351 100644 (file)
@@ -3118,6 +3118,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
        struct ftrace_stack *fstack;
        struct stack_entry *entry;
        int stackidx;
+       void *ptr;
 
        /*
         * Add one, for this function and the call to save_stack_trace()
@@ -3161,9 +3162,25 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                    trace_ctx);
        if (!event)
                goto out;
-       entry = ring_buffer_event_data(event);
+       ptr = ring_buffer_event_data(event);
+       entry = ptr;
+
+       /*
+        * For backward compatibility reasons, the entry->caller is an
+        * array of 8 slots to store the stack. This is also exported
+        * to user space. The amount allocated on the ring buffer actually
+        * holds enough for the stack specified by nr_entries. This will
+        * go into the location of entry->caller. Because the string
+        * fortifiers check the size of the destination of memcpy(), they
+        * trigger a warning when they detect that the copied size is greater
+        * than 8. To hide this from the fortifiers, we use "ptr" and pointer
+        * arithmetic to assign caller.
+        *
+        * The below is really just:
+        *   memcpy(&entry->caller, fstack->calls, size);
+        */
+       ptr += offsetof(typeof(*entry), caller);
+       memcpy(ptr, fstack->calls, size);
 
-       memcpy(&entry->caller, fstack->calls, size);
        entry->size = nr_entries;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
@@ -6764,6 +6781,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
        free_cpumask_var(iter->started);
        kfree(iter->fmt);
+       kfree(iter->temp);
        mutex_destroy(&iter->mutex);
        kfree(iter);
 
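The same trick generalizes to any fixed-size member backing a variable-length allocation. A hedged sketch under an illustrative struct (not the real stack_entry):

    #include <linux/stddef.h>
    #include <linux/string.h>

    /* Illustrative struct: the declared 8-slot array is kept for ABI,
     * but the allocation behind it may be larger.
     */
    struct example_entry {
            int size;
            unsigned long caller[8];
    };

    static void example_fill(struct example_entry *entry,
                             unsigned long *calls, size_t bytes)
    {
            void *ptr = entry;

            /* Route the destination through 'ptr' so FORTIFY_SOURCE
             * cannot see the 8-element declared size; equivalent to
             * memcpy(&entry->caller, calls, bytes).
             */
            ptr += offsetof(struct example_entry, caller);
            memcpy(ptr, calls, bytes);
    }
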
index ed7906b13f0947b565e9d66bc68c843b8a19abab..e1edc2197fc89d2d55827e4991a693fcef62c1ff 100644 (file)
@@ -113,6 +113,8 @@ enum trace_type {
 #define MEM_FAIL(condition, fmt, ...)                                  \
        DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
 
+#define FAULT_STRING "(fault)"
+
 #define HIST_STACKTRACE_DEPTH  16
 #define HIST_STACKTRACE_SIZE   (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
 #define HIST_STACKTRACE_SKIP   5
index cb0077ba2b49694b50e650155efba7f70406cd85..a0a704ba27db3471b4d029a387d50b2b12c78fed 100644 (file)
@@ -644,6 +644,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
        struct trace_eprobe *ep;
        bool enabled;
        int ret = 0;
+       int cnt = 0;
 
        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
@@ -667,12 +668,25 @@ static int enable_trace_eprobe(struct trace_event_call *call,
                if (ret)
                        break;
                enabled = true;
+               cnt++;
        }
 
        if (ret) {
                /* Failed to enable one of them. Roll back all */
-               if (enabled)
-                       disable_eprobe(ep, file->tr);
+               if (enabled) {
+                       /*
+                        * It's a bug if one failed for something other than memory
+                        * not being available but another eprobe succeeded.
+                        */
+                       WARN_ON_ONCE(ret != -ENOMEM);
+
+                       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                               ep = container_of(pos, struct trace_eprobe, tp);
+                               disable_eprobe(ep, file->tr);
+                               if (!--cnt)
+                                       break;
+                       }
+               }
                if (file)
                        trace_probe_remove_file(tp, file);
                else
index b97d3ad832f1a4354fc59918b2ce6a12b18ce484..c8c61381eba48397393d5361c2dec5bfa50782a3 100644 (file)
@@ -6663,13 +6663,15 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
        if (get_named_trigger_data(trigger_data))
                goto enable;
 
-       if (has_hist_vars(hist_data))
-               save_hist_vars(hist_data);
-
        ret = create_actions(hist_data);
        if (ret)
                goto out_unreg;
 
+       if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+               if (save_hist_vars(hist_data))
+                       goto out_unreg;
+       }
+
        ret = tracing_map_init(hist_data->map);
        if (ret)
                goto out_unreg;
index 4f5e74bbdab2ca1ec988b2a851d1fefb69fc5c3b..33cb6af31f39572c6b7a491a204c85fd68b52ddc 100644 (file)
@@ -1317,6 +1317,9 @@ static int user_field_set_string(struct ftrace_event_field *field,
        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
 
+       if (str_has_prefix(field->type, "struct "))
+               pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
+
        if (colon)
                pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
 
index 16548ee4c8c6cb2b97e68820447a7465e72357c8..3851cd1e6a62c53e3ab4d0b2884e10aa48857810 100644 (file)
@@ -1,4 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
+
+#include "trace_kprobe_selftest.h"
+
 /*
  * Function used during the kprobe self test. This function is in a separate
  * compile unit so it can be compiled with CC_FLAGS_FTRACE to ensure that it
index 7ba371da0926e7936fbeb1ef7d7c30b86de38933..b2b726bea1f9e67f39d477509745a5b2a8bc667a 100644 (file)
@@ -67,7 +67,7 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent)
        int len = *(u32 *)data >> 16;
 
        if (!len)
-               trace_seq_puts(s, "(fault)");
+               trace_seq_puts(s, FAULT_STRING);
        else
                trace_seq_printf(s, "\"%s\"",
                                 (const char *)get_loc_data(data, ent));
index c4e1d4c03a85f56664f7604975bd2f97cb2496cd..bb723eefd7b71b2396f29ac5cc1e005d669c810a 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __TRACE_PROBE_KERNEL_H_
 #define __TRACE_PROBE_KERNEL_H_
 
-#define FAULT_STRING "(fault)"
-
 /*
  * This depends on trace_probe.h, but can not include it due to
  * the way trace_probe_tmpl.h is used by trace_kprobe.c and trace_eprobe.c.
@@ -15,16 +13,8 @@ static nokprobe_inline int
 fetch_store_strlen_user(unsigned long addr)
 {
        const void __user *uaddr =  (__force const void __user *)addr;
-       int ret;
 
-       ret = strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
-       /*
-        * strnlen_user_nofault returns zero on fault, insert the
-        * FAULT_STRING when that occurs.
-        */
-       if (ret <= 0)
-               return strlen(FAULT_STRING) + 1;
-       return ret;
+       return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
 }
 
 /* Return the length of string -- including null terminal byte */
@@ -44,18 +34,14 @@ fetch_store_strlen(unsigned long addr)
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 
-       /* For faults, return enough to hold the FAULT_STRING */
-       return (ret < 0) ? strlen(FAULT_STRING) + 1 : len;
+       return (ret < 0) ? ret : len;
 }
 
-static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base, int len)
+static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base)
 {
-       if (ret >= 0) {
-               *(u32 *)dest = make_data_loc(ret, __dest - base);
-       } else {
-               strscpy(__dest, FAULT_STRING, len);
-               ret = strlen(__dest) + 1;
-       }
+       if (ret < 0)
+               ret = 0;
+       *(u32 *)dest = make_data_loc(ret, __dest - base);
 }
 
 /*
@@ -76,7 +62,7 @@ fetch_store_string_user(unsigned long addr, void *dest, void *base)
        __dest = get_loc_data(dest, base);
 
        ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
-       set_data_loc(ret, dest, __dest, base, maxlen);
+       set_data_loc(ret, dest, __dest, base);
 
        return ret;
 }
@@ -107,7 +93,7 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
         * probing.
         */
        ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
-       set_data_loc(ret, dest, __dest, base, maxlen);
+       set_data_loc(ret, dest, __dest, base);
 
        return ret;
 }
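
Taken together with the FAULT_STRING move into trace.h and the print-side change above (trace_seq_puts(s, FAULT_STRING) when the stored length is zero), the fetch helpers no longer substitute "(fault)" into the recorded buffer: they pass the negative error through, and set_data_loc() records a zero-length entry, leaving the substitution to the output path. A user-space model of the data_loc handling (the 16/16 packing mirrors what make_data_loc() does in trace_probe.h):

    #include <stdint.h>
    #include <stdio.h>

    /* high 16 bits: length, low 16 bits: offset into the dynamic area */
    static uint32_t make_data_loc(int len, int offs)
    {
            return ((uint32_t)len << 16) | ((uint32_t)offs & 0xffff);
    }

    static void set_data_loc(int ret, uint32_t *dest, int offs)
    {
            if (ret < 0)
                    ret = 0;        /* fault: record an empty string */
            *dest = make_data_loc(ret, offs);
    }

    int main(void)
    {
            uint32_t loc;

            set_data_loc(-14, &loc, 64);    /* e.g. -EFAULT from the copy */
            printf("len=%u offs=%u\n", loc >> 16, loc & 0xffff);
            return 0;
    }
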
index 00707630788d6d4f217d1c211ad425eb2d493dc5..3935b347f874bc23a155d74edd414eff42902cc9 100644 (file)
@@ -156,11 +156,11 @@ stage3:
                        code++;
                        goto array;
                case FETCH_OP_ST_USTRING:
-                       ret += fetch_store_strlen_user(val + code->offset);
+                       ret = fetch_store_strlen_user(val + code->offset);
                        code++;
                        goto array;
                case FETCH_OP_ST_SYMSTR:
-                       ret += fetch_store_symstrlen(val + code->offset);
+                       ret = fetch_store_symstrlen(val + code->offset);
                        code++;
                        goto array;
                default:
@@ -204,6 +204,8 @@ stage3:
 array:
        /* the last stage: Loop on array */
        if (code->op == FETCH_OP_LP_ARRAY) {
+               if (ret < 0)
+                       ret = 0;
                total += ret;
                if (++i < code->param) {
                        code = s3;
@@ -265,9 +267,7 @@ store_trace_args(void *data, struct trace_probe *tp, void *rec,
                if (unlikely(arg->dynamic))
                        *dl = make_data_loc(maxlen, dyndata - base);
                ret = process_fetch_insn(arg->code, rec, dl, base);
-               if (unlikely(ret < 0 && arg->dynamic)) {
-                       *dl = make_data_loc(0, dyndata - base);
-               } else {
+               if (arg->dynamic && likely(ret > 0)) {
                        dyndata += ret;
                        maxlen -= ret;
                }
index fa09b33ee7315bcf4af61c2faf322bbd12265a05..688bf579f2f1e78be4fd339d15f2093748c09192 100644 (file)
@@ -170,7 +170,8 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
                         */
                        ret++;
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
-       }
+       } else
+               *(u32 *)dest = make_data_loc(0, (void *)dst - base);
 
        return ret;
 }
index b667b1e2f6886689b79c5c7158e3bd05706d78ce..e4dc809d10754fffca03b5f141d981d8805f9803 100644 (file)
@@ -1349,7 +1349,7 @@ uaccess_end:
        return ret;
 }
 
-static int copy_iovec_from_user(struct iovec *iov,
+static __noclone int copy_iovec_from_user(struct iovec *iov,
                const struct iovec __user *uiov, unsigned long nr_segs)
 {
        int ret = -EFAULT;
index 1a888b86a4948c4d8e088f76ee48adb50cb72196..1df1d29dee920bddbaff384fd36783191e699d51 100644 (file)
@@ -390,6 +390,8 @@ static int head_onwire_len(int ctrl_len, bool secure)
        int head_len;
        int rem_len;
 
+       BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
+
        if (secure) {
                head_len = CEPH_PREAMBLE_SECURE_LEN;
                if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
@@ -408,6 +410,10 @@ static int head_onwire_len(int ctrl_len, bool secure)
 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
                             bool secure)
 {
+       BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
+              middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
+              data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
+
        if (!front_len && !middle_len && !data_len)
                return 0;
 
@@ -520,29 +526,34 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
                desc->fd_aligns[i] = ceph_decode_16(&p);
        }
 
-       /*
-        * This would fire for FRAME_TAG_WAIT (it has one empty
-        * segment), but we should never get it as client.
-        */
-       if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
-               pr_err("last segment empty\n");
+       if (desc->fd_lens[0] < 0 ||
+           desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
+               pr_err("bad control segment length %d\n", desc->fd_lens[0]);
                return -EINVAL;
        }
-
-       if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
-               pr_err("control segment too big %d\n", desc->fd_lens[0]);
+       if (desc->fd_lens[1] < 0 ||
+           desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
+               pr_err("bad front segment length %d\n", desc->fd_lens[1]);
                return -EINVAL;
        }
-       if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
-               pr_err("front segment too big %d\n", desc->fd_lens[1]);
+       if (desc->fd_lens[2] < 0 ||
+           desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
+               pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
                return -EINVAL;
        }
-       if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
-               pr_err("middle segment too big %d\n", desc->fd_lens[2]);
+       if (desc->fd_lens[3] < 0 ||
+           desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
+               pr_err("bad data segment length %d\n", desc->fd_lens[3]);
                return -EINVAL;
        }
-       if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
-               pr_err("data segment too big %d\n", desc->fd_lens[3]);
+
+       /*
+        * This would fire for FRAME_TAG_WAIT (it has one empty
+        * segment), but we should never get it as client.
+        */
+       if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
+               pr_err("last segment empty, segment count %d\n",
+                      desc->fd_seg_cnt);
                return -EINVAL;
        }
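
All four wire-supplied segment lengths are now validated as signed values before anything downstream trusts them, and the BUG_ON()s added to head_onwire_len()/__tail_onwire_len() act as a backstop if a caller ever slips an unchecked length through. The validate-early shape in a stand-alone C model (the limit here is a placeholder, not the real CEPH_MSG_MAX_CONTROL_LEN):

    #include <stdio.h>

    #define MAX_CONTROL_LEN (64 * 1024)     /* placeholder limit */

    static int check_segment(int len, int max, const char *name)
    {
            if (len < 0 || len > max) {
                    fprintf(stderr, "bad %s segment length %d\n", name, len);
                    return -1;      /* -EINVAL in the kernel */
            }
            return 0;
    }

    int main(void)
    {
            /* a hostile peer can encode 0xffffffff, which reads back as -1 */
            return check_segment(-1, MAX_CONTROL_LEN, "control") == -1 ? 0 : 1;
    }
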
 
index 805b7385dd8daa6bf8c2628acfda75002ec09943..6aef976bc1da20a66edcde19d76d8412d5378032 100644 (file)
@@ -63,4 +63,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
 EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_bad_csum);
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(udp_fail_queue_rcv_skb);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(sk_data_ready);
index 6c5915efbc17fa9da8c2e1cab1f4d9d57c631b20..a298992060e6efdecb87c7ffc8290eafe330583f 100644 (file)
@@ -4261,6 +4261,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
+       /* Ensure the head is writeable before touching the shared info */
+       err = skb_unclone(skb, GFP_ATOMIC);
+       if (err)
+               goto err_linearize;
+
        skb_shinfo(skb)->frag_list = NULL;
 
        while (list_skb) {
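
A cloned skb shares its head, and with it the skb_shared_info where frag_list lives, so clearing frag_list without first making the head private would also corrupt the clone's sibling; skb_unclone() forces a private copy when needed. The copy-on-write rule in miniature (a toy model, not the real skb API):

    #include <stdlib.h>
    #include <string.h>

    struct shared_info {
            int refcnt;
            void *frag_list;
    };

    /* take a private copy of the shared part before writing to it */
    static struct shared_info *unclone(struct shared_info *si)
    {
            struct shared_info *copy;

            if (si->refcnt == 1)
                    return si;              /* already private */
            copy = malloc(sizeof(*copy));
            if (!copy)
                    return NULL;            /* -ENOMEM in the kernel */
            memcpy(copy, si, sizeof(*copy));
            copy->refcnt = 1;
            si->refcnt--;
            return copy;
    }

    int main(void)
    {
            struct shared_info shared = { .refcnt = 2, .frag_list = &shared };
            struct shared_info *mine = unclone(&shared);

            if (!mine)
                    return 1;
            mine->frag_list = NULL;         /* the other clone is unaffected */
            return shared.frag_list ? 0 : 1;
    }
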
index 41e5ca8643ec93fd71439192f208a407614ec616..8362130bf085d53d3c5f18bc00763718db8cad5d 100644 (file)
@@ -741,7 +741,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
 __diag_pop();
 
 BTF_SET8_START(xdp_metadata_kfunc_ids)
-#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
+#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
 XDP_METADATA_KFUNC_xxx
 #undef XDP_METADATA_KFUNC
 BTF_SET8_END(xdp_metadata_kfunc_ids)
index 5479da08ef40d61980cfebf7e63206ec9ce287bf..e5213e598a0408a55091cab9da7351c9cefc604d 100644 (file)
@@ -318,9 +318,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
                                  unsigned long when)
 {
-       if (!timer_pending(&idev->rs_timer))
+       if (!mod_timer(&idev->rs_timer, jiffies + when))
                in6_dev_hold(idev);
-       mod_timer(&idev->rs_timer, jiffies + when);
 }
 
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
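
mod_timer() returns 1 when the timer was already pending and 0 when it was idle, so the decision "did arming this timer create a new pending state that needs a device reference?" is now made atomically inside mod_timer(). The removed form checked timer_pending() first; if the timer fired between that check and mod_timer(), the reference count went out of sync. A toy model of the convention:

    #include <stdbool.h>
    #include <stdio.h>

    struct timer { bool pending; };

    /* toy mod_timer(): arms the timer, reports whether it was already pending */
    static bool toy_mod_timer(struct timer *t)
    {
            bool was_pending = t->pending;

            t->pending = true;
            return was_pending;
    }

    int main(void)
    {
            struct timer t = { .pending = false };
            int refcnt = 0;

            if (!toy_mod_timer(&t))         /* idle -> pending: take a ref */
                    refcnt++;
            if (!toy_mod_timer(&t))         /* already pending: no new ref */
                    refcnt++;
            printf("refcnt=%d (expect 1)\n", refcnt);
            return refcnt == 1 ? 0 : 1;
    }
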
index 9edf1f45b1ed6e40290b8470f1ce5ab20cd8e15e..65fa5014bc85ef035672737d7aaa695d54d22b21 100644 (file)
@@ -424,7 +424,10 @@ static struct net_device *icmp6_dev(const struct sk_buff *skb)
        if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
                const struct rt6_info *rt6 = skb_rt6_info(skb);
 
-               if (rt6)
+               /* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
+                * and ip6_null_entry could be set to skb if no route is found.
+                */
+               if (rt6 && rt6->rt6i_idev)
                        dev = rt6->rt6i_idev->dev;
        }
 
index 317b01c9bc39f9283c2e81ceaaecd26bc3532b80..b7c972aa09a75404e0edb33f0354c53702c991f8 100644 (file)
@@ -45,6 +45,7 @@
 #include <net/tcp_states.h>
 #include <net/ip6_checksum.h>
 #include <net/ip6_tunnel.h>
+#include <trace/events/udp.h>
 #include <net/xfrm.h>
 #include <net/inet_hashtables.h>
 #include <net/inet6_hashtables.h>
@@ -90,7 +91,7 @@ static u32 udp6_ehashfn(const struct net *net,
        fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
 
        return __inet6_ehashfn(lhash, lport, fhash, fport,
-                              udp_ipv6_hash_secret + net_hash_mix(net));
+                              udp6_ehash_secret + net_hash_mix(net));
 }
 
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
@@ -680,6 +681,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
                UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb_reason(skb, drop_reason);
+               trace_udp_fail_queue_rcv_skb(rc, sk);
                return -1;
        }
 
index d119f1d4c2fc863f598a7b2359716513ff6058a0..992393102d5f536bcf6e0807ce284601d0051c9a 100644 (file)
@@ -211,24 +211,18 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              unsigned int zoneid,
                              const struct net *net)
 {
-       u64 a, b, c, d;
+       siphash_key_t key;
 
        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
 
-       /* The direction must be ignored, handle usable tuplehash members manually */
-       a = (u64)tuple->src.u3.all[0] << 32 | tuple->src.u3.all[3];
-       b = (u64)tuple->dst.u3.all[0] << 32 | tuple->dst.u3.all[3];
+       key = nf_conntrack_hash_rnd;
 
-       c = (__force u64)tuple->src.u.all << 32 | (__force u64)tuple->dst.u.all << 16;
-       c |= tuple->dst.protonum;
+       key.key[0] ^= zoneid;
+       key.key[1] ^= net_hash_mix(net);
 
-       d = (u64)zoneid << 32 | net_hash_mix(net);
-
-       /* IPv4: u3.all[1,2,3] == 0 */
-       c ^= (u64)tuple->src.u3.all[1] << 32 | tuple->src.u3.all[2];
-       d += (u64)tuple->dst.u3.all[1] << 32 | tuple->dst.u3.all[2];
-
-       return (u32)siphash_4u64(a, b, c, d, &nf_conntrack_hash_rnd);
+       return siphash((void *)tuple,
+                       offsetofend(struct nf_conntrack_tuple, dst.__nfct_hash_offsetend),
+                       &key);
 }
 
 static u32 scale_hash(u32 hash)
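
Hashing the tuple as one contiguous prefix removes a whole class of bugs: the hand-folded version had to be updated by hand for every tuple member and quietly ignored anything it forgot, while siphash over everything up to the __nfct_hash_offsetend marker picks up layout changes automatically, with zone and netns mixed into the key rather than the message. The struct-prefix idea in a stand-alone sketch (djb2 stands in for siphash; this assumes the hashed prefix has no uninitialized padding, which the kernel arranges by placement of the marker):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define offsetofend(type, member) \
            (offsetof(type, member) + sizeof(((type *)0)->member))

    struct tuple {
            uint32_t src, dst;
            uint16_t sport, dport;
            uint32_t proto;                 /* last hashed member */
            void *not_hashed;               /* direction etc. stays out */
    };

    static uint32_t keyed_hash(const void *p, size_t len, uint64_t key)
    {
            const uint8_t *b = p;
            uint64_t h = key ^ 5381;

            while (len--)
                    h = h * 33 + *b++;
            return (uint32_t)h;
    }

    int main(void)
    {
            struct tuple t = { .src = 1, .dst = 2, .sport = 80,
                               .dport = 443, .proto = 6 };
            uint64_t key = 0x1234 ^ 42;     /* base key ^ zone/netns mix */

            printf("%08x\n",
                   keyed_hash(&t, offsetofend(struct tuple, proto), key));
            return 0;
    }
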
index 0c4db2f2ac43e38081baab2a6bbb6d811018bdcb..f22691f8385363243d16a1536fd4b1195484f31b 100644 (file)
@@ -360,6 +360,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
        BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
        BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
 
+       if (!nf_ct_helper_hash)
+               return -ENOENT;
+
        if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
                return -EINVAL;
 
@@ -515,4 +518,5 @@ int nf_conntrack_helper_init(void)
 void nf_conntrack_helper_fini(void)
 {
        kvfree(nf_ct_helper_hash);
+       nf_ct_helper_hash = NULL;
 }
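
Two halves of one fix: the fini path poisons the pointer after kvfree(), and the register path refuses to touch a torn-down hash, so a helper registering after teardown can no longer dereference freed memory. The idiom in miniature:

    #include <errno.h>
    #include <stdlib.h>

    static int *helper_hash;

    static int helper_register(void)
    {
            if (!helper_hash)
                    return -ENOENT;         /* subsystem already gone */
            /* ... insert into the hash ... */
            return 0;
    }

    static void helper_fini(void)
    {
            free(helper_hash);
            helper_hash = NULL;             /* late callers now fail cleanly */
    }

    int main(void)
    {
            helper_hash = calloc(16, sizeof(*helper_hash));
            if (!helper_hash)
                    return 1;
            helper_fini();
            return helper_register() == -ENOENT ? 0 : 1;
    }
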
index ad6f0ca40cd2b41e8acfef435cdd7197c6096fbe..af369e686fc5eeecbb8352cdcc56b13a8fb0fc3c 100644 (file)
@@ -205,6 +205,8 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
                            const struct nf_hook_state *state)
 {
+       unsigned long status;
+
        if (!nf_ct_is_confirmed(ct)) {
                unsigned int *timeouts = nf_ct_timeout_lookup(ct);
 
@@ -217,11 +219,17 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
                ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
        }
 
+       status = READ_ONCE(ct->status);
        /* If we've seen traffic both ways, this is a GRE connection.
         * Extend timeout. */
-       if (ct->status & IPS_SEEN_REPLY) {
+       if (status & IPS_SEEN_REPLY) {
                nf_ct_refresh_acct(ct, ctinfo, skb,
                                   ct->proto.gre.stream_timeout);
+
+               /* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+               if (unlikely((status & IPS_NAT_CLASH)))
+                       return NF_ACCEPT;
+
                /* Also, more likely to be important, and not a probe. */
                if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
                        nf_conntrack_event_cache(IPCT_ASSURED, ct);
index 9573a8fcad796282b97d19353f65ed86a8b6af61..237f739da3ca713c31eaa5f13da8b1ec184a91e4 100644 (file)
@@ -253,8 +253,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
        if (chain->bound)
                return -EBUSY;
 
+       if (!nft_use_inc(&chain->use))
+               return -EMFILE;
+
        chain->bound = true;
-       chain->use++;
        nft_chain_trans_bind(ctx, chain);
 
        return 0;
@@ -437,7 +439,7 @@ static int nft_delchain(struct nft_ctx *ctx)
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       ctx->table->use--;
+       nft_use_dec(&ctx->table->use);
        nft_deactivate_next(ctx->net, ctx->chain);
 
        return 0;
@@ -476,7 +478,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
        /* You cannot delete the same rule twice */
        if (nft_is_active_next(ctx->net, rule)) {
                nft_deactivate_next(ctx->net, rule);
-               ctx->chain->use--;
+               nft_use_dec(&ctx->chain->use);
                return 0;
        }
        return -ENOENT;
@@ -644,7 +646,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
                nft_map_deactivate(ctx, set);
 
        nft_deactivate_next(ctx->net, set);
-       ctx->table->use--;
+       nft_use_dec(&ctx->table->use);
 
        return err;
 }
@@ -676,7 +678,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
                return err;
 
        nft_deactivate_next(ctx->net, obj);
-       ctx->table->use--;
+       nft_use_dec(&ctx->table->use);
 
        return err;
 }
@@ -711,7 +713,7 @@ static int nft_delflowtable(struct nft_ctx *ctx,
                return err;
 
        nft_deactivate_next(ctx->net, flowtable);
-       ctx->table->use--;
+       nft_use_dec(&ctx->table->use);
 
        return err;
 }
@@ -2396,9 +2398,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        struct nft_chain *chain;
        int err;
 
-       if (table->use == UINT_MAX)
-               return -EOVERFLOW;
-
        if (nla[NFTA_CHAIN_HOOK]) {
                struct nft_stats __percpu *stats = NULL;
                struct nft_chain_hook hook = {};
@@ -2494,6 +2493,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (err < 0)
                goto err_destroy_chain;
 
+       if (!nft_use_inc(&table->use)) {
+               err = -EMFILE;
+               goto err_use;
+       }
+
        trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
@@ -2510,10 +2514,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                goto err_unregister_hook;
        }
 
-       table->use++;
-
        return 0;
+
 err_unregister_hook:
+       nft_use_dec_restore(&table->use);
+err_use:
        nf_tables_unregister_hook(net, table, chain);
 err_destroy_chain:
        nf_tables_chain_destroy(ctx);
@@ -2694,7 +2699,7 @@ err_hooks:
 
 static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
                                               const struct nft_table *table,
-                                              const struct nlattr *nla)
+                                              const struct nlattr *nla, u8 genmask)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        u32 id = ntohl(nla_get_be32(nla));
@@ -2705,7 +2710,8 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
 
                if (trans->msg_type == NFT_MSG_NEWCHAIN &&
                    chain->table == table &&
-                   id == nft_trans_chain_id(trans))
+                   id == nft_trans_chain_id(trans) &&
+                   nft_active_genmask(chain, genmask))
                        return chain;
        }
        return ERR_PTR(-ENOENT);
@@ -3809,7 +3815,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                        return -EOPNOTSUPP;
 
        } else if (nla[NFTA_RULE_CHAIN_ID]) {
-               chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);
+               chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
+                                             genmask);
                if (IS_ERR(chain)) {
                        NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
                        return PTR_ERR(chain);
@@ -3840,9 +3847,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                        return -EINVAL;
                handle = nf_tables_alloc_handle(table);
 
-               if (chain->use == UINT_MAX)
-                       return -EOVERFLOW;
-
                if (nla[NFTA_RULE_POSITION]) {
                        pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
                        old_rule = __nft_rule_lookup(chain, pos_handle);
@@ -3936,6 +3940,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                }
        }
 
+       if (!nft_use_inc(&chain->use)) {
+               err = -EMFILE;
+               goto err_release_rule;
+       }
+
        if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
                err = nft_delrule(&ctx, old_rule);
                if (err < 0)
@@ -3967,7 +3976,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                }
        }
        kvfree(expr_info);
-       chain->use++;
 
        if (flow)
                nft_trans_flow_rule(trans) = flow;
@@ -3978,6 +3986,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
        return 0;
 
 err_destroy_flow_rule:
+       nft_use_dec_restore(&chain->use);
        if (flow)
                nft_flow_rule_destroy(flow);
 err_release_rule:
@@ -5014,9 +5023,15 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        alloc_size = sizeof(*set) + size + udlen;
        if (alloc_size < size || alloc_size > INT_MAX)
                return -ENOMEM;
+
+       if (!nft_use_inc(&table->use))
+               return -EMFILE;
+
        set = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT);
-       if (!set)
-               return -ENOMEM;
+       if (!set) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
 
        name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL_ACCOUNT);
        if (!name) {
@@ -5074,7 +5089,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                goto err_set_expr_alloc;
 
        list_add_tail_rcu(&set->list, &table->sets);
-       table->use++;
+
        return 0;
 
 err_set_expr_alloc:
@@ -5086,6 +5101,9 @@ err_set_init:
        kfree(set->name);
 err_set_name:
        kvfree(set);
+err_alloc:
+       nft_use_dec_restore(&table->use);
+
        return err;
 }
 
@@ -5224,9 +5242,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_binding *i;
        struct nft_set_iter iter;
 
-       if (set->use == UINT_MAX)
-               return -EOVERFLOW;
-
        if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
                return -EBUSY;
 
@@ -5254,10 +5269,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                        return iter.err;
        }
 bind:
+       if (!nft_use_inc(&set->use))
+               return -EMFILE;
+
        binding->chain = ctx->chain;
        list_add_tail_rcu(&binding->list, &set->bindings);
        nft_set_trans_bind(ctx, set);
-       set->use++;
 
        return 0;
 }
@@ -5331,7 +5348,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
                nft_clear(ctx->net, set);
        }
 
-       set->use++;
+       nft_use_inc_restore(&set->use);
 }
 EXPORT_SYMBOL_GPL(nf_tables_activate_set);
 
@@ -5347,7 +5364,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                else
                        list_del_rcu(&binding->list);
 
-               set->use--;
+               nft_use_dec(&set->use);
                break;
        case NFT_TRANS_PREPARE:
                if (nft_set_is_anonymous(set)) {
@@ -5356,7 +5373,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
 
                        nft_deactivate_next(ctx->net, set);
                }
-               set->use--;
+               nft_use_dec(&set->use);
                return;
        case NFT_TRANS_ABORT:
        case NFT_TRANS_RELEASE:
@@ -5364,7 +5381,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                    set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
                        nft_map_deactivate(ctx, set);
 
-               set->use--;
+               nft_use_dec(&set->use);
                fallthrough;
        default:
                nf_tables_unbind_set(ctx, set, binding,
@@ -6155,7 +6172,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
                nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
-               (*nft_set_ext_obj(ext))->use--;
+               nft_use_dec(&(*nft_set_ext_obj(ext))->use);
        kfree(elem);
 }
 EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
@@ -6657,8 +6674,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                     set->objtype, genmask);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
+                       obj = NULL;
+                       goto err_parse_key_end;
+               }
+
+               if (!nft_use_inc(&obj->use)) {
+                       err = -EMFILE;
+                       obj = NULL;
                        goto err_parse_key_end;
                }
+
                err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
                if (err < 0)
                        goto err_parse_key_end;
@@ -6727,10 +6752,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (flags)
                *nft_set_ext_flags(ext) = flags;
 
-       if (obj) {
+       if (obj)
                *nft_set_ext_obj(ext) = obj;
-               obj->use++;
-       }
+
        if (ulen > 0) {
                if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
                        err = -EINVAL;
@@ -6798,12 +6822,13 @@ err_element_clash:
        kfree(trans);
 err_elem_free:
        nf_tables_set_elem_destroy(ctx, set, elem.priv);
-       if (obj)
-               obj->use--;
 err_parse_data:
        if (nla[NFTA_SET_ELEM_DATA] != NULL)
                nft_data_release(&elem.data.val, desc.type);
 err_parse_key_end:
+       if (obj)
+               nft_use_dec_restore(&obj->use);
+
        nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
 err_parse_key:
        nft_data_release(&elem.key.val, NFT_DATA_VALUE);
@@ -6883,7 +6908,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
                case NFT_JUMP:
                case NFT_GOTO:
                        chain = data->verdict.chain;
-                       chain->use++;
+                       nft_use_inc_restore(&chain->use);
                        break;
                }
        }
@@ -6898,7 +6923,7 @@ static void nft_setelem_data_activate(const struct net *net,
        if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
                nft_data_hold(nft_set_ext_data(ext), set->dtype);
        if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
-               (*nft_set_ext_obj(ext))->use++;
+               nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
 }
 
 static void nft_setelem_data_deactivate(const struct net *net,
@@ -6910,7 +6935,7 @@ static void nft_setelem_data_deactivate(const struct net *net,
        if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
                nft_data_release(nft_set_ext_data(ext), set->dtype);
        if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
-               (*nft_set_ext_obj(ext))->use--;
+               nft_use_dec(&(*nft_set_ext_obj(ext))->use);
 }
 
 static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
@@ -7453,9 +7478,14 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
 
        nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
 
+       if (!nft_use_inc(&table->use))
+               return -EMFILE;
+
        type = nft_obj_type_get(net, objtype);
-       if (IS_ERR(type))
-               return PTR_ERR(type);
+       if (IS_ERR(type)) {
+               err = PTR_ERR(type);
+               goto err_type;
+       }
 
        obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
        if (IS_ERR(obj)) {
@@ -7489,7 +7519,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
                goto err_obj_ht;
 
        list_add_tail_rcu(&obj->list, &table->objects);
-       table->use++;
+
        return 0;
 err_obj_ht:
        /* queued in transaction log */
@@ -7505,6 +7535,9 @@ err_strdup:
        kfree(obj);
 err_init:
        module_put(type->owner);
+err_type:
+       nft_use_dec_restore(&table->use);
+
        return err;
 }
 
@@ -7906,7 +7939,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
        case NFT_TRANS_PREPARE:
        case NFT_TRANS_ABORT:
        case NFT_TRANS_RELEASE:
-               flowtable->use--;
+               nft_use_dec(&flowtable->use);
                fallthrough;
        default:
                return;
@@ -8260,9 +8293,14 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
 
        nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
 
+       if (!nft_use_inc(&table->use))
+               return -EMFILE;
+
        flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL_ACCOUNT);
-       if (!flowtable)
-               return -ENOMEM;
+       if (!flowtable) {
+               err = -ENOMEM;
+               goto flowtable_alloc;
+       }
 
        flowtable->table = table;
        flowtable->handle = nf_tables_alloc_handle(table);
@@ -8317,7 +8355,6 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
                goto err5;
 
        list_add_tail_rcu(&flowtable->list, &table->flowtables);
-       table->use++;
 
        return 0;
 err5:
@@ -8334,6 +8371,9 @@ err2:
        kfree(flowtable->name);
 err1:
        kfree(flowtable);
+flowtable_alloc:
+       nft_use_dec_restore(&table->use);
+
        return err;
 }
 
@@ -9713,7 +9753,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                                 */
                                if (nft_set_is_anonymous(nft_trans_set(trans)) &&
                                    !list_empty(&nft_trans_set(trans)->bindings))
-                                       trans->ctx.table->use--;
+                                       nft_use_dec(&trans->ctx.table->use);
                        }
                        nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
                                             NFT_MSG_NEWSET, GFP_KERNEL);
@@ -9943,7 +9983,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                        nft_trans_destroy(trans);
                                        break;
                                }
-                               trans->ctx.table->use--;
+                               nft_use_dec_restore(&trans->ctx.table->use);
                                nft_chain_del(trans->ctx.chain);
                                nf_tables_unregister_hook(trans->ctx.net,
                                                          trans->ctx.table,
@@ -9956,7 +9996,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                list_splice(&nft_trans_chain_hooks(trans),
                                            &nft_trans_basechain(trans)->hook_list);
                        } else {
-                               trans->ctx.table->use++;
+                               nft_use_inc_restore(&trans->ctx.table->use);
                                nft_clear(trans->ctx.net, trans->ctx.chain);
                        }
                        nft_trans_destroy(trans);
@@ -9966,7 +10006,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                nft_trans_destroy(trans);
                                break;
                        }
-                       trans->ctx.chain->use--;
+                       nft_use_dec_restore(&trans->ctx.chain->use);
                        list_del_rcu(&nft_trans_rule(trans)->list);
                        nft_rule_expr_deactivate(&trans->ctx,
                                                 nft_trans_rule(trans),
@@ -9976,7 +10016,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                        break;
                case NFT_MSG_DELRULE:
                case NFT_MSG_DESTROYRULE:
-                       trans->ctx.chain->use++;
+                       nft_use_inc_restore(&trans->ctx.chain->use);
                        nft_clear(trans->ctx.net, nft_trans_rule(trans));
                        nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
                        if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
@@ -9989,7 +10029,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                nft_trans_destroy(trans);
                                break;
                        }
-                       trans->ctx.table->use--;
+                       nft_use_dec_restore(&trans->ctx.table->use);
                        if (nft_trans_set_bound(trans)) {
                                nft_trans_destroy(trans);
                                break;
@@ -9998,7 +10038,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                        break;
                case NFT_MSG_DELSET:
                case NFT_MSG_DESTROYSET:
-                       trans->ctx.table->use++;
+                       nft_use_inc_restore(&trans->ctx.table->use);
                        nft_clear(trans->ctx.net, nft_trans_set(trans));
                        if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
                                nft_map_activate(&trans->ctx, nft_trans_set(trans));
@@ -10042,13 +10082,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
                                nft_trans_destroy(trans);
                        } else {
-                               trans->ctx.table->use--;
+                               nft_use_dec_restore(&trans->ctx.table->use);
                                nft_obj_del(nft_trans_obj(trans));
                        }
                        break;
                case NFT_MSG_DELOBJ:
                case NFT_MSG_DESTROYOBJ:
-                       trans->ctx.table->use++;
+                       nft_use_inc_restore(&trans->ctx.table->use);
                        nft_clear(trans->ctx.net, nft_trans_obj(trans));
                        nft_trans_destroy(trans);
                        break;
@@ -10057,7 +10097,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                nft_unregister_flowtable_net_hooks(net,
                                                &nft_trans_flowtable_hooks(trans));
                        } else {
-                               trans->ctx.table->use--;
+                               nft_use_dec_restore(&trans->ctx.table->use);
                                list_del_rcu(&nft_trans_flowtable(trans)->list);
                                nft_unregister_flowtable_net_hooks(net,
                                                &nft_trans_flowtable(trans)->hook_list);
@@ -10069,7 +10109,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                                list_splice(&nft_trans_flowtable_hooks(trans),
                                            &nft_trans_flowtable(trans)->hook_list);
                        } else {
-                               trans->ctx.table->use++;
+                               nft_use_inc_restore(&trans->ctx.table->use);
                                nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
                        }
                        nft_trans_destroy(trans);
@@ -10502,7 +10542,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
                                                 genmask);
                } else if (tb[NFTA_VERDICT_CHAIN_ID]) {
                        chain = nft_chain_lookup_byid(ctx->net, ctx->table,
-                                                     tb[NFTA_VERDICT_CHAIN_ID]);
+                                                     tb[NFTA_VERDICT_CHAIN_ID],
+                                                     genmask);
                        if (IS_ERR(chain))
                                return PTR_ERR(chain);
                } else {
@@ -10518,8 +10559,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
                if (desc->flags & NFT_DATA_DESC_SETELEM &&
                    chain->flags & NFT_CHAIN_BINDING)
                        return -EINVAL;
+               if (!nft_use_inc(&chain->use))
+                       return -EMFILE;
 
-               chain->use++;
                data->verdict.chain = chain;
                break;
        }
@@ -10537,7 +10579,7 @@ static void nft_verdict_uninit(const struct nft_data *data)
        case NFT_JUMP:
        case NFT_GOTO:
                chain = data->verdict.chain;
-               chain->use--;
+               nft_use_dec(&chain->use);
                break;
        }
 }
@@ -10706,11 +10748,11 @@ int __nft_release_basechain(struct nft_ctx *ctx)
        nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
        list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
                list_del(&rule->list);
-               ctx->chain->use--;
+               nft_use_dec(&ctx->chain->use);
                nf_tables_rule_release(ctx, rule);
        }
        nft_chain_del(ctx->chain);
-       ctx->table->use--;
+       nft_use_dec(&ctx->table->use);
        nf_tables_chain_destroy(ctx);
 
        return 0;
@@ -10760,18 +10802,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
                ctx.chain = chain;
                list_for_each_entry_safe(rule, nr, &chain->rules, list) {
                        list_del(&rule->list);
-                       chain->use--;
+                       nft_use_dec(&chain->use);
                        nf_tables_rule_release(&ctx, rule);
                }
        }
        list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
                list_del(&flowtable->list);
-               table->use--;
+               nft_use_dec(&table->use);
                nf_tables_flowtable_destroy(flowtable);
        }
        list_for_each_entry_safe(set, ns, &table->sets, list) {
                list_del(&set->list);
-               table->use--;
+               nft_use_dec(&table->use);
                if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
                        nft_map_deactivate(&ctx, set);
 
@@ -10779,13 +10821,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
        }
        list_for_each_entry_safe(obj, ne, &table->objects, list) {
                nft_obj_del(obj);
-               table->use--;
+               nft_use_dec(&table->use);
                nft_obj_destroy(&ctx, obj);
        }
        list_for_each_entry_safe(chain, nc, &table->chains, list) {
                ctx.chain = chain;
                nft_chain_del(chain);
-               table->use--;
+               nft_use_dec(&table->use);
                nf_tables_chain_destroy(&ctx);
        }
        nf_tables_table_destroy(&ctx);
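
The theme running through this whole nf_tables diff: every bare use++/use-- on tables, chains, sets, objects and flowtables becomes a checked nft_use_inc()/nft_use_dec() pair, so the 32-bit use counter can no longer be wrapped from user space (the scattered UINT_MAX spot-checks are dropped, and the new failure mode is -EMFILE at the point of increment). The *_restore variants re-apply a count that already passed validation once, e.g. on transaction abort. One plausible shape for a saturating counter of this kind (the real helpers live in the nf_tables headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* refuse the increment that would let the counter wrap */
    static bool use_inc(uint32_t *use)
    {
            if (*use == UINT32_MAX - 1)
                    return false;           /* caller returns -EMFILE */
            (*use)++;
            return true;
    }

    static void use_dec(uint32_t *use)
    {
            if (*use == 0) {
                    fprintf(stderr, "use count underflow\n"); /* WARN in-kernel */
                    return;
            }
            (*use)--;
    }

    int main(void)
    {
            uint32_t use = UINT32_MAX - 1;

            printf("inc near ceiling: %s\n", use_inc(&use) ? "ok" : "refused");
            return 0;
    }
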
index 9a85e797ed58bee9fa2e378a1f876f5cbf45d4ae..e596d1a842f7024a5b0237985d9d69a999528b95 100644 (file)
@@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
        const struct nft_byteorder *priv = nft_expr_priv(expr);
        u32 *src = &regs->data[priv->sreg];
        u32 *dst = &regs->data[priv->dreg];
-       union { u32 u32; u16 u16; } *s, *d;
+       u16 *s16, *d16;
        unsigned int i;
 
-       s = (void *)src;
-       d = (void *)dst;
+       s16 = (void *)src;
+       d16 = (void *)dst;
 
        switch (priv->size) {
        case 8: {
@@ -62,11 +62,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
                switch (priv->op) {
                case NFT_BYTEORDER_NTOH:
                        for (i = 0; i < priv->len / 4; i++)
-                               d[i].u32 = ntohl((__force __be32)s[i].u32);
+                               dst[i] = ntohl((__force __be32)src[i]);
                        break;
                case NFT_BYTEORDER_HTON:
                        for (i = 0; i < priv->len / 4; i++)
-                               d[i].u32 = (__force __u32)htonl(s[i].u32);
+                               dst[i] = (__force __u32)htonl(src[i]);
                        break;
                }
                break;
@@ -74,11 +74,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
                switch (priv->op) {
                case NFT_BYTEORDER_NTOH:
                        for (i = 0; i < priv->len / 2; i++)
-                               d[i].u16 = ntohs((__force __be16)s[i].u16);
+                               d16[i] = ntohs((__force __be16)s16[i]);
                        break;
                case NFT_BYTEORDER_HTON:
                        for (i = 0; i < priv->len / 2; i++)
-                               d[i].u16 = (__force __u16)htons(s[i].u16);
+                               d16[i] = (__force __u16)htons(s16[i]);
                        break;
                }
                break;
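
The union was the bug here: sizeof(union { u32 u32; u16 u16; }) is 4, so s[i].u16 advanced in 4-byte steps and the 16-bit byte-swap touched only every other halfword; separate u16/u32 pointers restore the natural strides. Demonstrable in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t data[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
            union { uint32_t u32; uint16_t u16; } *u = (void *)data;
            uint16_t *p = data;

            /* union stride is 4 bytes: u[1].u16 lands on data[2], not data[1] */
            printf("u[1].u16 = %04x (stride %zu)\n", u[1].u16, sizeof(*u));
            printf("p[1]     = %04x (stride %zu)\n", p[1], sizeof(*p));
            return 0;
    }
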
index 5ef9146e74ad97392b1c3106933909955cd88253..ab3362c483b4a78c1e138815764e9e80bfd5d43d 100644 (file)
@@ -408,8 +408,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
        if (IS_ERR(flowtable))
                return PTR_ERR(flowtable);
 
+       if (!nft_use_inc(&flowtable->use))
+               return -EMFILE;
+
        priv->flowtable = flowtable;
-       flowtable->use++;
 
        return nf_ct_netns_get(ctx->net, ctx->family);
 }
@@ -428,7 +430,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
 {
        struct nft_flow_offload *priv = nft_expr_priv(expr);
 
-       priv->flowtable->use++;
+       nft_use_inc_restore(&priv->flowtable->use);
 }
 
 static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
index 3d76ebfe8939bdf569409cbf600a27673287124b..407d7197f75bb328f3fd4f96c826240bef126caf 100644 (file)
@@ -159,7 +159,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
                        default:
                                nft_chain_del(chain);
                                chain->bound = false;
-                               chain->table->use--;
+                               nft_use_dec(&chain->table->use);
                                break;
                        }
                        break;
@@ -198,7 +198,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
                 * let the transaction records release this chain and its rules.
                 */
                if (chain->bound) {
-                       chain->use--;
+                       nft_use_dec(&chain->use);
                        break;
                }
 
@@ -206,9 +206,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
                chain_ctx = *ctx;
                chain_ctx.chain = chain;
 
-               chain->use--;
+               nft_use_dec(&chain->use);
                list_for_each_entry_safe(rule, n, &chain->rules, list) {
-                       chain->use--;
+                       nft_use_dec(&chain->use);
                        list_del(&rule->list);
                        nf_tables_rule_destroy(&chain_ctx, rule);
                }
index a48dd5b5d45b13256b0d30d4806ddca49bbcbc6f..509011b1ef597ca96abbbc3acf7fdb87536acdf0 100644 (file)
@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
        if (IS_ERR(obj))
                return -ENOENT;
 
+       if (!nft_use_inc(&obj->use))
+               return -EMFILE;
+
        nft_objref_priv(expr) = obj;
-       obj->use++;
 
        return 0;
 }
@@ -72,7 +74,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
        if (phase == NFT_TRANS_COMMIT)
                return;
 
-       obj->use--;
+       nft_use_dec(&obj->use);
 }
 
 static void nft_objref_activate(const struct nft_ctx *ctx,
@@ -80,7 +82,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
 {
        struct nft_object *obj = nft_objref_priv(expr);
 
-       obj->use++;
+       nft_use_inc_restore(&obj->use);
 }
 
 static const struct nft_expr_ops nft_objref_ops = {
index f7887f42d5427f352c58fc1b213a7f04a7f02f17..9d3f26bf0440d9d2296e73ad39157e9122cc0b1d 100644 (file)
@@ -1320,7 +1320,7 @@ struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
                        return ERR_PTR(err);
                }
        } else {
-               if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
+               if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
                        return ERR_PTR(-EINVAL);
                }
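
strlcpy() reports the length it tried to copy, so truncation has to be inferred from ret >= size; strscpy() returns the number of characters copied or -E2BIG on truncation, making ret < 0 the direct test used above (with a constant source this is a hardening/readability change, not a behavior fix). The two return conventions, modeled in user space:

    #include <stdio.h>
    #include <string.h>

    /* strscpy-style: bytes copied on success, -1 on truncation */
    static long scpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (len >= size) {
                    if (size) {
                            memcpy(dst, src, size - 1);
                            dst[size - 1] = '\0';
                    }
                    return -1;      /* -E2BIG in the kernel */
            }
            memcpy(dst, src, len + 1);
            return (long)len;
    }

    int main(void)
    {
            char name[16];

            if (scpy(name, "police", sizeof(name)) < 0)
                    return 1;       /* the "name too long" error path */
            printf("copied '%s'\n", name);
            return 0;
    }
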
index 56065cc5a6614e1fb9c0aa84be2d295d9bf5975b..f2b0bc4142fe3b5bfc8c2947897c413c52390ed4 100644 (file)
@@ -812,6 +812,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
                       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
                       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
 
+       if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
+               NL_SET_ERR_MSG(extack,
+                              "Both min and max destination ports must be specified");
+               return -EINVAL;
+       }
+       if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
+               NL_SET_ERR_MSG(extack,
+                              "Both min and max source ports must be specified");
+               return -EINVAL;
+       }
        if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
            ntohs(key->tp_range.tp_max.dst) <=
            ntohs(key->tp_range.tp_min.dst)) {
index ae9439a6c56c90f8f1711c8cde33cd967f1ef7e5..8641f805931793a468abb01b7fcd95c8cf9891f4 100644 (file)
@@ -212,11 +212,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
        if (err < 0)
                return err;
 
-       if (tb[TCA_FW_CLASSID]) {
-               f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
-               tcf_bind_filter(tp, &f->res, base);
-       }
-
        if (tb[TCA_FW_INDEV]) {
                int ret;
                ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
@@ -233,6 +228,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
        } else if (head->mask != 0xFFFFFFFF)
                return err;
 
+       if (tb[TCA_FW_CLASSID]) {
+               f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
+               tcf_bind_filter(tp, &f->res, base);
+       }
+
        return 0;
 }
 
index dfd9a99e62570f18d814d3155a7df545d284fc75..befaf74b33caa2d4b0b80c826191c3b2a9222ea1 100644 (file)
@@ -381,8 +381,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
                           u32 lmax)
 {
        struct qfq_sched *q = qdisc_priv(sch);
-       struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
+       struct qfq_aggregate *new_agg;
 
+       /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
+       if (lmax > QFQ_MAX_LMAX)
+               return -EINVAL;
+
+       new_agg = qfq_find_agg(q, lmax, weight);
        if (new_agg == NULL) { /* create new aggregate */
                new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
                if (new_agg == NULL)
@@ -423,10 +428,17 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        else
                weight = 1;
 
-       if (tb[TCA_QFQ_LMAX])
+       if (tb[TCA_QFQ_LMAX]) {
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-       else
+       } else {
+               /* MTU size is user controlled */
                lmax = psched_mtu(qdisc_dev(sch));
+               if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "MTU size out of bounds for qfq");
+                       return -EINVAL;
+               }
+       }
 
        inv_w = ONE_FP / weight;
        weight = ONE_FP / inv_w;
index 89c9ad6c886e09fea2d8f842a6da952abbefba9f..1783ab9d57a3193bca80a6edd92824bdbc4c5022 100644 (file)
@@ -580,6 +580,8 @@ int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb)
                hdrlen += ETH_ALEN + 2;
        else if (!pskb_may_pull(skb, hdrlen))
                return -EINVAL;
+       else
+               payload.eth.h_proto = htons(skb->len - hdrlen);
 
        mesh_addr = skb->data + sizeof(payload.eth) + ETH_ALEN;
        switch (payload.flags & MESH_FLAGS_AE) {
index bf49ed0d73623f9d45c8a52fc739144bd9c8b51b..b0ddf5f3673887174da5a0a5c1d46f51003ebdf2 100644 (file)
@@ -210,9 +210,7 @@ config SAMPLE_VFIO_MDEV_MDPY
 config SAMPLE_VFIO_MDEV_MDPY_FB
        tristate "Build VFIO mdpy example guest fbdev driver"
        depends on FB
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_IOMEM_HELPERS
        help
          Guest fbdev driver for the virtual display sample driver.
 
index 06d88914901266e248956d3809c982058168a66c..e5ed08098ff31dce52442a922e4c4975b66ec6ff 100644 (file)
@@ -2,7 +2,9 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func1(void);
 extern void my_direct_func2(void);
@@ -96,6 +98,38 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      bti     c\n"
+"      sub     sp, sp, #16\n"
+"      stp     x9, x30, [sp]\n"
+"      bl      my_direct_func1\n"
+"      ldp     x30, x9, [sp]\n"
+"      add     sp, sp, #16\n"
+"      ret     x9\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      bti     c\n"
+"      sub     sp, sp, #16\n"
+"      stp     x9, x30, [sp]\n"
+"      bl      my_direct_func2\n"
+"      ldp     x30, x9, [sp]\n"
+"      add     sp, sp, #16\n"
+"      ret     x9\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
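
The arm64 trampolines added across these samples all follow one pattern. arm64 ftrace patches a function entry with `mov x9, x30; bl <trampoline>`, so on entry x30 points back into the instrumented function and x9 still holds that function's caller. The stp/ldp pair deliberately swaps the two registers on reload, which is why `ret x9` works: it resumes the instrumented function with x30 already restored for its eventual return. The skeleton, annotated (comments added; instructions as in the samples above):

            bti     c                       // valid indirect-branch landing pad
            sub     sp, sp, #16             // room for two saved registers
            stp     x9, x30, [sp]           // x9 = caller LR, x30 = patch-site LR
            bl      my_direct_func          // call the C handler
            ldp     x30, x9, [sp]           // swapped reload: x30 <- caller LR
            add     sp, sp, #16
            ret     x9                      // back into the instrumented function

The variants that also save x0-x3 do so because those are the instrumented function's argument registers and must survive the handler call; the #ifndef CONFIG_ARM64 guards keep the asm-offsets.h include, which these trampolines do not use, out of the arm64 build.
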
index 62f6b681999e0b76f3ecdf304b87679592359e49..292cff2b3f5d755f21bb80a3f5f52a05a368049b 100644 (file)
@@ -2,7 +2,9 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func1(unsigned long ip);
 extern void my_direct_func2(unsigned long ip);
@@ -103,6 +105,44 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func1\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func2\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 #include <asm/asm.h>
 
index 5482cf616b433b90e9ed2101adb525fc48d91b96..b4391e08c913e8cbcba96965036f093024861825 100644 (file)
@@ -4,7 +4,9 @@
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
 #include <linux/sched/stat.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func(unsigned long ip);
 
@@ -66,6 +68,29 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 #include <asm/asm.h>
index a05bc2cc2261456e878ed8150436b091f1771b06..e9804c5307c0c7481f5bf8909b9431ed41efd25f 100644 (file)
@@ -3,16 +3,18 @@
 
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
-extern void my_direct_func(struct vm_area_struct *vma,
-                          unsigned long address, unsigned int flags);
+extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+                          unsigned int flags, struct pt_regs *regs);
 
-void my_direct_func(struct vm_area_struct *vma,
-                       unsigned long address, unsigned int flags)
+void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+                   unsigned int flags, struct pt_regs *regs)
 {
-       trace_printk("handle mm fault vma=%p address=%lx flags=%x\n",
-                    vma, address, flags);
+       trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
+                    vma, address, flags, regs);
 }
 
 extern void my_tramp(void *);
@@ -34,7 +36,9 @@ asm (
 "      pushq %rdi\n"
 "      pushq %rsi\n"
 "      pushq %rdx\n"
+"      pushq %rcx\n"
 "      call my_direct_func\n"
+"      popq %rcx\n"
 "      popq %rdx\n"
 "      popq %rsi\n"
 "      popq %rdi\n"
@@ -70,6 +74,30 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #48\n"
+"      stp     x9, x30, [sp]\n"
+"      stp     x0, x1, [sp, #16]\n"
+"      stp     x2, x3, [sp, #32]\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldp     x0, x1, [sp, #16]\n"
+"      ldp     x2, x3, [sp, #32]\n"
+"      add     sp, sp, #48\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
index 06879bbd339936d837649409507527984481aafd..20f4a7caa810ebd0f175cf73ad48bc73f06baa66 100644 (file)
@@ -3,7 +3,9 @@
 
 #include <linux/sched.h> /* for wake_up_process() */
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func(struct task_struct *p);
 
@@ -63,6 +65,28 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
index 3c8001b9e407ea58cfce1819060537cc42a21ccf..4598bc28acd996c064cba434964c621952f15da2 100644 (file)
@@ -88,11 +88,9 @@ static void mdpy_fb_destroy(struct fb_info *info)
 
 static const struct fb_ops mdpy_fb_ops = {
        .owner          = THIS_MODULE,
+       FB_DEFAULT_IOMEM_OPS,
        .fb_destroy     = mdpy_fb_destroy,
        .fb_setcolreg   = mdpy_fb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
 };
 
 static int mdpy_fb_probe(struct pci_dev *pdev,
@@ -162,7 +160,6 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
        }
 
        info->fbops = &mdpy_fb_ops;
-       info->flags = FBINFO_DEFAULT;
        info->pseudo_palette = par->palette;
 
        ret = register_framebuffer(info);
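
FB_DEFAULT_IOMEM_OPS belongs to this cycle's fbdev ops consolidation: it fills in the standard I/O-memory implementations that drivers like mdpy previously spelled out by hand. From memory of include/linux/fb.h in this era, it expands roughly to:

        #define FB_DEFAULT_IOMEM_OPS \
                __FB_DEFAULT_IOMEM_OPS_RDWR, \
                __FB_DEFAULT_IOMEM_OPS_DRAW, \
                __FB_DEFAULT_IOMEM_OPS_MMAP

i.e. .fb_read/.fb_write backed by fb_io_read/fb_io_write, the cfb_* drawing helpers this hunk deletes, and .fb_mmap backed by fb_io_mmap. Dropping "info->flags = FBINFO_DEFAULT" below is likewise safe because FBINFO_DEFAULT is 0; the assignment was a no-op being removed treewide.
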
index d387c9381650730a86685d76f657f5d4ab60f9e5..16c87938b3165c2be64eb5886b9ed4c4e7e15c01 100644 (file)
@@ -349,10 +349,10 @@ static void cleanup_symbol_name(char *s)
         * ASCII[_]   = 5f
         * ASCII[a-z] = 61,7a
         *
-        * As above, replacing '.' with '\0' does not affect the main sorting,
-        * but it helps us with subsorting.
+        * As above, replacing the first '.' in ".llvm." with '\0' does not
+        * affect the main sorting, but it helps us with subsorting.
         */
-       p = strchr(s, '.');
+       p = strstr(s, ".llvm.");
        if (p)
                *p = '\0';
 }
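
The old strchr() cut the symbol at the first '.' of any suffix, so compiler-generated names like ".cold" or ".constprop.0" were also stripped when kallsyms normalized symbols for comparison; strstr(".llvm.") narrows the cleanup to LLVM's unique-name suffixes, the only case this helper is meant to handle. Illustrative only (hypothetical symbol names):

        char a[] = "memcpy.llvm.1234567890";  /* -> "memcpy", before and after this patch */
        char b[] = "rcu_core.cold";           /* old code: -> "rcu_core"; now: left intact */

        cleanup_symbol_name(a);
        cleanup_symbol_name(b);
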
index d420b5d2e2b6751a43078913500e3d101acc7eec..081befa4674b86cbdbeed1b7085ba960b262d7c0 100644 (file)
@@ -1005,7 +1005,7 @@ struct elf *elf_open_read(const char *name, int flags)
                perror("malloc");
                return NULL;
        }
-       memset(elf, 0, offsetof(struct elf, sections));
+       memset(elf, 0, sizeof(*elf));
 
        INIT_LIST_HEAD(&elf->sections);
 
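The offsetof() form only zeroed the members declared before elf->sections, on the theory that everything after that point is initialized explicitly; once a plain counter landed after the cut-off (as I recall, a then-new num_relocs used to size the reloc hash), it was read with whatever malloc() happened to return. Zeroing sizeof(*elf) keeps the invariant no matter how the struct grows. A sketch of the pitfall (layout and field names illustrative, not the exact struct):

        struct elf {
                Elf *elf;                  /* zeroed by the old memset */
                struct list_head sections; /* offsetof() cut-off was here */
                unsigned long num_relocs;  /* anything from here on was left uninitialized */
        };
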
diff --git a/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c b/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c
new file mode 100644 (file)
index 0000000..118abc2
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "async_stack_depth.skel.h"
+
+void test_async_stack_depth(void)
+{
+       RUN_TESTS(async_stack_depth);
+}
diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c
new file mode 100644 (file)
index 0000000..477ba95
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct hmap_elem {
+       struct bpf_timer timer;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(max_entries, 64);
+       __type(key, int);
+       __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+__attribute__((noinline))
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+       volatile char buf[256] = {};
+       return buf[69];
+}
+
+SEC("tc")
+__failure __msg("combined stack size of 2 calls")
+int prog(struct __sk_buff *ctx)
+{
+       struct hmap_elem *elem;
+       volatile char buf[256] = {};
+
+       elem = bpf_map_lookup_elem(&hmap, &(int){0});
+       if (!elem)
+               return 0;
+
+       timer_cb(NULL, NULL, NULL);
+       return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
+}
+
+char _license[] SEC("license") = "GPL";
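
This program exercises the fix that makes the verifier repeat its stack-depth walk for async callbacks: prog() keeps a 256-byte buffer live on its own frame, timer_cb() another 256 bytes, and once bpf_timer_set_callback() makes timer_cb() reachable as an async callback both frames are charged against the same budget:

        256 (prog) + 256 (timer_cb) + call-frame overhead > MAX_BPF_STACK (512)

so the __failure/__msg annotations, consumed by RUN_TESTS in the runner above, assert that the load is rejected with the verifier's "combined stack size of 2 calls" message. The direct timer_cb(NULL, NULL, NULL) call and the buf[0] read keep the callback and the buffer from being optimized away.
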
index 681b906b485325d901c11a1bfda8914c4630aabc..4da48bf6b328a2d1a51bb950ec413940c6bd9720 100755 (executable)
@@ -79,6 +79,7 @@ recompile_kernel()
        cd "${kernel_checkout}"
 
        ${make_command} olddefconfig
+       ${make_command} headers
        ${make_command}
 }
 
index 147899a868d34be8925a3bab6d0859dd3069b07b..976dffda465459a26bd2864450bc242e5035657b 100644 (file)
             "$TC qdisc del dev $DUMMY handle 1: root",
             "$IP link del dev $DUMMY type dummy"
         ]
+    },
+    {
+        "id": "85ee",
+        "name": "QFQ with big MTU",
+        "category": [
+            "qdisc",
+            "qfq"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$IP link add dev $DUMMY type dummy || /bin/true",
+            "$IP link set dev $DUMMY mtu 2147483647 || /bin/true",
+            "$TC qdisc add dev $DUMMY handle 1: root qfq"
+        ],
+        "cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100",
+        "expExitCode": "2",
+        "verifyCmd": "$TC class show dev $DUMMY",
+        "matchPattern": "class qfq 1:",
+        "matchCount": "0",
+        "teardown": [
+            "$IP link del dev $DUMMY type dummy"
+        ]
+    },
+    {
+        "id": "ddfa",
+        "name": "QFQ with small MTU",
+        "category": [
+            "qdisc",
+            "qfq"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$IP link add dev $DUMMY type dummy || /bin/true",
+            "$IP link set dev $DUMMY mtu 256 || /bin/true",
+            "$TC qdisc add dev $DUMMY handle 1: root qfq"
+        ],
+        "cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100",
+        "expExitCode": "2",
+        "verifyCmd": "$TC class show dev $DUMMY",
+        "matchPattern": "class qfq 1:",
+        "matchCount": "0",
+        "teardown": [
+            "$IP link del dev $DUMMY type dummy"
+        ]
+    },
+    {
+        "id": "5993",
+        "name": "QFQ with stab overhead greater than max packet len",
+        "category": [
+            "qdisc",
+            "qfq",
+            "scapy"
+        ],
+        "plugins": {
+            "requires": [
+                "nsPlugin",
+                "scapyPlugin"
+            ]
+        },
+        "setup": [
+            "$IP link add dev $DUMMY type dummy || /bin/true",
+            "$IP link set dev $DUMMY up || /bin/true",
+            "$TC qdisc add dev $DUMMY handle 1: stab mtu 2048 tsize 512 mpu 0 overhead 999999999 linklayer ethernet root qfq",
+            "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 100",
+            "$TC qdisc add dev $DEV1 clsact",
+            "$TC filter add dev $DEV1 ingress protocol ip flower dst_ip 1.3.3.7/32 action mirred egress mirror dev $DUMMY"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DUMMY parent 1: matchall classid 1:1",
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 22,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='1.3.3.7')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "expExitCode": "0",
+        "verifyCmd": "$TC -s qdisc ls dev $DUMMY",
+        "matchPattern": "dropped 22",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DUMMY handle 1: root qfq"
+        ]
     }
 ]
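
These three additions pin down QFQ's class-parameter validation: when TCA_QFQ_LMAX is absent, lmax falls back to the device MTU, so an absurdly large MTU ("85ee") and one below the minimum lmax ("ddfa"; 512, if memory serves, hence MTU 256 failing) must both make class creation fail, while "5993" checks that stab overhead is charged at enqueue time via qdisc_pkt_len(), so packets inflated past the class lmax are dropped, which is what the "dropped 22" match asserts. A rough sketch of the bound check the first two cases exercise, with constant names quoted from memory of net/sched/sch_qfq.c around this series:

        if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX)
                return -EINVAL; /* surfaces as tc exit code 2 in the tests above */
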
index d6979a48478fc35fe7f7628ebc2dac0117ca3a49..91a4444ad42b28381bf5b4d9d3ae41cdd204e598 100644 (file)
@@ -217,6 +217,18 @@ TEST_F(user, matching) {
        /* Types don't match */
        TEST_NMATCH("__test_event u64 a; u64 b",
                    "__test_event u32 a; u32 b");
+
+       /* Struct name and size match */
+       TEST_MATCH("__test_event struct my_struct a 20",
+                  "__test_event struct my_struct a 20");
+
+       /* Struct field name doesn't match */
+       TEST_NMATCH("__test_event struct my_struct a 20",
+                   "__test_event struct my_struct b 20");
+
+       /* Struct size doesn't match */
+       TEST_NMATCH("__test_event struct my_struct a 20",
+                   "__test_event struct my_struct a 21");
 }
 
 int main(int argc, char **argv)
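
The new cases document how user_events matches struct-typed fields: a field described as "struct my_struct a 20" carries the struct type name, the field name, and the byte size, and all of them participate in the same-event comparison. In the same style as the cases above, a hypothetical mixed case (not part of this patch) would be:

        /* Struct part matches but the scalar type differs, so no match */
        TEST_NMATCH("__test_event struct my_struct a 20; u32 b",
                    "__test_event struct my_struct a 20; u64 b");
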